diff options
-rw-r--r-- | client/test_func.py | 11 | ||||
-rw-r--r-- | modules/__init__.py | 0 | ||||
-rwxr-xr-x | modules/baseobj.py | 115 | ||||
-rwxr-xr-x | modules/test.py | 26 | ||||
-rwxr-xr-x | modules/web_svc.py | 53 | ||||
-rwxr-xr-x | scripts/funcd | 12 | ||||
-rw-r--r-- | server/__init__.py | 0 | ||||
-rwxr-xr-x | server/codes.py | 252 | ||||
-rwxr-xr-x | server/config_data.py | 57 | ||||
-rwxr-xr-x | server/logger.py | 55 | ||||
-rwxr-xr-x | server/module_loader.py | 53 | ||||
-rwxr-xr-x | server/server.py | 215 | ||||
-rwxr-xr-x | server/utils.py | 50 | ||||
-rwxr-xr-x | server/yaml/__init__.py | 17 | ||||
-rwxr-xr-x | server/yaml/dump.py | 296 | ||||
-rwxr-xr-x | server/yaml/implicit.py | 46 | ||||
-rwxr-xr-x | server/yaml/inline.py | 38 | ||||
-rwxr-xr-x | server/yaml/klass.py | 48 | ||||
-rwxr-xr-x | server/yaml/load.py | 327 | ||||
-rwxr-xr-x | server/yaml/ordered_dict.py | 31 | ||||
-rwxr-xr-x | server/yaml/redump.py | 16 | ||||
-rwxr-xr-x | server/yaml/stream.py | 193 | ||||
-rwxr-xr-x | server/yaml/timestamp.py | 145 | ||||
-rwxr-xr-x | server/yaml/ypath.py | 462 |
24 files changed, 2518 insertions, 0 deletions
diff --git a/client/test_func.py b/client/test_func.py new file mode 100644 index 0000000..35ce100 --- /dev/null +++ b/client/test_func.py @@ -0,0 +1,11 @@ +#!/usr/bin/python + + +import xmlrpclib + +s = xmlrpclib.ServerProxy("http://127.0.0.1:51234") + +print s.test_add(1, 2) + + + diff --git a/modules/__init__.py b/modules/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/modules/__init__.py diff --git a/modules/baseobj.py b/modules/baseobj.py new file mode 100755 index 0000000..702f0a7 --- /dev/null +++ b/modules/baseobj.py @@ -0,0 +1,115 @@ +""" +Virt-factory backend code. + +Copyright 2006, Red Hat, Inc +Michael DeHaan <mdehaan@redhat.com> +Scott Seago <sseago@redhat.com> + +This software may be freely redistributed under the terms of the GNU +general public license. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +""" + +import string +import exceptions +import os + +class BaseObject(object): + + FIELDS = [] # subclasses should define a list of db column names here + + def load(self,hash,key,default=None): + """ + Access a hash element safely... + """ + # FIXME: it would be cool if load starts with a copy of the hash + # and clears off entries as recieved, such that we can tell if any + # entries are not loaded. This should result in a warning in the return + # object. + assert hash is not None, "hash is None" + assert key is not None, "key is None" + if hash.has_key(key): + return hash[key] + else: + return default + + def to_datastruct(self,to_caller=False): + """ + Return a hash representation of this object. + Defers to self.to_datastruct_internal which subclasses must implement. + """ + ds = self.to_datastruct_internal() + if to_caller: + # don't send NULLs + ds = self.remove_nulls(ds) + return ds + + def to_datastruct_internal(self): + """ + Subclasses: implement this. 
+ """ + raise exceptions.NotImplementedError + + def deserialize(self, args): + for x in self.FIELDS: + if args.has_key(x): + setattr(self, x, args[x]) + else: + setattr(self, x, None) + + def serialize(self): + result = {} + for x in self.FIELDS: + result[x] = getattr(self, x, None) + return result + + def remove_nulls(self, x): + """ + If any entries are None in the datastructure, prune them. + XMLRPC can't marshall None and this is our workaround. Objects + that are None are removed from the hash -- including hash keys that + are not None and have None for the value. The WUI or other SW + should know how to deal with these returns. + """ + assert x is not None, "datastructure is None" + if type(x) == list: + newx = [] + for i in x: + if type(i) == list or type(i) == dict: + newx.append(self.remove_nulls(i)) + elif i is not None: + newx.append(i) + x = newx + elif type(x) == dict: + newx = {} + for i,j in x.iteritems(): + if type(j) == list or type(j) == dict: + newx[i] = self.remove_nulls(x) + elif j is not None: + newx[i] = j + x = newx + return x + + # ======================== + # random utility functions + + def is_printable(self, stringy): + # FIXME: use regex package + + if stringy == None: + return False + if type(stringy) != str: + stringy = "%s" % stringy + try: + for letter in stringy: + if letter not in string.printable: + return False + return True + except: + return False + + +
\ No newline at end of file diff --git a/modules/test.py b/modules/test.py new file mode 100755 index 0000000..7783b4e --- /dev/null +++ b/modules/test.py @@ -0,0 +1,26 @@ +#!/usr/bin/python + + +from codes import * +from modules import web_svc + + + +class Test(web_svc.WebSvc): + def __init__(self): + self.methods = { + "test_add": self.add, + "test_blippy": self.blippy, + } + web_svc.WebSvc.__init__(self) + + def add(self, numb1, numb2): + return success(int(numb1) + int(numb2)) + + def blippy(self, foo): + fh = open("/tmp/blippy","w+") + fh.close() + return success(foo) + +methods = Test() +register_rpc = methods.register_rpc diff --git a/modules/web_svc.py b/modules/web_svc.py new file mode 100755 index 0000000..ed4ec19 --- /dev/null +++ b/modules/web_svc.py @@ -0,0 +1,53 @@ +#!/usr/bin/python + +## Virt-factory backend code. +## +## Copyright 2006, Red Hat, Inc +## Adrian Likins <alikins@redhat.com +## +## This software may be freely redistributed under the terms of the GNU +## general public license. +## +## You should have received a copy of the GNU General Public License +## along with this program; if not, write to the Free Software +## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+## + +from codes import * + +import baseobj +from server import config_data +from server import logger + +import os +import threading +import time +import traceback + + +class WebSvc(object): + def __init__(self): + + config_obj = config_data.Config() + config_result = config_obj.get() + self.config = config_result + self.__init_log() + + def __init_log(self): + # lets see what happens when we c&p the stuff from server.py + log = logger.Logger() + self.logger = log.logger + + def register_rpc(self, handlers): + for meth in self.methods: + handlers[meth] = self.methods[meth] + + def offset_and_limit(self, args): + return args.get('offset', 0), args.get('limit', 100000) + + +class AuthWebSvc(WebSvc): + def __init__(self): + WebSvc.__init__(self) + + diff --git a/scripts/funcd b/scripts/funcd new file mode 100755 index 0000000..165dd68 --- /dev/null +++ b/scripts/funcd @@ -0,0 +1,12 @@ +#!/usr/bin/python + + +import sys +import distutils.sysconfig + +sys.path.append("%s/func" % distutils.sysconfig.get_python_lib()) + +from server import server + +if __name__ == "__main__": + server.main(sys.argv) diff --git a/server/__init__.py b/server/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/server/__init__.py diff --git a/server/codes.py b/server/codes.py new file mode 100755 index 0000000..82bfb0a --- /dev/null +++ b/server/codes.py @@ -0,0 +1,252 @@ +#!/usr/bin/python +""" +Virt-factory backend code. + +Copyright 2006, Red Hat, Inc +Michael DeHaan <mdehaan@redhat.com> +Scott Seago <sseago@redhat.com> + +This software may be freely redistributed under the terms of the GNU +general public license. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+""" + +import exceptions +import string +import sys +import traceback + + + +# internal codes for the types of operations (used by validation logic) +OP_ADD = "add" +OP_EDIT = "edit" +OP_DELETE = "delete" +OP_LIST = "list" +OP_METHOD = "method" +OP_GET = "get" + +# error codes for the web service. +SUCCESS = ERR_SUCCESS = 0 +ERR_TOKEN_EXPIRED = 1 +ERR_TOKEN_INVALID = 2 +ERR_USER_INVALID = 3 +ERR_PASSWORD_INVALID = 4 +ERR_INTERNAL_ERROR = 5 +ERR_INVALID_ARGUMENTS = 6 +ERR_NO_SUCH_OBJECT = 7 +ERR_ORPHANED_OBJECT = 8 +ERR_SQL = 9 +ERR_MISCONFIGURED = 10 +ERR_UNCAUGHT = 11 +ERR_INVALID_METHOD = 12 +ERR_TASK = 13 +ERR_REG_TOKEN_INVALID = 14 +ERR_REG_TOKEN_EXHAUSTED = 15 +ERR_PUPPET_NODE_NOT_SIGNED = 16 + + + +class FuncException(exceptions.Exception): + error_code = ERR_INTERNAL_ERROR + + def __init__(self, **kwargs): + self.job_id = self.load(kwargs,"job_id") + self.stacktrace = self.load(kwargs,"stacktrace") + self.invalid_fields = self.load(kwargs,"invalid_fields") + self.data = self.load(kwargs,"data") + self.comment = self.load(kwargs,"comment") + self.tb_data = traceback.extract_stack() + exceptions.Exception.__init__(self) + + + def format(self): + msg = """ +Exception Name: %s +Exception Comment: %s +Exception Data: %s +Stack Trace: +%s""" % (self.__class__, self.comment, self.data, + string.join(traceback.format_list(self.tb_data))) + + return msg + + def ok(self): + return self.error_code == 0 + + def load(self,hash,key,default=None): + if hash.has_key(key): + return hash[key] + else: + return default + + def __get_additional_data(self): + data = {} + if not self.job_id is None: + data["job_id"] = self.job_id + if not self.stacktrace is None: + data["stacktrace"] = self.stacktrace + if not self.invalid_fields is None: + data["invalid_fields"] = self.invalid_fields + if not self.data is None: + data["data"] = self.data + if not self.comment is None: + data["comment"] = self.comment + return data + + def to_datastruct(self): + return (self.error_code, 
self.__get_additional_data()) + +#FIXME: hack +VirtFactoryException = FuncException + +class SuccessException(VirtFactoryException): + """ + Not an error / return success and data to caller. + """ + error_code = ERR_SUCCESS + +class TokenExpiredException(VirtFactoryException): + """ + The user token that was passed in has been logged out + due to inactivity. Call user_login again. + """ + error_code = ERR_TOKEN_EXPIRED + +class TokenInvalidException(VirtFactoryException): + """ + The user token doesn't exist, so this function call isn't + permitted. Call user_login to get a valid token. + """ + error_code = ERR_TOKEN_INVALID + +class RegTokenInvalidException(VirtFactoryException): + """ + The registration token doesn't exist, so this function call isn't + permitted. + """ + error_code = ERR_REG_TOKEN_INVALID + +class RegTokenExhaustedException(VirtFactoryException): + """ + The registration token that was passed in has been used + it allowed number of uses. + """ + error_code = ERR_REG_TOKEN_EXHAUSTED + +class UserInvalidException(VirtFactoryException): + """ + Can't log in this user since the user account doesn't + exist in the database. + """ + error_code = ERR_USER_INVALID + +class PasswordInvalidException(VirtFactoryException): + """ + Wrong password. Bzzzt. Try again. + """ + error_code = ERR_PASSWORD_INVALID + +class InternalErrorException(VirtFactoryException): + """ + FIXME: This is a generic error code, and if something is + throwing that error, it probably + should be changed to throw something more specific. + """ + error_code = ERR_INTERNAL_ERROR + +class InvalidArgumentsException(VirtFactoryException): + """ + The arguments passed in to this function failed to pass + validation. See additional_data for the + names of which arguments were rejected. + """ + error_code = ERR_INVALID_ARGUMENTS + +class NoSuchObjectException(VirtFactoryException): + """ + The id passed in doesn't refer to an object. 
+ """ + error_code = ERR_NO_SUCH_OBJECT + +class InvalidMethodException(VirtFactoryException): + """The method called does not exist""" + error_code = ERR_INVALID_METHOD + +class UncaughtException(VirtFactoryException): + """ + The python code choked. additional_data contains the + stacktrace, and it's ok to give the stacktrace + since the user is already logged in. user_login shouldn't + give stacktraces for security reasons. + """ + error_code = ERR_UNCAUGHT + +class OrphanedObjectException(VirtFactoryException): + """ + A delete can't proceed because another object references + this one, or an add can't proceed because a + referenced object doesn't exist. + """ + error_code = ERR_ORPHANED_OBJECT + +class SQLException(VirtFactoryException): + """ + The code died inside a SQL call. This is probably + a sign that the validation prior to making + the call needs to be improved, or maybe SQL was just + more efficient (i.e. referential integrity). + """ + error_code = ERR_SQL + +class TaskException(VirtFactoryException): + """ + Something went wrong with the background task engine + """ + error_code = ERR_TASK + +class MisconfiguredException(VirtFactoryException): + """ + The virt-factory service isn't properly configured and + no calls can be processed until this is corrected on the + server side. The UI/WUI/etc is non-functional and should + display a splash screen telling the user to finish their + setup of the virt-factory service by running "vf_server init", edit + /var/lib/virt-factory/settings, and then run "vf_server import". + """ + error_code = ERR_MISCONFIGURED + + +class PuppetNodeNotSignedException(VirtFactoryException): + """ + The puppet node certificate could not be signed, either + because there was no matching certificate requrest or + due to another puppetca error. + """ + error_code = ERR_PUPPET_NODE_NOT_SIGNED + +def success(data=None,job_id=None): + """ + Shortcut around success exception that returns equivalent data + w/o raise. 
+ """ + ret = SuccessException(data=data, job_id=job_id) + return ret + + +if __name__ == "__main__": + # run this module as main to generate a ruby compatible constants + # file. + module = sys.modules[__name__] + for x in sorted(module.__dict__.keys()): + obj = module.__dict__[x] + if (type(obj) == int or type(obj) == str) and not x.startswith("__"): + if type(obj) == int: + print "%s = %s" % (x, obj) + else: + print "%s = \"%s\"" % (x, obj) + + diff --git a/server/config_data.py b/server/config_data.py new file mode 100755 index 0000000..9ccca75 --- /dev/null +++ b/server/config_data.py @@ -0,0 +1,57 @@ +#!/usr/bin/python + +# Virt-factory backend code. +# +# Copyright 2006, Red Hat, Inc +# Michael DeHaan <mdehaan@redhat.com> +# Scott Seago <sseago@redhat.com> +# Adrian Likins <alikins@redhat.com> +# +# This software may be freely redistributed under the terms of the GNU +# general public license. + +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ + +from codes import * + +import os +import yaml + +CONFIG_FILE = "/etc/virt-factory/settings" + +# from the comments in http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531 +#class Singleton(object): +# def __new__(type): +# if not '_the_instance' in type.__dict__: +# type._the_instance = object.__new__(type) +# return type._the_instance + + +class Config: + + # this class is a Borg + __shared_state = {} + has_read = False + + def __init__(self): + self.__dict__ = self.__shared_state + if not self.has_read: + self.read() + print "***** CONFIG RELOAD *****" + Config.has_read = True + + def read(self): + if not os.path.exists(CONFIG_FILE): + raise MisconfiguredException(comment="Missing %s" % CONFIG_FILE) + config_file = open(CONFIG_FILE) + data = config_file.read() + self.ds = yaml.load(data).next() + + + def get(self): + return self.ds + + diff --git a/server/logger.py b/server/logger.py new file mode 100755 index 0000000..0b9d791 --- /dev/null +++ b/server/logger.py @@ -0,0 +1,55 @@ +#!/usr/bin/python + +## Virt-factory backend code. +## +## Copyright 2006, Red Hat, Inc +## Michael DeHaan <mdehaan@redhat.com +## Adrian Likins <alikins@redhat.com +## +## This software may be freely redistributed under the terms of the GNU +## general public license. +## +## You should have received a copy of the GNU General Public License +## along with this program; if not, write to the Free Software +## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+## +## + + +import logging +import config_data + + +# from the comments in http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531 +class Singleton(object): + def __new__(type, *args, **kwargs): + if not '_the_instance' in type.__dict__: + type._the_instance = object.__new__(type, *args, **kwargs) + return type._the_instance + +# logging is weird, we don't want to setup multiple handlers +# so make sure we do that mess only once +class Logger(Singleton): + __no_handlers = True + def __init__(self, logfilepath ="/var/log/virt-factory/svclog"): + + self.config = config_data.Config().get() + if self.config.has_key("loglevel"): + self.loglevel = logging._levelNames[self.config["loglevel"]] + else: + self.loglevel = logging.INFO + self.__setup_logging() + if self.__no_handlers: + self.__setup_handlers(logfilepath=logfilepath) + + def __setup_logging(self): + self.logger = logging.getLogger("svc") + + def __setup_handlers(self, logfilepath="/var/log/virt-factory/svclog"): + handler = logging.FileHandler(logfilepath, "a") + self.logger.setLevel(self.loglevel) + formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s") + handler.setFormatter(formatter) + self.logger.addHandler(handler) + self.__no_handlers = False + diff --git a/server/module_loader.py b/server/module_loader.py new file mode 100755 index 0000000..10631fe --- /dev/null +++ b/server/module_loader.py @@ -0,0 +1,53 @@ +#!/usr/bin/python + + +import distutils.sysconfig +import os +import sys +import glob +from rhpl.translate import _, N_, textdomain, utf8 + +module_file_path="modules/" +mod_path="server/" +sys.path.insert(0, mod_path) + +def load_modules(module_path=module_file_path, blacklist=None): + filenames = glob.glob("%s/*.py" % module_file_path) + filenames = filenames + glob.glob("%s/*.pyc" % module_file_path) + filesnames = filenames + glob.glob("%s/*.pyo" % module_file_path) + + mods = {} + + print sys.path + + for fn in filenames: + basename = os.path.basename(fn) + if 
basename == "__init__.py": + continue + if basename[-3:] == ".py": + modname = basename[:-3] + elif basename[-4:] in [".pyc", ".pyo"]: + modname = basename[:-4] + + + try: + blip = __import__("modules.%s" % ( modname), globals(), locals(), [modname]) + if not hasattr(blip, "register_rpc"): + errmsg = _("%(module_path)s/%(modname)s module not a proper module") + print errmsg % {'module_path': module_path, 'modname':modname} + continue + mods[modname] = blip + except ImportError, e: + # shouldn't this be fatal? + print e + raise + + return mods + + + + +if __name__ == "__main__": + print load_modules(module_path) + + diff --git a/server/server.py b/server/server.py new file mode 100755 index 0000000..d297b06 --- /dev/null +++ b/server/server.py @@ -0,0 +1,215 @@ +#!/usr/bin/python +""" +Virt-factory backend code. + +Copyright 2006, Red Hat, Inc +Michael DeHaan <mdehaan@redhat.com> +Scott Seago <sseago@redhat.com> +Adrian Likins <alikins@redhat.com> + +This software may be freely redistributed under the terms of the GNU +general public license. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+""" + +import SimpleXMLRPCServer +import os +import subprocess +import socket + +SERVE_ON = (None,None) + +# FIXME: logrotate + +from codes import * + +import config_data +import logger +import module_loader +import utils + + +MODULE_PATH="modules/" +modules = module_loader.load_modules(MODULE_PATH) +print "modules", modules + + +#from busrpc.services import RPCDispatcher +#from busrpc.config import DeploymentConfig + +from rhpl.translate import _, N_, textdomain, utf8 +I18N_DOMAIN = "vf_server" + + +class Singleton(object): + def __new__(type, *args, **kwargs): + if not '_the_instance' in type.__dict__: + type._the_instance = object.__new__(type, *args, **kwargs) + type._the_instance.init(*args, **kwargs) + return type._the_instance + +class XmlRpcInterface(Singleton): + + def init(self): + """ + Constructor sets up SQLAlchemy (database ORM) and logging. + """ + config_obj = config_data.Config() + self.config = config_obj.get() + + self.tables = {} + self.tokens = [] + + self.logger = logger.Logger().logger + + self.__setup_handlers() + + def __setup_handlers(self): + """ + Add RPC functions from each class to the global list so they can be called. + FIXME: eventually calling most functions should go from here through getattr. + """ + self.handlers = {} + print "ffffffffffff", modules.keys() + for x in modules.keys(): + print "x", x + try: + modules[x].register_rpc(self.handlers) + self.logger.debug("adding %s" % x) + except AttributeError, e: + self.logger.warning("module %s could not be loaded, it did not have a register_rpc method" % modules[x]) + + + # FIXME: find some more elegant way to surface the handlers? 
+ # FIXME: aforementioned login/session token requirement + + def get_dispatch_method(self, method): + if method in self.handlers: + return FuncApiMethod(self.logger, method, + self.handlers[method]) + + else: + self.logger.info("Unhandled method call for method: %s " % method) + raise InvalidMethodException + + def _dispatch(self, method, params): + """ + the SimpleXMLRPCServer class will call _dispatch if it doesn't + find a handler method + """ + return self.get_dispatch_method(method)(*params) + +class BusRpcWrapper: + + def __init__(self, config): + self.rpc_interface = None + + def __getattr__(self, name): + if self.rpc_interface == None: + self.rpc_interface = XmlRpcInterface() + return self.rpc_interface.get_dispatch_method(name) + + def __repr__(self): + return ("<BusRpcWrapper>") + +class FuncApiMethod: + def __init__(self, logger, name, method): + self.logger = logger + self.__method = method + self.__name = name + + def __log_exc(self): + """ + Log an exception. + """ + (t, v, tb) = sys.exc_info() + self.logger.info("Exception occured: %s" % t ) + self.logger.info("Exception value: %s" % v) + self.logger.info("Exception Info:\n%s" % string.join(traceback.format_list(traceback.extract_tb(tb)))) + + def __call__(self, *args): + self.logger.debug("(X) -------------------------------------------") + try: + rc = self.__method(*args) + except FuncException, e: + self.__log_exc() + rc = e + except: + self.logger.debug("Not a virt-factory specific exception") + self.__log_exc() + raise + rc = rc.to_datastruct() + self.logger.debug("Return code for %s: %s" % (self.__name, rc)) + return rc + + +def serve(websvc): + """ + Code for starting the XMLRPC service. + FIXME: make this HTTPS (see RRS code) and make accompanying Rails changes.. 
+ """ + server =FuncXMLRPCServer(('', 51234)) + server.register_instance(websvc) + server.serve_forever() + +def serve_qpid(config_path, register_with_bridge=False, is_bridge_server=False): + """ + Code for starting the QPID RPC service. + """ + config = DeploymentConfig(config_path) + dispatcher = RPCDispatcher(config, register_with_bridge, is_bridge_server=is_bridge_server) + + try: + dispatcher.start() + except KeyboardInterrupt: + dispatcher.stop() + print "Exiting..." + +class FuncXMLRPCServer(SimpleXMLRPCServer.SimpleXMLRPCServer): + def __init__(self, args): + self.allow_reuse_address = True + SimpleXMLRPCServer.SimpleXMLRPCServer.__init__(self, args) + +def main(argv): + """ + Start things up. + """ + + websvc = XmlRpcInterface() + + for arg in sys.argv: + if arg == "import" or arg == "--import": + prov_obj = provisioning.Provisioning() + prov_obj.init(None, {}) + return + elif arg == "sync" or arg == "--sync": + prov_obj = provisioning.Provisioning() + prov_obj.sync(None, {}) # just for testing + return + if "qpid" in sys.argv or "--qpid" in sys.argv: + if "daemon" in sys.argv or "--daemon" in sys.argv: + utils.daemonize("/var/run/vf_server_qpid.pid") + else: + print "serving...\n" + serve_qpid("/etc/virt-factory/qpid.conf") + else: + if "daemon" in sys.argv or "--daemon" in sys.argv: + utils.daemonize("/var/run/vf_server.pid") + else: + print "serving...\n" + # daemonize only if --daemonize, because I forget to type "debug" -- MPD + serve(websvc) + +# FIXME: upgrades? database upgrade logic would be nice to have here, as would general creation (?) +# FIXME: command line way to add a distro would be nice to have in the future, rsync import is a bit heavy handed. 
+# (and might not be enough for RHEL, but is good for Fedora/Centos) + + +if __name__ == "__main__": + textdomain(I18N_DOMAIN) + main(sys.argv) + + diff --git a/server/utils.py b/server/utils.py new file mode 100755 index 0000000..552db54 --- /dev/null +++ b/server/utils.py @@ -0,0 +1,50 @@ +#!/usr/bin/python +""" +Virt-factory backend code. + +Copyright 2006, Red Hat, Inc +Michael DeHaan <mdehaan@redhat.com> +Scott Seago <sseago@redhat.com> +Adrian Likins <alikins@redhat.com> + +This software may be freely redistributed under the terms of the GNU +general public license. + +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. +""" + +import os +import string +import sys +import traceback + +# this is kind of handy, so keep it around for now +# but we really need to fix out server side logging and error +# reporting so we don't need it +def trace_me(): + x = traceback.extract_stack() + bar = string.join(traceback.format_list(x)) + return bar + + +def daemonize(pidfile=None): + """ + Daemonize this process with the UNIX double-fork trick. + Writes the new PID to the provided file name if not None. + """ + + print pidfile + pid = os.fork() + if pid > 0: + sys.exit(0) + os.setsid() + os.umask(0) + pid = os.fork() + + + if pid > 0: + if pidfile is not None: + open(pidfile, "w").write(str(pid)) + sys.exit(0) diff --git a/server/yaml/__init__.py b/server/yaml/__init__.py new file mode 100755 index 0000000..419d1f3 --- /dev/null +++ b/server/yaml/__init__.py @@ -0,0 +1,17 @@ +__version__ = "0.32"
# server/yaml/__init__.py -- public API of the bundled pure-Python YAML
# library (version 0.32).  Python 2-era code: implicit relative imports,
# string exceptions.
from load import loadFile, load, Parser, l
from dump import dump, dumpToFile, Dumper, d
from stream import YamlLoaderException, StringStream, FileStream
from timestamp import timestamp
import sys
# loadOrdered needs dict subclassing, available from Python 2.2
# (0x02020000 == version 2.2.0 in sys.hexversion encoding).
if sys.hexversion >= 0x02020000:
    from redump import loadOrdered

try:
    from ypath import ypath
except NameError:
    # If importing ypath fails with NameError (pre-2.2 interpreter),
    # expose a stub with the same signature that fails loudly when called.
    def ypath(expr,target='',cntx=''):
        raise NotImplementedError("ypath requires Python 2.2")

if sys.hexversion < 0x02010000:
    # NOTE(review): Python 2 string exception, raised on pre-2.1 interpreters.
    raise 'YAML is not tested for pre-2.1 versions of Python'
diff --git a/server/yaml/dump.py b/server/yaml/dump.py new file mode 100755 index 0000000..c55dbfe --- /dev/null +++ b/server/yaml/dump.py @@ -0,0 +1,296 @@ +import types
+import string
+from types import StringType, UnicodeType, IntType, FloatType
+from types import DictType, ListType, TupleType, InstanceType
+from klass import hasMethod, isDictionary
+import re
+
+"""
+ The methods from this module that are exported to the top
+ level yaml package should remain stable. If you call
+ directly into other methods of this module, be aware that
+ they may change or go away in future implementations.
+ Contact the authors if there are methods in this file
+ that you wish to remain stable.
+"""
+
# Module-level convenience wrappers around the Dumper class.

def dump(*data):
    """Serialize each positional argument as its own YAML document and
    return the concatenated text."""
    return Dumper().dump(*data)

def d(data): return dump(data)  # shorthand alias for a single document

def dumpToFile(file, *data):
    """Serialize each positional argument as a YAML document written to
    the open file object *file*."""
    return Dumper().dumpToFile(file, *data)
+
class Dumper:
    """Serializes Python data structures to YAML text.

    All text is funneled through self.output, which is rebound per call
    to either an in-memory accumulator (dump) or a file writer
    (dumpToFile).  self.currIndent holds the current line prefix
    ("\n" plus accumulated indentation) and grows/shrinks as nesting is
    entered and left.
    """

    def __init__(self):
        self.currIndent = "\n"
        self.indent = "  "
        self.keysrt = None  # optional mapping-key ranking fn (see setSort)
        self.alphaSort = 1 # legacy -- on by default

    def setIndent(self, indent):
        """Set the per-level indent string; returns self for chaining."""
        self.indent = indent
        return self

    def setSort(self, sort_hint):
        """Install a mapping-key sort order (see sortMethod); returns self."""
        self.keysrt = sortMethod(sort_hint)
        return self

    def dump(self, *data):
        """Serialize each argument as a YAML document; return the text."""
        self.result = []
        self.output = self.outputToString
        self.dumpDocuments(data)
        return string.join(self.result,"")

    def outputToString(self, data):
        # sink used by dump()
        self.result.append(data)

    def dumpToFile(self, file, *data):
        """Serialize each argument as a YAML document written to *file*."""
        self.file = file
        self.output = self.outputToFile
        self.dumpDocuments(data)

    def outputToFile(self, data):
        # sink used by dumpToFile()
        self.file.write(data)

    def dumpDocuments(self, data):
        # Each document gets its own anchor table and a '---' separator.
        for obj in data:
            self.anchors = YamlAnchors(obj)
            self.output("---")
            self.dumpData(obj)
            self.output("\n")

    def indentDump(self, data):
        # Dump a child node one indent level deeper, then restore.
        oldIndent = self.currIndent
        self.currIndent += self.indent
        self.dumpData(data)
        self.currIndent = oldIndent

    def dumpData(self, data):
        """Dispatch a node to the appropriate dumper, handling anchors/aliases."""
        anchor = self.anchors.shouldAnchor(data)
        if anchor:
            # first occurrence of a shared node: emit '&N' then the value
            self.output(" &%d" % anchor )
        else:
            anchor = self.anchors.isAlias(data)
            if anchor:
                # later occurrence: emit the alias '*N' and stop
                self.output(" *%d" % anchor )
                return
        if (data is None):
            self.output(' ~')
        elif hasMethod(data, 'to_yaml'):
            self.dumpTransformedObject(data)
        elif hasMethod(data, 'to_yaml_implicit'):
            self.output(" " + data.to_yaml_implicit())
        elif type(data) is InstanceType:
            self.dumpRawObject(data)
        elif isDictionary(data):
            self.dumpDict(data)
        elif type(data) in [ListType, TupleType]:
            self.dumpList(data)
        else:
            self.dumpScalar(data)

    def dumpTransformedObject(self, data):
        # The object supplies its own (data, typestring) pair via to_yaml().
        obj_yaml = data.to_yaml()
        if type(obj_yaml) is not TupleType:
            self.raiseToYamlSyntaxError()
        (data, typestring) = obj_yaml
        if typestring:
            self.output(" " + typestring)
        self.dumpData(data)

    def dumpRawObject(self, data):
        # Old-style class instance: tag with !!module.Class, dump __dict__.
        self.output(' !!%s.%s' % (data.__module__, data.__class__.__name__))
        self.dumpData(data.__dict__)

    def dumpDict(self, data):
        keys = data.keys()
        if len(keys) == 0:
            self.output(" {}")
            return
        if self.keysrt:
            keys = sort_keys(keys,self.keysrt)
        else:
            if self.alphaSort:
                keys.sort()
        for key in keys:
            self.output(self.currIndent)
            self.dumpKey(key)
            self.output(":")
            self.indentDump(data[key])

    def dumpKey(self, key):
        # Tuple keys use YAML's complex-key '?' syntax; others are quoted.
        if type(key) is TupleType:
            self.output("?")
            self.indentDump(key)
            self.output("\n")
        else:
            self.output(quote(key))

    def dumpList(self, data):
        if len(data) == 0:
            self.output(" []")
            return
        for item in data:
            self.output(self.currIndent)
            self.output("-")
            self.indentDump(item)

    def dumpScalar(self, data):
        if isUnicode(data):
            # strip the u'...' wrapper from repr and emit double-quoted
            self.output(' "%s"' % repr(data)[2:-1])
        elif isMulti(data):
            self.dumpMultiLineScalar(data.splitlines())
        else:
            self.output(" ")
            self.output(quote(data))

    def dumpMultiLineScalar(self, lines):
        # Block-literal style; '|+' keeps the trailing newline.
        self.output(" |")
        if lines[-1] == "":
            self.output("+")
        for line in lines:
            self.output(self.currIndent)
            self.output(line)

    def raiseToYamlSyntaxError(self):
        # NOTE(review): Python 2 string exception.
        raise """
to_yaml should return tuple w/object to dump
and optional YAML type. Example:
({'foo': 'bar'}, '!!foobar')
"""
+
+#### ANCHOR-RELATED METHODS
+
def accumulate(obj,occur):
    """Walk *obj* recursively, counting occurrences of each container /
    long-string object id in the dict *occur* (id -> count).

    Small immutables (None, ints, floats, strings shorter than 32 chars)
    are skipped -- never worth anchoring.  Children are only visited the
    first time an object id is seen, so cyclic structures terminate.
    """
    typ = type(obj)
    if obj is None or \
       typ is IntType or \
       typ is FloatType or \
       ((typ is StringType or typ is UnicodeType) \
       and len(obj) < 32): return
    obid = id(obj)
    if 0 == occur.get(obid,0):
        # first visit: record it and recurse into children
        occur[obid] = 1
        if typ is ListType:
            for x in obj:
                accumulate(x,occur)
        if typ is DictType:
            for (x,y) in obj.items():
                accumulate(x,occur)
                accumulate(y,occur)
    else:
        # repeat visit: bump the count (YamlAnchors anchors ids with count > 1)
        occur[obid] = occur[obid] + 1
+
class YamlAnchors:
    """Tracks objects that occur more than once in a document so the
    dumper can emit an anchor (&N) on first visit and aliases (*N) on
    later visits."""

    def __init__(self,data):
        seen = {}
        accumulate(data, seen)
        repeats = {}
        for (obj_id, count) in seen.items():
            if count > 1:
                repeats[obj_id] = 0  # 0 == shared but not yet anchored
        self._anchorVisits = repeats
        self._currentAliasIndex = 0

    def shouldAnchor(self,obj):
        """Return a fresh anchor number on the first dump of a shared
        object; 0 for unshared or already-anchored objects."""
        visit = self._anchorVisits.get(id(obj), None)
        if visit == 0:
            self._currentAliasIndex = self._currentAliasIndex + 1
            visit = self._currentAliasIndex
            self._anchorVisits[id(obj)] = visit
            return visit
        return 0

    def isAlias(self,obj):
        """Return the anchor number assigned to *obj*, or 0 if none."""
        return self._anchorVisits.get(id(obj), 0)
+
+### SORTING METHODS
+
def sort_keys(keys,fn):
    """Return *keys* ordered by the rank that *fn* assigns each key;
    keys ranked None sort as '~' (i.e. after ordinary string ranks)."""
    decorated = []
    for key in keys:
        rank = fn(key)
        decorated.append(('~' if rank is None else rank, key))
    decorated.sort()
    return [key for (rank, key) in decorated]
+
def sortMethod(sort_hint):
    """Normalize a sort hint into a key-ranking callable for sort_keys.

    A dict hint ranks keys by its values (via dict.get); a list/tuple
    hint ranks keys by their position; anything else is assumed to be a
    callable already and is returned unchanged.
    """
    typ = type(sort_hint)
    if DictType == typ:
        return sort_hint.get
    elif ListType == typ or TupleType == typ:
        indexes = {}; idx = 0
        for item in sort_hint:
            indexes[item] = idx
            idx += 1
        return indexes.get
    else:
        return sort_hint
+
+### STRING QUOTING AND SCALAR HANDLING
def isStr(data):
    """Return 1 for plain or unicode strings, else 0."""
    # XXX 2.1 madness -- exact type comparison, on purpose.
    for kind in (type(''), type(u'')):
        if type(data) == kind:
            return 1
    return 0
+
def doubleUpQuotes(data):
    """Escape embedded single quotes YAML-style by doubling them."""
    return "''".join(data.split("'"))
+
def quote(data):
    """Return *data* rendered as a YAML-safe scalar string.

    Non-strings are simply str()-ed.  Strings holding special characters
    (or opening with a single quote) are double-quoted via their escaped
    repr; strings that could be misread as another YAML construct are
    single-quoted; everything else passes through unquoted.

    The removed backquote syntax was replaced by the equivalent repr()
    call, and the deprecated string.replace() by the str method.
    """
    if not isStr(data):
        return str(data)
    single = "'"
    double = '"'
    quote = ''
    if len(data) == 0:
        return "''"
    if hasSpecialChar(data) or data[0] == single:
        data = repr(data)[1:-1]
        # repr writes backspace as \x08; YAML prefers the \b escape.
        data = data.replace(r"\x08", r"\b")
        quote = double
    elif needsSingleQuote(data):
        quote = single
        data = doubleUpQuotes(data)
    return "%s%s%s" % (quote, data, quote)
+
def needsSingleQuote(data):
    """Return true when *data* must be single-quoted to survive a
    round-trip: leading digit/dash (number-like), alias-like '*x',
    leading '&'/space/double-quote, trailing space, an embedded colon,
    or a dotted version-number shape."""
    if re.match(r"^-?\d", data):
        return 1
    if re.match(r"\*\S", data):
        return 1
    first = data[0]
    if first == '&' or first == ' ' or first == '"':
        return 1
    if data[-1] == ' ':
        return 1
    return (re.search(r'[:]', data) or re.search(r'(\d\.){2}', data))
+
# Characters that force the double-quoted (escaped) representation.
_SPECIAL_CHAR_PATTERN = re.compile(r'[\t\b\r\f#]')

def hasSpecialChar(data):
    """True when *data* holds a character that forces double-quoting."""
    # need test to drive out '#' from this
    return _SPECIAL_CHAR_PATTERN.search(data)
+
def isMulti(data):
    """True for plain multi-line strings that can use the block-literal
    form (strings with special characters are excluded)."""
    if isStr(data) and not hasSpecialChar(data):
        return re.search("\n", data)
    return 0
+
def isUnicode(data):
    """Return true only for exactly-typed unicode objects (Python 2)."""
    return type(data) == unicode
+
def sloppyIsUnicode(data):
    """Heuristic unicode test used on Python < 2.2 (repr-based)."""
    # XXX - hack to make tests pass for 2.1
    rep = repr(data)
    return rep[:2] == "u'" and rep != data
+
import sys
# Python before 2.2 (hexversion 0x20200000) lacks a dependable unicode
# type test; substitute the repr()-based heuristic there.
if sys.hexversion < 0x20200000:
    isUnicode = sloppyIsUnicode
+
+
+
diff --git a/server/yaml/implicit.py b/server/yaml/implicit.py new file mode 100755 index 0000000..6172564 --- /dev/null +++ b/server/yaml/implicit.py @@ -0,0 +1,46 @@ +import re
+import string
+from timestamp import timestamp, matchTime
+
# Implicit-typing patterns.  Commas are accepted inside digit runs as
# thousands separators and stripped later by cleanseNumber().
DATETIME_REGEX = re.compile("^[0-9]{4}-[0-9]{2}-[0-9]{2}$")
FLOAT_REGEX = re.compile("^[-+]?[0-9][0-9,]*\.[0-9]*$")
SCIENTIFIC_REGEX = re.compile("^[-+]?[0-9]+(\.[0-9]*)?[eE][-+][0-9]+$")
OCTAL_REGEX = re.compile("^[-+]?([0][0-7,]*)$")
HEX_REGEX = re.compile("^[-+]?0x[0-9a-fA-F,]+$")
INT_REGEX = re.compile("^[-+]?(0|[1-9][0-9,]*)$")
+
def convertImplicit(val):
    """Convert a raw YAML scalar string to its implicitly-typed value.

    Recognizes null ('~'), booleans ('+'/'-'), quoted strings,
    timestamps, and the numeric forms matched by the module regexes;
    anything unrecognized comes back unchanged as a string.
    """
    if val == '~':
        return None
    if val == '+':
        return 1
    if val == '-':
        return 0
    if val[0] == "'" and val[-1] == "'":
        # Single-quoted: only '' escapes need undoing.
        val = val[1:-1]
        return string.replace(val, "''", "\'")
    if val[0] == '"' and val[-1] == '"':
        if re.search(r"\u", val):
            val = "u" + val
        # SECURITY: eval() on document text -- a hostile double-quoted
        # scalar can execute arbitrary code; needs a real unescaper.
        unescapedStr = eval (val)
        return unescapedStr
    if matchTime.match(val):
        return timestamp(val)
    if INT_REGEX.match(val):
        return int(cleanseNumber(val))
    if OCTAL_REGEX.match(val):
        return int(val, 8)
    if HEX_REGEX.match(val):
        return int(val, 16)
    if FLOAT_REGEX.match(val):
        return float(cleanseNumber(val))
    if SCIENTIFIC_REGEX.match(val):
        return float(cleanseNumber(val))
    return val
+
def cleanseNumber(text):
    """Strip a leading '+' and thousands commas so that int()/float()
    accept the numeral.

    The parameter was renamed from ``str`` (which shadowed the builtin)
    and the deprecated string.replace() call became the str method.
    """
    if text[0] == '+':
        text = text[1:]
    return text.replace(',', '')
+
diff --git a/server/yaml/inline.py b/server/yaml/inline.py new file mode 100755 index 0000000..8e647de --- /dev/null +++ b/server/yaml/inline.py @@ -0,0 +1,38 @@ +import re
+import string
+
class InlineTokenizer:
    """Break an inline YAML collection ('[a, b]', '{k: v}') into tokens.

    next() returns punctuation characters and item strings one at a
    time, or None once the buffer is exhausted.
    """

    def __init__(self, data):
        self.data = data

    def punctuation(self):
        """Consume and return a leading bracket/brace, if any."""
        puncts = [ '[', ']', '{', '}' ]
        for punct in puncts:
            if self.data[0] == punct:
                self.data = self.data[1:]
                return punct

    def up_to_comma(self):
        """Consume and return the text before the next ', ' separator."""
        match = re.match('(.*?)\s*, (.*)', self.data)
        if match:
            self.data = match.groups()[1]
            return match.groups()[0]

    def up_to_end_brace(self):
        """Consume up to (but not including) a closing ']' or '}'."""
        match = re.match('(.*?)(\s*[\]}].*)', self.data)
        if match:
            self.data = match.groups()[1]
            return match.groups()[0]

    def next(self):
        """Return the next token, trying each production in order.

        string.strip() was replaced by the equivalent str method; the
        string-module functions have been deprecated since Python 2.0.
        """
        self.data = self.data.strip()
        productions = [
            self.punctuation,
            self.up_to_comma,
            self.up_to_end_brace
        ]
        for production in productions:
            token = production()
            if token:
                return token
+
diff --git a/server/yaml/klass.py b/server/yaml/klass.py new file mode 100755 index 0000000..edcf5a8 --- /dev/null +++ b/server/yaml/klass.py @@ -0,0 +1,48 @@ +import new
+import re
+
class DefaultResolver:
    """Map '!!module.Class' private-type tags onto Python instances."""

    def resolveType(self, data, typestring):
        """Build an instance of the class named by *typestring*, seeded
        with *data*.

        Raises:
            ValueError: when the tag is not of the '!!module.Class'
            form.  (Previously a string exception, which modern Python
            rejects outright.)
        """
        match = re.match('!!(.*?)\.(.*)', typestring)
        if not match:
            raise ValueError("Invalid private type specifier")
        (modname, classname) = match.groups()
        return makeClass(modname, classname, data)
+
def makeClass(module, classname, dict):
    """Import *module*, look up *classname*, and build an instance whose
    state comes from *dict* (a mapping of attribute names; the parameter
    name shadows the builtin).

    If the class defines from_yaml(), construction is delegated to it;
    otherwise the instance dict is assigned wholesale.

    SECURITY: module and class names come straight from the YAML '!!'
    tag, so loading an untrusted document can import and run arbitrary
    code through the exec/eval below.
    """
    exec('import %s' % (module))
    klass = eval('%s.%s' % (module, classname))
    # new.instance() (Python-2-only module) builds an instance without
    # calling __init__.
    obj = new.instance(klass)
    if hasMethod(obj, 'from_yaml'):
        return obj.from_yaml(dict)
    obj.__dict__ = dict
    return obj
+
def hasMethod(object, method_name):
    """Return 1 when *object*'s class exposes a callable *method_name*,
    else 0."""
    try:
        klass = object.__class__
    except:
        # Objects without __class__ simply have no methods.
        return 0
    method = getattr(klass, method_name, None)
    if method is None or not callable(method):
        return 0
    return 1
+
def isDictionary(data):
    """Return true when *data* is a dict (or dict subclass)."""
    return isinstance(data, dict)

try:
    isDictionary({})
except:
    # Python 2.1 has no 'dict' builtin; fall back to an exact type check.
    def isDictionary(data): return type(data) == type({}) # XXX python 2.1
+
# Smoke test, exercised only when run as a script (Python 2 print
# statements).  The guarded part probes features absent on 2.1.
if __name__ == '__main__':
    print isDictionary({'foo': 'bar'})
    try:
        print isDictionary(dict())
        from ordered_dict import OrderedDict
        print isDictionary(OrderedDict())
    except:
        pass
diff --git a/server/yaml/load.py b/server/yaml/load.py new file mode 100755 index 0000000..259178d --- /dev/null +++ b/server/yaml/load.py @@ -0,0 +1,327 @@ +import re, string
+from implicit import convertImplicit
+from inline import InlineTokenizer
+from klass import DefaultResolver
+from stream import YamlLoaderException, FileStream, StringStream, NestedDocs
+
try:
    iter(list()) # is iter supported by this version of Python?
except:
    # XXX - Python 2.1 does not support iterators
    class StopIteration: pass
    class iter:
        # Eagerly pre-reads every document so old Pythons can index and
        # step through them with the same next()/StopIteration protocol.
        def __init__(self,parser):
            self._docs = []
            try:
                while 1:
                    self._docs.append(parser.next())
            except StopIteration: pass
            self._idx = 0
        def __len__(self): return len(self._docs)
        def __getitem__(self,idx): return self._docs[idx]
        def next(self):
            # Emulate the iterator protocol over the pre-read list.
            if self._idx < len(self._docs):
                ret = self._docs[self._idx]
                self._idx = self._idx + 1
                return ret
            raise StopIteration
+
def loadFile(filename, typeResolver=None):
    """Parse the YAML file *filename*; returns a document iterator."""
    stream = FileStream(filename)
    return loadStream(stream, typeResolver)
+
def load(str, typeResolver=None):
    """Parse YAML documents out of the string *str*; returns an
    iterator.  (The parameter name shadows the builtin but is kept for
    keyword-calling compatibility.)"""
    stream = StringStream(str)
    return loadStream(stream, typeResolver)
+
+def l(str): return load(str).next()
+
def loadStream(stream, typeResolver):
    """Wrap *stream* in a Parser and expose it through the iterator
    protocol (or its 2.1 emulation)."""
    parser = Parser(stream, typeResolver)
    return iter(parser)
+
def tryProductions(productions, value):
    """Offer *value* to each production in turn; return (1, result)
    from the first that matches and reports success, else None."""
    for attempt in productions:
        outcome = attempt(value)
        if not outcome:
            continue
        (ok, result) = outcome
        if ok:
            return (1, result)
+
+def dumpDictionary(): return {}
+
class Parser:
    """Pull-parser turning a NestedDocs line stream into native Python
    documents.

    Iterate the parser (or call next()) to receive one document at a
    time; *typeResolver* handles '!!' private-type tags.
    """
    def __init__(self, stream, typeResolver=None):
        try:
            self.dictionary = dict
        except:
            # Python 2.1 has no 'dict' builtin.
            self.dictionary = dumpDictionary
        self.nestedDocs = NestedDocs(stream)
        self.aliases = {}
        if typeResolver:
            self.typeResolver = typeResolver
        else:
            self.typeResolver = DefaultResolver()

    def error(self, msg):
        # Delegates to the stream, which knows file name and line number.
        self.nestedDocs.error(msg, self.line)

    def nestPop(self):
        # Pull the next line of the current block into self.line;
        # returns a true value until the block ends.
        line = self.nestedDocs.pop()
        if line is not None:
            self.line = line
            return 1

    def value(self, indicator):
        # Text following *indicator* on the current line, or None.
        return getToken(indicator+"\s*(.*)", self.line)

    def getNextDocument(self): raise "getNextDocument() deprecated--use next()"

    def next(self):
        """Parse and return the next document; raises StopIteration at
        end of stream."""
        line = self.nestedDocs.popDocSep()
        indicator = getIndicator(line)
        if indicator:
            # Inline document: '--- <value>'.
            return self.parse_value(indicator)
        if line:
            self.nestedDocs.nestToNextLine()
            return self.parseLines()
        raise StopIteration

    def __iter__(self): return self

    def parseLines(self):
        # A block is a sequence if its first line starts with '-',
        # otherwise a mapping.
        peekLine = self.nestedDocs.peek()
        if peekLine:
            if re.match("\s*-", peekLine):
                return self.parse_collection([], self.parse_seq_line)
            else:
                return self.parse_collection(self.dictionary(), self.parse_map_line)
        raise StopIteration

    def parse_collection(self, items, lineParser):
        # Feed every non-blank line of the block to *lineParser*.
        while self.nestPop():
            if self.line:
                lineParser(items)
        return items

    def parse_seq_line(self, items):
        value = self.value("-")
        if value is not None:
            items.append(self.parse_seq_value(value))
        else:
            self.error("missing '-' for seq")

    def parse_map_line(self, items):
        # A lone '?' introduces a complex (nested) mapping key.
        if (self.line == '?'):
            self.parse_map_line_nested(items)
        else:
            self.parse_map_line_simple(items, self.line)

    def parse_map_line_nested(self, items):
        self.nestedDocs.nestToNextLine()
        key = self.parseLines()
        if self.nestPop():
            value = self.value(':')
            if value is not None:
                # Complex keys are stored as tuples to stay hashable.
                items[tuple(key)] = self.parse_value(value)
                return
        self.error("key has no value for nested map")

    def parse_map_line_simple(self, items, line):
        map_item = self.key_value(line)
        if map_item:
            (key, value) = map_item
            key = convertImplicit(key)
            if items.has_key(key):
                self.error("Duplicate key "+key)
            items[key] = self.parse_value(value)
        else:
            self.error("bad key for map")

    def is_map(self, value):
        # XXX - need real tokenizer
        if len(value) == 0:
            return 0
        if value[0] == "'":
            return 0
        if re.search(':(\s|$)', value):
            return 1

    def parse_seq_value(self, value):
        # 'key: val' right after a '-' begins a compressed mapping.
        if self.is_map(value):
            return self.parse_compressed_map(value)
        else:
            return self.parse_value(value)

    def parse_compressed_map(self, value):
        items = self.dictionary()
        line = self.line
        token = getToken("(\s*-\s*)", line)
        # Nest past the '- ' prefix so the following lines line up with
        # the first key.
        self.nestedDocs.nestBySpecificAmount(len(token))
        self.parse_map_line_simple(items, value)
        return self.parse_collection(items, self.parse_map_line)

    def parse_value(self, value):
        # Aliases: '*name' returns the stored value, '&name' records
        # the parsed value under that name for later '*name' repeats.
        (alias, value) = self.testForRepeatOfAlias(value)
        if alias:
            return value
        (alias, value) = self.testForAlias(value)
        value = self.parse_unaliased_value(value)
        if alias:
            self.aliases[alias] = value
        return value

    def parse_unaliased_value(self, value):
        match = re.match(r"(!\S*)(.*)", value)
        if match:
            (url, value) = match.groups()
            value = self.parse_untyped_value(value)
            if url[:2] == '!!':
                return self.typeResolver.resolveType(value, url)
            else:
                # XXX - allows syntax, but ignores it
                return value
        return self.parse_untyped_value(value)

    def parseInlineArray(self, value):
        if re.match("\s*\[", value):
            return self.parseInline([], value, ']',
                self.parseInlineArrayItem)

    def parseInlineHash(self, value):
        if re.match("\s*{", value):
            return self.parseInline(self.dictionary(), value, '}',
                self.parseInlineHashItem)

    def parseInlineArrayItem(self, result, token):
        return result.append(convertImplicit(token))

    def parseInlineHashItem(self, result, token):
        (key, value) = self.key_value(token)
        result[key] = value

    def parseInline(self, result, value, end_marker, itemMethod):
        tokenizer = InlineTokenizer(value)
        # The first token is the opening bracket/brace; skip it.
        tokenizer.next()
        while 1:
            token = tokenizer.next()
            if token == end_marker:
                break
            itemMethod(result, token)
        return (1, result)

    def parseSpecial(self, value):
        # Forms with dedicated syntax, tried before plain scalars.
        productions = [
            self.parseMultiLineScalar,
            self.parseInlineHash,
            self.parseInlineArray,
        ]
        return tryProductions(productions, value)

    def parse_untyped_value(self, value):
        parse = self.parseSpecial(value)
        if parse:
            (ok, data) = parse
            return data
        token = getToken("(\S.*)", value)
        if token:
            # Scalar possibly continued on indented follow-up lines.
            lines = [token] + \
                pruneTrailingEmpties(self.nestedDocs.popNestedLines())
            return convertImplicit(joinLines(lines))
        else:
            # Nothing on this line: the value is the nested block below.
            self.nestedDocs.nestToNextLine()
            return self.parseLines()

    def parseNative(self, value):
        return (1, convertImplicit(value))

    def parseMultiLineScalar(self, value):
        if value == '>':
            return (1, self.parseFolded())
        elif value == '|':
            return (1, joinLiteral(self.parseBlock()))
        elif value == '|+':
            # '+' chomping: keep trailing blank lines.
            return (1, joinLiteral(self.unprunedBlock()))

    def parseFolded(self):
        # Join the block's lines, folding adjacent flush-left lines
        # with a space (see foldChar).
        data = self.parseBlock()
        i = 0
        resultString = ''
        while i < len(data)-1:
            resultString = resultString + data[i]
            resultString = resultString + foldChar(data[i], data[i+1])
            i = i + 1
        return resultString + data[-1] + "\n"

    def unprunedBlock(self):
        self.nestedDocs.nestToNextLine()
        data = []
        while self.nestPop():
            data.append(self.line)
        return data

    def parseBlock(self):
        return pruneTrailingEmpties(self.unprunedBlock())

    def testForAlias(self, value):
        match = re.match("&(\S*)\s*(.*)", value)
        if match:
            return match.groups()
        return (None, value)

    def testForRepeatOfAlias(self, value):
        match = re.match("\*(\S+)", value)
        if match:
            alias = match.groups()[0]
            if self.aliases.has_key(alias):
                return (alias, self.aliases[alias])
            else:
                self.error("Unknown alias")
        return (None, value)

    def key_value(self, str):
        if str[-1] == ' ':
            self.error("Trailing spaces not allowed without quotes.")
        # XXX This allows mis-balanced " vs. ' stuff
        match = re.match("[\"'](.+)[\"']\s*:\s*(.*)", str)
        if match:
            (key, value) = match.groups()
            return (key, value)
        match = re.match("(.+?)\s*:\s*(.*)", str)
        if match:
            (key, value) = match.groups()
            if len(value) and value[0] == '#':
                # A comment after the colon means "no value on this line".
                value = ''
            return (key, value)
+
def getToken(regex, value):
    """Return the first capture group of *regex* found anywhere in
    *value*, or None when it does not match."""
    found = re.search(regex, value)
    if found:
        return found.groups()[0]
+
def pruneTrailingEmpties(data):
    """Return *data* with trailing empty strings dropped (the input
    list itself is not mutated)."""
    end = len(data)
    while end > 0 and data[end - 1] == '':
        end = end - 1
    if end == len(data):
        return data
    return data[:end]
+
def foldChar(line1, line2):
    """Folded scalars join two flush-left lines with a space; if either
    neighbor is indented the newline is kept."""
    both_flush = re.match("^\S", line1) and re.match("^\S", line2)
    if both_flush:
        return " "
    return "\n"
+
def getIndicator(line):
    """Return the text following a '--- ' document separator (any
    '#YAML:x.y' version header is skipped), or None when *line* is not
    an inline document start."""
    if not line:
        return
    header = r"(#YAML:\d+\.\d+\s*){0,1}"
    found = re.match("--- " + header + "(\S*.*)", line)
    if found:
        return found.groups()[-1]
+
def joinLines(lines):
    """Merge continuation lines: a trailing backslash glues the next
    line on directly, otherwise a single space separates pieces."""
    pieces = []
    for line in lines[:-1]:
        if line[-1] == '\\':
            pieces.append(line[:-1])
        else:
            pieces.append(line + " ")
    pieces.append(lines[-1])
    return ''.join(pieces)
+
def joinLiteral(data):
    """Join block-literal lines with newlines, ending with a final one.

    str.join replaces string.join(), which has been deprecated since
    Python 2.0 and removed in 3.x.
    """
    return "\n".join(data) + "\n"
+
diff --git a/server/yaml/ordered_dict.py b/server/yaml/ordered_dict.py new file mode 100755 index 0000000..b3788b7 --- /dev/null +++ b/server/yaml/ordered_dict.py @@ -0,0 +1,31 @@ +# This is extremely crude implementation of an OrderedDict. +# If you know of a better implementation, please send it to +# the author Steve Howell. You can find my email via +# the YAML mailing list or wiki. + +class OrderedDict(dict): + def __init__(self): + self._keys = [] + + def __setitem__(self, key, val): + self._keys.append(key) + dict.__setitem__(self, key, val) + + def keys(self): + return self._keys + + def items(self): + return [(key, self[key]) for key in self._keys] + +if __name__ == '__main__': + data = OrderedDict() + data['z'] = 26 + data['m'] = 13 + data['a'] = 1 + for key in data.keys(): + print "The value for %s is %s" % (key, data[key]) + print data + + + + diff --git a/server/yaml/redump.py b/server/yaml/redump.py new file mode 100755 index 0000000..56ea958 --- /dev/null +++ b/server/yaml/redump.py @@ -0,0 +1,16 @@ +from ordered_dict import OrderedDict +from load import Parser +from dump import Dumper +from stream import StringStream + +def loadOrdered(stream): + parser = Parser(StringStream(stream)) + parser.dictionary = OrderedDict + return iter(parser) + +def redump(stream): + docs = list(loadOrdered(stream)) + dumper = Dumper() + dumper.alphaSort = 0 + return dumper.dump(*docs) + diff --git a/server/yaml/stream.py b/server/yaml/stream.py new file mode 100755 index 0000000..cc78c4b --- /dev/null +++ b/server/yaml/stream.py @@ -0,0 +1,193 @@ +import re
+import string
+
def indentLevel(line):
    """Count the leading spaces of *line* (tabs do not count as
    indentation)."""
    width = 0
    for ch in line:
        if ch != ' ':
            break
        width = width + 1
    return width
+
class LineNumberStream:
    """Base line reader that tracks the current line number.

    Concrete sources implement getLine(); get() strips line endings and
    advances the counter.
    """

    def __init__(self, filename=None):
        self.curLine = 0
        self.filename = filename

    def get(self):
        """Fetch the next line (line feed removed), or None at EOF."""
        line = self.getLine()
        self.curLine += 1  # used by subclass
        if not line:
            return line
        return noLineFeed(line)

    def lastLineRead(self):
        """Number of the line most recently consumed."""
        return self.curLine
+
class FileStream(LineNumberStream):
    """Line stream reading from a file on disk."""

    def __init__(self, filename):
        self.fp = open(filename)
        LineNumberStream.__init__(self, filename)

    def getLine(self):
        """Return the next raw line, or None at end of file."""
        line = self.fp.readline()
        if line == '':
            return None
        return line
+
class StringStream(LineNumberStream):
    """Line stream reading from an in-memory string."""

    def __init__(self, text):
        self.lines = split(text)
        self.numLines = len(self.lines)
        LineNumberStream.__init__(self)

    def getLine(self):
        """Return the next stored line, or None past the end."""
        if self.curLine < self.numLines:
            return self.lines[self.curLine]
+
def split(text):
    """Split *text* into a list of lines, ignoring one trailing newline.

    str.split replaces the deprecated string.split() function.
    """
    lines = text.split('\n')
    if lines[-1] == '':
        lines.pop()
    return lines
+
def eatNewLines(stream):
    """Advance *stream* past blank (or whitespace-only) lines; return
    the first line with content, or None at end of input.

    str.strip replaces the deprecated string.strip() function.
    """
    while 1:
        line = stream.get()
        if line is None or len(line.strip()):
            return line
+
# Matches a line whose first non-blank character begins a '#' comment.
COMMENT_LINE_REGEX = re.compile(r"\s*#")

def isComment(line):
    """Return a true value when *line* is nothing but a comment."""
    return line is not None and COMMENT_LINE_REGEX.match(line)
+
class CommentEater:
    """Wrap a line stream, transparently skipping comment lines.

    Provides one line of lookahead through peek()/pop().
    """

    def __init__(self, stream):
        self.stream = stream
        self.peeked = 1
        self.line = eatNewLines(stream)
        self.eatComments()

    def eatComments(self):
        """Skip forward until self.line is not a comment."""
        while isComment(self.line):
            self.line = self.stream.get()

    def peek(self):
        """Return the next significant line without consuming it."""
        if not self.peeked:
            self.peeked = 1
            self.line = self.stream.get()
            self.eatComments()
        return self.line

    def lastLineRead(self):
        return self.stream.lastLineRead()

    def pop(self):
        """Consume and return the next significant line."""
        data = self.peek()
        self.peeked = 0
        return data
+
class NestedText:
    """View of a comment-free line stream relative to a current
    indentation level.

    peek()/pop() return lines with the current indent stripped; a line
    indented less than the current level reads as end-of-block (None).
    nestToNextLine()/nestBySpecificAmount() push a deeper level, and
    pop() restores the enclosing one when a block ends.
    """
    def __init__(self, stream):
        self.commentEater = CommentEater(stream)
        self.reset()

    def lastLineRead(self):
        return self.commentEater.lastLineRead()

    def reset(self):
        # Back to column zero with no remembered outer levels.
        self.indentLevel = 0
        self.oldIndents = [0]

    def peek(self):
        """Next line relative to the current indent, '' for blank lines,
        or None when the block (or stream) has ended."""
        nextLine = self.commentEater.peek()
        if nextLine is not None:
            if indentLevel(nextLine) >= self.indentLevel:
                return nextLine[self.indentLevel:]
            elif nextLine == '':
                return ''

    def pop(self):
        line = self.peek()
        if line is None:
            # Block ended: restore the enclosing indentation level.
            self.indentLevel = self.oldIndents.pop()
            return
        self.commentEater.pop()
        return line

    def popNestedLines(self):
        """Consume and return the lines of an indented sub-block, or an
        empty list when the next line is not nested."""
        nextLine = self.peek()
        if nextLine is None or nextLine == '' or nextLine[0] != ' ':
            return []
        self.nestToNextLine()
        lines = []
        while 1:
            line = self.pop()
            if line is None:
                break
            lines.append(line)
        return lines

    def nestToNextLine(self):
        """Deepen the current level to match the upcoming line."""
        line = self.commentEater.peek()
        indentation = indentLevel(line)
        # Nested blocks must be strictly deeper than their parent.
        if len(self.oldIndents) > 1 and indentation <= self.indentLevel:
            self.error("Inadequate indentation", line)
        self.setNewIndent(indentation)

    def nestBySpecificAmount(self, adjust):
        """Deepen the current level by a fixed number of columns."""
        self.setNewIndent(self.indentLevel + adjust)

    def setNewIndent(self, indentLevel):
        # Remember the old level so pop() can restore it at block end.
        self.oldIndents.append(self.indentLevel)
        self.indentLevel = indentLevel
+
class YamlLoaderException(Exception):
    """Parse error carrying the message, line number, offending line
    and (optional) file name."""

    def __init__(self, *args):
        (self.msg, self.lineNum, self.line, self.filename) = args

    def __str__(self):
        text = "%s:\nnear line %d:\n%s\n" % (self.msg, self.lineNum, self.line)
        if self.filename:
            text = text + "file: " + self.filename
        return text
+
class NestedDocs(NestedText):
    """NestedText specialized for '---' document separators.

    popDocSep() hands back the pending separator exactly once per
    document, and pop() halts (returning None) at the next separator
    instead of consuming it.
    """
    def __init__(self, stream):
        self.filename = stream.filename
        NestedText.__init__(self,stream)
        line = NestedText.peek(self)
        self.sep = '---'
        if self.startsWithSep(line):
            self.eatenDocSep = NestedText.pop(self)
        else:
            # No leading separator: synthesize one for the first doc.
            self.eatenDocSep = self.sep

    def startsWithSep(self,line):
        """Return 1 when *line* opens with the document separator."""
        if line and self.sep == line[:3]: return 1
        return 0

    def popDocSep(self):
        """Return (and clear) the pending separator line, resetting the
        indentation state for the new document."""
        line = self.eatenDocSep
        self.eatenDocSep = None
        self.reset()
        return line

    def pop(self):
        if self.eatenDocSep is not None:
            # Was ``raise "error"`` -- string exceptions are illegal in
            # modern Python; signal the internal misuse properly.
            raise RuntimeError(
                "pop() called with an unconsumed document separator")
        line = self.commentEater.peek()
        if line and self.startsWithSep(line):
            # Stash the separator; callers see end-of-document (None).
            self.eatenDocSep = NestedText.pop(self)
            return None
        return NestedText.pop(self)

    def error(self, msg, line):
        """Abort parsing with a YamlLoaderException at the current
        stream position."""
        raise YamlLoaderException(msg, self.lastLineRead(), line, self.filename)
+
def noLineFeed(s):
    """Strip any run of trailing newline / carriage-return characters."""
    end = len(s)
    while end > 0 and s[end - 1] in ('\n', '\r'):
        end = end - 1
    return s[:end]
diff --git a/server/yaml/timestamp.py b/server/yaml/timestamp.py new file mode 100755 index 0000000..abcb2e6 --- /dev/null +++ b/server/yaml/timestamp.py @@ -0,0 +1,145 @@ +import time, re, string +from types import ListType, TupleType + +PRIVATE_NOTICE = """ + This module is considered to be private implementation + details and is subject to change. Please only use the + objects and methods exported to the top level yaml package. +""" + +# +# Time specific operations +# + +_splitTime = re.compile('\-|\s|T|t|:|\.|Z') +matchTime = re.compile(\ + '\d+-\d+-\d+([\s|T|t]\d+:\d+:\d+.\d+(Z|(\s?[\-|\+]\d+:\d+)))?') + +def _parseTime(val): + if not matchTime.match(val): raise ValueError(val) + tpl = _splitTime.split(val) + if not(tpl): raise ValueError(val) + siz = len(tpl) + sec = 0 + if 3 == siz: + tpl += [0,0,0,0,0,-1] + elif 7 == siz: + tpl.append(0) + tpl.append(-1) + elif 8 == siz: + if len(tpl.pop()) > 0: raise ValueError(val) + tpl.append(0) + tpl.append(-1) + elif 9 == siz or 10 == siz: + mn = int(tpl.pop()) + hr = int(tpl.pop()) + sec = (hr*60+mn)*60 + if val.find("+") > -1: sec = -sec + if 10 == siz: tpl.pop() + tpl.append(0) + tpl.append(-1) + else: + raise ValueError(val) + idx = 0 + while idx < 9: + tpl[idx] = int(tpl[idx]) + idx += 1 + if tpl[1] < 1 or tpl[1] > 12: raise ValueError(val) + if tpl[2] < 1 or tpl[2] > 31: raise ValueError(val) + if tpl[3] > 24: raise ValueError(val) + if tpl[4] > 61: raise ValueError(val) + if tpl[5] > 61: raise ValueError(val) + if tpl[0] > 2038: + #TODO: Truncation warning + tpl = (2038,1,18,0,0,0,0,0,-1) + tpl = tuple(tpl) + ret = time.mktime(tpl) + ret = time.localtime(ret+sec) + ret = ret[:8] + (0,) + return ret + + +class _timestamp: + def __init__(self,val=None): + if not val: + self.__tval = time.gmtime() + else: + typ = type(val) + if ListType == typ: + self.__tval = tuple(val) + elif TupleType == typ: + self.__tval = val + else: + self.__tval = _parseTime(val) + if 9 != len(self.__tval): raise ValueError + def 
__getitem__(self,idx): return self.__tval[idx] + def __len__(self): return 9 + def strftime(self,format): return time.strftime(format,self.__tval) + def mktime(self): return time.mktime(self.__tval) + def asctime(self): return time.asctime(self.__tval) + def isotime(self): + return "%04d-%02d-%02dT%02d:%02d:%02d.00Z" % self.__tval[:6] + def __repr__(self): return "yaml.timestamp('%s')" % self.isotime() + def __str__(self): return self.isotime() + def to_yaml_implicit(self): return self.isotime() + def __hash__(self): return hash(self.__tval[:6]) + def __cmp__(self,other): + try: + return cmp(self.__tval[:6],other.__tval[:6]) + except AttributeError: + return -1 + +try: # inherit from mx.DateTime functionality if available + from mx import DateTime + class timestamp(_timestamp): + def __init__(self,val=None): + _timestamp.__init__(self,val) + self.__mxdt = DateTime.mktime(self.__tval) + def __getattr__(self, name): + return getattr(self.__mxdt, name) +except: + class timestamp(_timestamp): pass + + + +def unquote(expr): + """ + summary: > + Simply returns the unquoted string, and the + length of the quoted string token at the + beginning of the expression. 
+ """ + tok = expr[0] + if "'" == tok: + idx = 1 + odd = 0 + ret = "" + while idx < len(expr): + chr = expr[idx] + if "'" == chr: + if odd: ret += chr + odd = not odd + else: + if odd: + tok = expr[:idx] + break + ret += chr + idx += 1 + if "'" == tok: tok = expr + return (ret,len(tok)) + if '"' == tok: + idx = 1 + esc = 0 + while idx < len(expr): + chr = expr[idx] + if '"' == chr and not esc: + tok = expr[:idx] + '"' + break + if '\\' == chr and not esc: esc = 1 + else: esc = 0 + idx += 1 + if '"' == tok: + raise SyntaxError("unmatched quote: " + expr) + ret = eval(tok) #TODO: find better way to unquote + return (ret,len(tok)) + return (expr,len(expr)) diff --git a/server/yaml/ypath.py b/server/yaml/ypath.py new file mode 100755 index 0000000..51d9d2f --- /dev/null +++ b/server/yaml/ypath.py @@ -0,0 +1,462 @@ +from types import ListType, StringType, IntType, DictType, InstanceType
+import re
+from urllib import quote
+from timestamp import unquote
+
+noTarget = object()
+
def escape(node):
    """
    summary: >
        This function escapes a given key so that it
        may appear within a ypath. URI style escaping
        is used so that ypath expressions can be a
        valid URI expression.
    """
    # Exact type checks via the Python-2 'types' aliases; anything that
    # is not an int or plain string (unicode included) raises below.
    typ = type(node)
    if typ is IntType: return str(node)
    if typ is StringType:
        return quote(node,'')
    raise ValueError("TODO: Support more than just string and integer keys.")
+
class context:
    """
    summary: >
      A ypath visit context through a YAML rooted graph.
      This is implemented as a 3-tuple including the parent
      node, the current key/index and the value. This is
      an immutable object so it can be cached.
    properties:
      key: mapping key or index within the parent collection
      value: current value within the parent's range
      parent: the parent context
      root: the very top of the yaml graph
      path: a tuple of the domain keys
    notes: >
      The context class doesn't yet handle going down the
      domain side of the tree...
    """
    def __init__(self,parent,key,value):
        """
        args:
            parent: parent context (or None if this is the root)
            key: mapping key or index for this context
            value: value of current location...
        """
        self.parent = parent
        self.key = key
        self.value = value
        if parent:
            assert parent.__class__ is self.__class__
            # Extend the parent's escaped-key path by one segment.
            self.path = parent.path + (escape(key),)
            self.root = parent.root
        else:
            assert not key
            self.path = tuple()
            self.root = self
    def __setattr__(self,attname,attval):
        # Enforce immutability: the core attributes may be written only
        # while their slot is still empty (i.e. during __init__).
        if attname in ('parent','key','value'):
            if self.__dict__.get(attname):
                raise ValueError("context is read-only")
        self.__dict__[attname] = attval
    def __hash__(self): return hash(self.path)
    def __cmp__(self,other):
        # Contexts order by path; any non-context sorts before us.
        try:
            return cmp(self.path,other.path)
        except AttributeError:
            return -1
    def __str__(self):
        # Render as an absolute '/'-joined path; the root is just '/'.
        if self.path:
            return "/".join(('',)+self.path)
        else:
            return '/'
+
def to_context(target):
    """Coerce *target* into a context, wrapping plain values as a root.

    NOTE(review): InstanceType only matches old-style class instances;
    on new-style objects both branches are skipped and the function
    falls through -- confirm the wrapping below is reached as intended.
    """
    if type(target) is InstanceType:
        if target.__class__ is context:
            return target
    return context(None,None,target)
+
def context_test():
    """Self-test exercising context path, identity, hash and string
    rendering invariants."""
    lst = ['value']
    map = {'key':lst}
    x = context(None,None,map)
    y = context(x,'key',lst)
    z = context(y,0,'value')
    assert ('key',) == y.path
    assert 'key' == y.key
    assert lst == y.value
    assert x == y.parent
    assert x == y.root
    assert 0 == z.key
    assert 'value' == z.value
    assert y == z.parent
    assert x == z.root
    assert hash(x)
    assert hash(y)
    assert hash(z)
    assert '/' == str(x)
    assert '/key' == str(y)
    assert '/key/0' == str(z)
+
class null_seg:
    """
    summary: >
        This is the simplest path segment, it
        doesn't return any results and doesn't
        depend upon its context. It also happens to
        be the base class which all segments derive.
    """
    def __iter__(self):
        return self
    def next_null(self):
        # Shared "exhausted" state; subclasses rebind self.next to this
        # once they have nothing more to yield.
        raise StopIteration
    def bind(self,cntx):
        """
        summary: >
            The bind function is called whenever
            the parent context has changed.
        """
        assert(cntx.__class__ is context)
        self.cntx = cntx
    def apply(self,target):
        # Convenience: bind to a plain value (or context) and iterate.
        self.bind(to_context(target))
        return iter(self)
    def exists(self,cntx):
        # 1 when binding to *cntx* yields at least one result.
        try:
            self.bind(cntx)
            self.next()
            return 1
        except StopIteration:
            return 0
    next = next_null
+
class self_seg(null_seg):
    """
    summary: >
        This path segment returns the context
        node exactly once.
    """
    def __str__(self): return '.'
    def next_self(self):
        # Yield the bound context once, then become exhausted.
        self.next = self.next_null
        return self.cntx
    def bind(self,cntx):
        null_seg.bind(self,cntx)
        self.next = self.next_self
+
class root_seg(self_seg):
    # '/': like self_seg but always anchored at the graph root.
    def __str__(self): return '/'
    def bind(self,cntx):
        self_seg.bind(self,cntx.root)
+
class parent_seg(self_seg):
    # '..': the parent context; the root acts as its own parent.
    def __str__(self): return '..'
    def bind(self,cntx):
        if cntx.parent: cntx = cntx.parent
        self_seg.bind(self,cntx)
+
class wild_seg(null_seg):
    """
    summary: >
        The wild segment simply loops through
        all of the sub-contexts for a given object.
        If there aren't any children, this isn't an
        error it just doesn't return anything.
    """
    def __str__(self): return '*'
    def next_wild(self):
        # StopIteration from the key iterator ends the loop naturally.
        key = self.keys.next()
        return context(self.cntx,key,self.values[key])
    def bind(self,cntx):
        null_seg.bind(self,cntx)
        typ = type(cntx.value)
        if typ is ListType:
            # A list's children are its indexes.
            self.keys = iter(xrange(0,len(cntx.value)))
            self.values = cntx.value
            self.next = self.next_wild
            return
        if typ is DictType:
            # A map's children are its keys.
            self.keys = iter(cntx.value)
            self.values = cntx.value
            self.next = self.next_wild
            return
        # Scalars have no children; stay exhausted.
        self.next = self.next_null
+
class trav_seg(null_seg):
    """
    summary: >
        This is a recursive traversal of the range, preorder.
        It is a recursive combination of self and wild.
    """
    def __str__(self): return '/'
    def next(self):
        # Explicit stack of (context, child-iterator) pairs; a None
        # iterator marks a node that has not been yielded yet.
        while 1:
            (cntx,seg) = self.stk[-1]
            if not seg:
                # First visit: queue the node's children, yield it.
                seg = wild_seg()
                seg.bind(cntx)
                self.stk[-1] = (cntx,seg)
                return cntx
            try:
                cntx = seg.next()
                self.stk.append((cntx,None))
            except StopIteration:
                self.stk.pop()
                if not(self.stk):
                    self.next = self.next_null
                    raise StopIteration

    def bind(self,cntx):
        null_seg.bind(self,cntx)
        self.stk = [(cntx,None)]
+
class match_seg(self_seg):
    """
    summary: >
        Matches a particular key within the
        current context. Kinda boring.
    """
    def __str__(self): return str(self.key)
    def __init__(self,key):
        #TODO: Do better implicit typing
        try:
            key = int(key)
        except (TypeError, ValueError):
            # Non-numeric keys stay as given.
            pass
        self.key = key
    def bind(self,cntx):
        """Bind to cntx.value[self.key] when present; otherwise act as
        an empty segment."""
        try:
            mtch = cntx.value[self.key]
            cntx = context(cntx,self.key,mtch)
            self_seg.bind(self,cntx)
        except Exception:
            # Narrowed from a bare except: lookup failures (KeyError,
            # IndexError, TypeError) or escape()'s ValueError make this
            # a null match, but SystemExit/KeyboardInterrupt now
            # propagate instead of being swallowed.
            null_seg.bind(self,cntx)
+
class conn_seg(null_seg):
    """
    summary: >
        When two segments are connected via a slash,
        this is a composite. For each context of the
        parent, it binds the child, and returns each
        context of the child.
    """
    def __str__(self):
        if self.parent.__class__ == root_seg:
            return "/%s" % self.child
        return "%s/%s" % (self.parent, self.child)
    def __init__(self,parent,child):
        self.parent = parent
        self.child = child
    def next(self):
        # Exhaust the child for the current parent context, then step
        # the parent; the parent's StopIteration ends the composite.
        while 1:
            try:
                return self.child.next()
            except StopIteration:
                cntx = self.parent.next()
                self.child.bind(cntx)

    def bind(self,cntx):
        null_seg.bind(self,cntx)
        self.parent.bind(cntx)
        try:
            cntx = self.parent.next()
        except StopIteration:
            # Parent produced nothing: remain an empty segment.
            return
        self.child.bind(cntx)
+
+
class pred_seg(null_seg):
    # parent[filter]: pass through only the parent's results that the
    # filter predicate accepts.
    def __str__(self): return "%s[%s]" % (self.parent, self.filter)
    def __init__(self,parent,filter):
        self.parent = parent
        self.filter = filter
    def next(self):
        while 1:
            # The parent's StopIteration terminates the predicate too.
            ret = self.parent.next()
            if self.filter.exists(ret):
                return ret
    def bind(self,cntx):
        null_seg.bind(self,cntx)
        self.parent.bind(cntx)
+
class or_seg(null_seg):
    # lhs|rhs: every lhs result, then rhs results not already produced.
    def __str__(self): return "%s|%s" % (self.lhs,self.rhs)
    def __init__(self,lhs,rhs):
        self.rhs = rhs
        self.lhs = lhs
        self.unq = {}   # contexts already returned from the lhs
    def next(self):
        seg = self.lhs
        try:
            nxt = seg.next()
            self.unq[nxt] = nxt
            return nxt
        except StopIteration: pass
        seg = self.rhs
        while 1:
            # Skip duplicates; rhs's StopIteration ends the union.
            nxt = seg.next()
            if self.unq.get(nxt,None):
                continue
            return nxt
    def bind(self,cntx):
        null_seg.bind(self,cntx)
        self.lhs.bind(cntx)
        self.rhs.bind(cntx)
+
class scalar:
    """Literal operand in a ypath predicate; wraps one plain value."""

    def __init__(self, val):
        self.val = val

    def __str__(self):
        return str(self.val)

    def value(self):
        """Return the wrapped value unchanged."""
        return self.val
+
class equal_pred:
    """Equality predicate for [lhs=rhs]; __init__ picks an exists_*
    strategy based on which operands are scalars vs. path segments."""
    def exists_true(self,cntx): return 1
    def exists_false(self,cntx): return 0
    def exists_scalar(self,cntx):
        # True when any value reached by the segment side stringifies
        # to the scalar side.
        self.rhs.bind(cntx)
        try:
            while 1:
                cntx = self.rhs.next()
                if str(cntx.value) == self.lhs: #TODO: Remove type hack
                    return 1
        except StopIteration: pass
        return 0
    def exists_segment(self,cntx):
        raise NotImplementedError()
    def __init__(self,lhs,rhs):
        if lhs.__class__ == scalar:
            if rhs.__class__ == scalar:
                # Constant fold: scalar == scalar is decided right now.
                if rhs.value() == lhs.value():
                    self.exists = self.exists_true
                else:
                    self.exists = self.exists_false
            else:
                self.exists = self.exists_scalar
        else:
            if rhs.__class__ == scalar:
                # Normalize so the scalar is always on the lhs.
                (lhs,rhs) = (rhs,lhs)
                self.exists = self.exists_scalar
            else:
                self.exists = self.exists_segment
        # NOTE(review): when both sides are segments, lhs has no
        # value() method, so this line fails before exists_segment is
        # ever reached -- confirm whether that case should be rejected
        # earlier.
        self.lhs = str(lhs.value()) #TODO: Remove type hack
        self.rhs = rhs
+
# Leading tokens that can start a path segment.
matchSegment = re.compile(r"""^(\w+|/|\.|\*|\"|\')""")

def parse_segment(expr):
    """
    Segments occur between the slashes...
    """
    # Dispatch on the leading token: '/' -> recursive descent,
    # '.'/'..' -> self/parent, '*' -> wildcard, quotes -> quoted key,
    # bare word -> plain key match.
    mtch = matchSegment.search(expr)
    if not(mtch): return (None,expr)
    tok = mtch.group(); siz = len(tok)
    # NOTE(review): for '/' the expression is returned unconsumed --
    # the caller appears to rely on this for '//'; confirm.
    if '/' == tok: return (trav_seg(),expr)
    elif '.' == tok:
        if len(expr) > 1 and '.' == expr[1]:
            seg = parent_seg()
            siz = 2
        else:
            seg = self_seg()
    elif '*' == tok: seg = wild_seg()
    elif '"' == tok or "'" == tok:
        # unquote() reports how many characters the quoted form used.
        (cur,siz) = unquote(expr)
        seg = match_seg(cur)
    else:
        seg = match_seg(tok)
    return (seg,expr[siz:])
+
# Leading token of a predicate term: a word, '/', '.', '(', or an opening quote.
matchTerm = re.compile(r"""^(\w+|/|\.|\(|\"|\')""")
+
def parse_term(expr):
    """
    Parse one term of a predicate expression.

    Returns (term, remainder): term is a path segment, a scalar, or
    None when *expr* does not start with a term.
    """
    mtch = matchTerm.search(expr)
    if not(mtch): return (None,expr)
    tok = mtch.group(); siz = len(tok)
    if '/' == tok or '.' == tok:
        # Nested path expression; hand the whole thing to parse().
        return parse(expr)
    if '(' == tok:
        # Parenthesized predicate: consume the '(' before recursing and
        # the matching ')' afterwards.
        # BUGFIX: was parse_predicate(expr) -- passing the '(' through
        # made parse_predicate -> parse_term recurse on it forever
        # (compare the expr[1:] stripping done in parse()).
        (term,expr) = parse_predicate(expr[1:])
        assert ')' == expr[0]
        return (term,expr[1:])
    elif '"' == tok or "'" == tok:
        # Quoted literal; unquote reports how much input it consumed.
        (val,siz) = unquote(expr)
    else:
        val = tok; siz = len(tok)
    return (scalar(val),expr[siz:])
+
def parse_predicate(expr):
    """
    Parse a predicate body: either "term=term" or a bare term (which is
    coerced into a match segment).  Returns (predicate, remainder); the
    remainder still starts with the closing ']' or ')'.
    """
    (term,expr) = parse_term(expr)
    if not term: raise SyntaxError("term expected: '%s'" % expr)
    tok = expr[0]
    if '=' == tok:
        (rhs,expr) = parse_term(expr[1:])
        return (equal_pred(term,rhs),expr)
    if '(' == tok:
        # BUGFIX: was `raise "No functions allowed... yet!"` -- raising a
        # string is a TypeError on modern Python; raise a real exception.
        raise SyntaxError("ypath: functions not supported yet: '%s'" % expr)
    if ']' == tok or ')' == tok:
        # Bare term: a scalar here really names a child to match.
        if term.__class__ is scalar:
            term = match_seg(str(term))
        return (term,expr)
    raise SyntaxError("ypath: expecting operator '%s'" % expr)
+
def parse_start(expr):
    """
    Initial checking on the expression, and
    determine if it is relative or absolute.
    """
    if type(expr) != StringType or len(expr) < 1:
        raise TypeError("string required: " + repr(expr))
    if expr.startswith('/'):
        # Absolute path: anchor at the root.
        return (root_seg(), expr)
    # Relative path: anchor at self and normalize to start with '/'.
    return (self_seg(), '/' + expr)
+
def parse(expr):
    """
    This is the parser entry point; the top level node
    is always a root or self segment. The self isn't
    strictly necessary, but it keeps things simple.

    Returns (path, remainder); remainder is non-empty when parsing
    stopped at a token this level could not consume (e.g. ')' or ']').
    """
    (ypth,expr) = parse_start(expr)
    while expr:
        tok = expr[0]
        if '/' == tok:
            # Path step: chain the next segment onto the path so far.
            (child, expr) = parse_segment(expr[1:])
            if child: ypth = conn_seg(ypth,child)
            continue
        if '[' == tok:
            # Predicate filter: wrap the current path in a pred_seg.
            (filter, expr) = parse_predicate(expr[1:])
            assert ']' == expr[0]
            expr = expr[1:]
            ypth = pred_seg(ypth,filter)
            continue
        if '|' == tok:
            # Union: everything to the right becomes the rhs of an or_seg.
            (rhs, expr) = parse(expr[1:])
            ypth = or_seg(ypth,rhs)
            continue
        if '(' == tok:
            # Parenthesized sub-path, chained onto the current path.
            (child,expr) = parse(expr[1:])
            assert ')' == expr[0]
            expr = expr[1:]
            ypth = conn_seg(ypth,child)
            continue
        break   # unrecognized token: stop, caller handles the remainder
    return (ypth,expr)
+
class convert_to_value(null_seg):
    """Adaptor that yields each context's .value instead of the context."""
    def __init__(self, itr):
        self.itr = itr
    def next(self):
        cntx = self.itr.next()
        return cntx.value
    def bind(self, cntx):
        self.itr.bind(cntx)
+
def ypath(expr, target=noTarget, cntx=0):
    """
    Compile a ypath expression and, when a target is supplied, apply the
    compiled path to it.  By default the resulting iterator yields node
    values; pass a true cntx to get the context objects themselves.
    """
    (itr, rest) = parse(expr)
    if rest:
        raise SyntaxError("ypath parse error `%s`" % rest)
    if not cntx:
        itr = convert_to_value(itr)
    if noTarget is target:
        return itr
    return itr.apply(target)
|