diff options
author | Michal Minar <miminar@redhat.com> | 2013-02-26 14:41:18 +0100 |
---|---|---|
committer | Michal Minar <miminar@redhat.com> | 2013-02-26 14:41:18 +0100 |
commit | 282aa360c1ab3c58622d9e284bb0b3600e8997b4 (patch) | |
tree | dc1e102c42e06596b7f09433cbfec7c1261809e9 /src/software | |
parent | 4214208d559ed88f3b29bc046d4679824bf5faa7 (diff) | |
download | openlmi-providers-282aa360c1ab3c58622d9e284bb0b3600e8997b4.tar.gz openlmi-providers-282aa360c1ab3c58622d9e284bb0b3600e8997b4.tar.xz openlmi-providers-282aa360c1ab3c58622d9e284bb0b3600e8997b4.zip |
lot of bugfixes in communication
communication:
* now the client can detect YumWorker's early termination and restart it
* session level is counted on client and server side
- this allows restoring the session state when the process is restarted
* minimized session nesting on server to 1 at max
- thanks to counter on client side
- reduces communication traffic
* improved logging
api changes:
* PackageInfo's pkgid renamed to objid
* Repository's name renamed to repoid
caption renamed to name
* added include_repos and exclude_repos to YumDB methods, where
it makes sense
other bugfixes:
* fixed issues with repo enable
* assigning None to Instance properties with CIMProperty values
instead of skipping the value assignment, to prevent using an
old reference to the value
allowed for repository management
- enable/disable
allowed to list/filter packages of particular repositories
added filter for packages to match those belonging to repository
testing:
* improved testing speed using run.py script
* removed unfinishable tests
Diffstat (limited to 'src/software')
16 files changed, 467 insertions, 259 deletions
diff --git a/src/software/openlmi/software/LMI_InstalledSoftwareIdentity.py b/src/software/openlmi/software/LMI_InstalledSoftwareIdentity.py index 58c6518..08dc674 100644 --- a/src/software/openlmi/software/LMI_InstalledSoftwareIdentity.py +++ b/src/software/openlmi/software/LMI_InstalledSoftwareIdentity.py @@ -287,8 +287,7 @@ class LMI_InstalledSoftwareIdentity(CIMProvider2): model["System"] = ComputerSystem.get_path() for pkg_info in ydb.get_package_list('installed', sort=True): - model["InstalledSoftware"] = \ - SoftwareIdentity.pkg2model( + model["InstalledSoftware"] = SoftwareIdentity.pkg2model( pkg_info, model=pkg_model) yield model diff --git a/src/software/openlmi/software/core/SoftwareIdentity.py b/src/software/openlmi/software/core/SoftwareIdentity.py index 7bcf769..edf1268 100644 --- a/src/software/openlmi/software/core/SoftwareIdentity.py +++ b/src/software/openlmi/software/core/SoftwareIdentity.py @@ -160,12 +160,21 @@ class Values(object): # Vendor_Reserved = 0x8000.. @cmpi_logging.trace_function -def object_path2pkg(op, kind='installed'): +def object_path2pkg(op, + kind='installed', + include_repos=None, + exclude_repos=None, + repoid=None, + return_all=False): """ @param op must contain precise information of package, otherwise an error is raised @param kind one of yumdb.jobs.YumGetPackageList.SUPPORTED_KINDS says, where to look for given package + @param repoid if not None, specifies repoid filter on package; + note, that this does not make sure, that repoid will be enabled. 
+ @param return_all if True, return list of matching packages as returned + by YumDB.filter_packages(), otherwise single package is returned """ if not isinstance(kind, basestring): raise TypeError("kind must be a string") @@ -189,11 +198,16 @@ def object_path2pkg(op, kind='installed'): pkglist = YumDB.get_instance().filter_packages(kind, allow_duplicates=kind not in ('installed', 'avail_reinst'), + include_repos=include_repos, + exclude_repos=exclude_repos, + repoid=repoid, **util.nevra2filter(match)) + if return_all is True: + return pkglist if len(pkglist) > 0: return pkglist[0] raise pywbem.CIMError(pywbem.CIM_ERR_NOT_FOUND, - 'No matching package found for InstanceID=\"%s\".' % + 'No matching package found for package nevra=\"%s\".' % instid) @cmpi_logging.trace_function @@ -233,8 +247,10 @@ def pkg2model(pkg_info, keys_only=True, model=None): #model['IdentityInfoType'] = ['',] # TODO #model['IdentityInfoValue'] = ['',] # TODO if pkg_info.installed: - model['InstallDate'] = pywbem.CIMDateTime( - pkg_info.install_time) + model['InstallDate'] = pywbem.CIMDateTime(pkg_info.install_time) + else: + model['InstallDate'] = pywbem.CIMProperty( + 'InstallDate', None, type='datetime') model['IsEntity'] = True #model['IsLargeBuildNumber'] = bool(False) # TODO #model['Languages'] = ['',] # TODO @@ -256,6 +272,7 @@ def pkg2model(pkg_info, keys_only=True, model=None): except ValueError: cmpi_logging.logger.error('Could not convert epoch "%s"' ' to integer for package \"%s\"!' 
% (pkg_info.epoch, pkg_info)) + model["Epoch"] = pywbem.CIMProperty('Epoch', None, type='uint32') model['Version'] = pkg_info.version model['Release'] = pkg_info.release model['Architecture'] = pkg_info.arch diff --git a/src/software/openlmi/software/core/SoftwareIdentityResource.py b/src/software/openlmi/software/core/SoftwareIdentityResource.py index d8e6877..722c29b 100644 --- a/src/software/openlmi/software/core/SoftwareIdentityResource.py +++ b/src/software/openlmi/software/core/SoftwareIdentityResource.py @@ -535,7 +535,7 @@ def object_path2repo(env, op, kind='enabled'): raise pywbem.CIMError(pywbem.CIM_ERR_INVALID_PARAMETER, 'SystemCreationClassName of \"%s\" must be a subclass of "%s".' % (op["CreationClassName"], "CIM_SoftwareIdentityResource")) - repos = YumDB.get_instance().filter_repositories(kind, name=op["Name"]) + repos = YumDB.get_instance().filter_repositories(kind, repoid=op["Name"]) if len(repos) < 1: raise pywbem.CIMError(pywbem.CIM_ERR_NOT_FOUND, 'No matching repository found for Name=\"%s\".' % op["Name"]) @@ -546,30 +546,30 @@ def _fill_non_keys(repo, model): """ Fills into the model of instance all non-key properties. 
""" - for slot in repo.__slots__: - cmpi_logging.logger.info("repo[name=%s].%s=%s", - repo.name, slot, str(getattr(repo, slot))) model['AccessContext'] = Values.AccessContext.Other if repo.mirror_list: - model['AccessInfo'] = repo.mirror_list + access_info = repo.mirror_list elif repo.base_urls: if len(repo.base_urls) > 0: if len(repo.base_urls) > 1: cmpi_logging.logger.warn( 'multiple base urls found for repository "%s", selecting' ' the last one', repo) - model['AccessInfo'] = repo.base_urls[-1] + access_info = repo.base_urls[-1] else: cmpi_logging.logger.error( 'no base url found for repository "%s"' % repo) + access_info = pywbem.CIMProperty('AccessInfo', + None, type='string') + model["AccessInfo"] = access_info model['AvailableRequestedStates'] = [ Values.AvailableRequestedStates.Enabled, Values.AvailableRequestedStates.Disabled] - model['Caption'] = repo.caption + model['Caption'] = repo.name model['Cost'] = pywbem.Sint32(repo.cost) model['Description'] = "[%s] - %s for %s architecture with cost %d" % ( - repo.name, repo.caption, repo.basearch, repo.cost) - model['ElementName'] = repo.name + repo.repoid, repo.name, repo.basearch, repo.cost) + model['ElementName'] = repo.repoid model['EnabledDefault'] = Values.EnabledDefault.Not_Applicable if repo.enabled: model['EnabledState'] = Values.EnabledState.Enabled @@ -583,10 +583,16 @@ def _fill_non_keys(repo, model): model['HealthState'] = Values.HealthState.Major_failure if repo.revision is not None: model["Generation"] = pywbem.Uint64(repo.revision) + else: + model['Generation'] = pywbem.CIMProperty('Generation', + None, type='uint64') model['InfoFormat'] = Values.InfoFormat.URL - model['InstanceID'] = 'LMI:REPO:' + repo.name + model['InstanceID'] = 'LMI:REPO:' + repo.repoid if repo.mirror_list: model["MirrorList"] = repo.mirror_list + else: + model['MirrorList'] = pywbem.CIMProperty('MirrorList', + None, type='string') model['OperationalStatus'] = [ Values.OperationalStatus.OK if repo.ready else 
Values.OperationalStatus.Error] model['OtherAccessContext'] = "YUM package repository" @@ -607,6 +613,9 @@ def _fill_non_keys(repo, model): model['TimeOfLastStateChange'] = pywbem.CIMDateTime(repo.last_edit) if repo.last_update is not None: model['TimeOfLastUpdate'] = pywbem.CIMDateTime(repo.last_update) + else: + model['TimeOfLastUpdate'] = pywbem.CIMProperty('TimeOfLastUpdate', + None, type='datetime') model['TransitioningToState'] = Values.TransitioningToState.Not_Applicable @cmpi_logging.trace_function @@ -631,7 +640,7 @@ def repo2model(repo, keys_only=True, model=None): else: _set_key = model.__setitem__ _set_key('CreationClassName', "LMI_SoftwareIdentityResource") - _set_key("Name", repo.name) + _set_key("Name", repo.repoid) _set_key("SystemCreationClassName", "Linux_ComputerSystem") _set_key("SystemName", ComputerSystem.get_path()["Name"]) if not keys_only: diff --git a/src/software/openlmi/software/util/__init__.py b/src/software/openlmi/software/util/__init__.py index e48ea29..33eba4a 100644 --- a/src/software/openlmi/software/util/__init__.py +++ b/src/software/openlmi/software/util/__init__.py @@ -25,6 +25,7 @@ import platform import re +import signal RE_EVRA = re.compile( r'^(?P<epoch>\d+):(?P<ver>[^-]+)-(?P<rel>.+)\.(?P<arch>[^.]+)$') @@ -152,3 +153,16 @@ def pkg2nevra(pkg, with_epoch='NOT_ZERO'): """ return make_nevra(pkg.name, pkg.epoch, pkg.version, pkg.release, pkg.arch, with_epoch) + +def get_signal_name(signal_num): + """ + @return name of signal for signal_num argument + """ + if not isinstance(signal_num, (int, long)): + raise TypeError("signal_num must be an integer") + try: + return dict((v, k) for k, v in signal.__dict__.items())[signal_num] + except KeyError: + return "UNKNOWN_SIGNAL(%d)" % signal_num + + diff --git a/src/software/openlmi/software/yumdb/__init__.py b/src/software/openlmi/software/yumdb/__init__.py index e9a83c3..512602a 100644 --- a/src/software/openlmi/software/yumdb/__init__.py +++ 
b/src/software/openlmi/software/yumdb/__init__.py @@ -36,7 +36,7 @@ import errno import os import re import time -from multiprocessing import Process, JoinableQueue, Queue +from multiprocessing import Process, JoinableQueue, Queue #pylint: disable=W0404 import Queue as TQueue # T as threaded import threading import yum @@ -49,7 +49,12 @@ from openlmi.software.yumdb.packagecheck import PackageFile from openlmi.software.yumdb.packagecheck import PackageCheck from openlmi.software.yumdb.process import YumWorker from openlmi.software.yumdb.repository import Repository -from openlmi.software.util import singletonmixin +from openlmi.software.util import get_signal_name, singletonmixin + +# Maximum time in seconds to wait for a job to accomplish. +# If timeout expires, spawned process is checked (it might +# be possibly killed) and is respawned in case it's dead. +MAX_JOB_WAIT_TIME = 120 # this may be used as an argument to YumWorker YUM_WORKER_DEBUG_LOGGING_CONFIG = { @@ -74,6 +79,23 @@ YUM_WORKER_DEBUG_LOGGING_CONFIG = { } } +def log_reply_error(job, reply): + """ + Raises an exception in case of error occured in worker process + while processing job. + """ + if isinstance(reply, tuple): + cmpi_logging.logger.error( + "YumDB: job %s(id=%s) failed with error %s: %s", + job.__class__.__name__, job.jobid, + reply[0].__name__, str(reply[1])) + cmpi_logging.logger.trace_warn( + "YumDB: job %s(id=%s) exception traceback:\n%s%s: %s", + job.__class__.__name__, job.jobid, "".join(reply[2]), + reply[0].__name__, str(reply[1])) + reply[1].tb_printed = True + raise reply[1] + class YumDB(singletonmixin.Singleton): """ Context manager for accessing yum/rpm database. 
@@ -107,12 +129,13 @@ class YumDB(singletonmixin.Singleton): self._process = None self._yum_args = (args, kwargs) - # used to access _replies dictionary and _expected list + self._session_lock = threading.RLock() + self._session_level = 0 + + # used to guard access to _expected list and _process self._reply_lock = threading.Lock() # used to wait for job to be processed and received self._reply_cond = threading.Condition(self._reply_lock) - # { job_id : reply, ... } - self._replies = {} # ids of all expected jobs -- those to be processed by YumWorker self._expected = [] cmpi_logging.logger.trace_info('YumDB: initialized') @@ -121,50 +144,112 @@ class YumDB(singletonmixin.Singleton): # Private methods # ************************************************************************* @cmpi_logging.trace_method - def _wait_for_reply(self, job): + def _handle_reply_timeout(self, job): + """ + This is called when timeout occurs while waiting on downlink queue for + reply. Delay can be caused by worker process's early termination (bug). + This handler tries to recover from such an situation. 
+ """ + if not self._worker.is_alive(): + if self._worker.exitcode < 0: + cmpi_logging.logger.error("[jobid=%d] worker" + " process(pid=%d) killed by signal %s", job.jobid, + self._worker.pid, get_signal_name(-self._process.exitcode)) + else: + cmpi_logging.logger.error("[jobid=%d] worker" + " process(pid=%d) is dead - exit code: %d", + job.jobid, self._process.pid, self._worker.exitcode) + with self._reply_lock: + self._process = None + cmpi_logging.logger.error( + "[jobid=%d] starting new worker process", job.jobid) + self._expected = [] + if not isinstance(job, jobs.YumBeginSession): + with self._session_lock: + if self._session_level > 0: + cmpi_logging.logger.info('restoring session ' + 'level=%d', self._session_level) + new_session_job = jobs.YumBeginSession() + self._worker.uplink.put(new_session_job) + (_, reply) = self._worker.downlink.get() + log_reply_error(new_session_job, reply) + self._worker.uplink.put(job) + self._expected.append(job.jobid) + # other waiting processes need to resend their requests + self._reply_cond.notifyAll() + else: + cmpi_logging.logger.info("[jobid=%d] process is running," + " waiting some more", job.jobid) + + @cmpi_logging.trace_method + def _receive_reply(self, job): """ - Blocks until job is processed by YumWorker and received. + Block on downlink queue to receive expected replies from worker + process. Only one thread can be executing this code at any time. - Only one thread can block on downlink channel to obtain reply. If - it's reply for him, he takes it and leaves, otherwise he adds it to - _replies dictionary and notifies other threads. This thread is the - one, whose job appears as first in _expected list. + In case, that worker process terminated due to some error. Restart it + and resend all the job requests again. 
+ """ + while True: + cmpi_logging.logger.debug("[jobid=%d] blocking on downlink queue", + job.jobid) + try: + jobid, reply = self._worker.downlink.get( + block=True, timeout=MAX_JOB_WAIT_TIME) + if jobid == job.jobid: + with self._reply_lock: + cmpi_logging.logger.debug( + "[jobid=%d] received desired reply", job.jobid) + self._expected.remove(job.jobid) + if len(self._expected): + self._reply_cond.notify() + return reply + else: + # this should not happen + cmpi_logging.logger.error("[jobid=%d] received reply" + " for another thread (jobid=%d)", + job.jobid, jobid) + except TQueue.Empty: + cmpi_logging.logger.warn("[jobid=%d] wait for job reply timeout" + "(%d seconds) occured", job.jobid, MAX_JOB_WAIT_TIME) + self._handle_reply_timeout(job) + + @cmpi_logging.trace_method + def _send_and_receive(self, job): + """ + Sends a request to server and blocks until job is processed by + YumWorker and reply is received. + + Only one thread can block on downlink channel to obtain reply. This + thread is the one, whose job appears as first in _expected list. Server + processes input jobs sequentially. That's why it's safe to presume, + that jobs are received in the same order as they were send. Thanks to + that we don't have to care about receiving replies for the other + waiting threads. 
@return result of job """ with self._reply_lock: + self._worker.uplink.put(job) self._expected.append(job.jobid) while True: - if job.jobid in self._replies: - self._expected.remove(job.jobid) - return self._replies.pop(job.jobid) + if job.jobid not in self._expected: + # process terminated, resending job + cmpi_logging.logger.warn("[jobid=%d] job removed" + " from expected list, sending request again", job.jobid) + self._worker.uplink.put(job) + self._expected.append(job.jobid) elif job.jobid == self._expected[0]: + # now it's our turn to block on downlink break - else: + else: # another thread blocks on downlink -> let's sleep cmpi_logging.logger.debug( - "[jobid=%d] another %s threads expecting reply," + "[jobid=%d] another %d threads expecting reply," " suspending...", job.jobid, len(self._expected) - 1) self._reply_cond.wait() cmpi_logging.logger.debug( "[jobid=%d] received reply, waking up", job.jobid) - while True: - cmpi_logging.logger.debug("[jobid=%d] blocking on downlink queue", - job.jobid) - jobid, reply = self._worker.downlink.get() - with self._reply_lock: - if jobid != job.jobid: - cmpi_logging.logger.debug("[jobid=%d] received reply" - " for another thread (jobid=%d)", job.jobid, jobid) - self._replies[jobid] = reply - self._reply_cond.notifyAll() - else: - cmpi_logging.logger.debug( - "[jobid=%d] received desired reply", job.jobid) - self._expected.remove(job.jobid) - if len(self._expected): - self._reply_cond.notify() - break - return reply + return self._receive_reply(job) def _do_job(self, job): """ @@ -176,19 +261,8 @@ class YumDB(singletonmixin.Singleton): """ cmpi_logging.logger.trace_verbose("YumDB: doing %s(id=%s) job", job.__class__.__name__, job.jobid) - self._worker.uplink.put(job) - reply = self._wait_for_reply(job) - if isinstance(reply, tuple): - cmpi_logging.logger.error( - "YumDB: job %s(id=%s) failed with error %s: %s", - job.__class__.__name__, job.jobid, - reply[0].__name__, str(reply[1])) - cmpi_logging.logger.trace_warn( - 
"YumDB: job %s(id=%s) exception traceback:\n%s%s: %s", - job.__class__.__name__, job.jobid, "".join(reply[2]), - reply[0].__name__, str(reply[1])) - reply[1].tb_printed = True - raise reply[1] + reply = self._send_and_receive(job) + log_reply_error(job, reply) cmpi_logging.logger.trace_verbose("YumDB: job %s(id=%s) done", job.__class__.__name__, job.jobid) return reply @@ -222,14 +296,24 @@ class YumDB(singletonmixin.Singleton): @cmpi_logging.trace_method def __enter__(self): - self._do_job(jobs.YumBeginSession()) - cmpi_logging.logger.trace_info('YumDB: new session started') - return self + with self._session_lock: + if self._session_level == 0: + self._do_job(jobs.YumBeginSession()) + cmpi_logging.logger.trace_info('YumDB: new session started') + self._session_level += 1 + cmpi_logging.logger.trace_info('YumDB: nested to session level=%d', + self._session_level) + return self @cmpi_logging.trace_method def __exit__(self, exc_type, exc_value, traceback): - self._do_job(jobs.YumEndSession()) - cmpi_logging.logger.trace_info('YumDB: session ended') + with self._session_lock: + if self._session_level == 1: + self._do_job(jobs.YumEndSession()) + cmpi_logging.logger.trace_info('YumDB: session ended') + cmpi_logging.logger.trace_info('YumDB: emerged from session' + ' level=%d', self._session_level) + self._session_level = max(self._session_level - 1, 0) # ************************************************************************* # Public methods @@ -239,21 +323,24 @@ class YumDB(singletonmixin.Singleton): """ Shut down the YumWorker process. 
""" - if self._process is not None: - cmpi_logging.logger.info('YumDB: terminating YumWorker') - self._process.uplink.put(None) # terminating command - self._process.uplink.join() - self._process.join() - cmpi_logging.logger.info('YumDB: YumWorker terminated') - self._process = None - else: - cmpi_logging.logger.warn("YunDB: clean_up called, when process" - " not initialized!") + with self._reply_lock: + if self._process is not None: + cmpi_logging.logger.info('YumDB: terminating YumWorker') + self._process.uplink.put(None) # terminating command + self._process.uplink.join() + self._process.join() + cmpi_logging.logger.info('YumDB: YumWorker terminated') + self._process = None + else: + cmpi_logging.logger.warn("YunDB: clean_up called, when process" + " not initialized!") @cmpi_logging.trace_method def get_package_list(self, kind, allow_duplicates=False, - sort=False): + sort=False, + include_repos=None, + exclude_repos=None): """ @param kind is one of: jobs.YumGetPackageList.SUPPORTED_KINDS @param allow_duplicates says, whether to list all found versions @@ -261,12 +348,15 @@ class YumDB(singletonmixin.Singleton): @return [pkg1, pkg2, ...], pkgi is instance of yumdb.PackageInfo """ return self._do_job(jobs.YumGetPackageList( - kind, allow_duplicates=allow_duplicates, sort=sort)) + kind, allow_duplicates=allow_duplicates, sort=sort, + include_repos=include_repos, exclude_repos=exclude_repos)) @cmpi_logging.trace_method def filter_packages(self, kind, allow_duplicates=False, sort=False, + include_repos=None, + exclude_repos=None, **filters): """ Similar to get_package_list(), but applies filter on packages. 
@@ -274,6 +364,7 @@ class YumDB(singletonmixin.Singleton): """ return self._do_job(jobs.YumFilterPackages( kind, allow_duplicates=allow_duplicates, sort=sort, + include_repos=include_repos, exclude_repos=exclude_repos, **filters)) @cmpi_logging.trace_method @@ -341,10 +432,10 @@ class YumDB(singletonmixin.Singleton): return self._do_job(jobs.YumFilterRepositories(kind, **filters)) @cmpi_logging.trace_method - def set_repository_enabled(self, repo, enable): + def set_repository_enabled(self, repoid, enable): """ Enable or disable repository. @param enable is a boolean """ - return self._do_job(jobs.YumSetRepositoryEnabled(repo, enable)) + return self._do_job(jobs.YumSetRepositoryEnabled(repoid, enable)) diff --git a/src/software/openlmi/software/yumdb/errors.py b/src/software/openlmi/software/yumdb/errors.py index e888712..35dad14 100644 --- a/src/software/openlmi/software/yumdb/errors.py +++ b/src/software/openlmi/software/yumdb/errors.py @@ -42,10 +42,10 @@ class PackageNotFound(Exception): """Raised, when requested package could not be found.""" pass class RepositoryNotFound(Exception): - """Raised, when requested repository cound not be found.""" - def __init__(self, repo_name): - Exception.__init__(self, "No such repository: %s" % repo_name) + """Raised, when requested repository cound not be found.""" + def __init__(self, repoid): + Exception.__init__(self, "No such repository: %s" % repoid) class RepositoryChangeError(Exception): - """Raised, when modification of repository failed.""" - pass + """Raised, when modification of repository failed.""" + pass diff --git a/src/software/openlmi/software/yumdb/jobs.py b/src/software/openlmi/software/yumdb/jobs.py index 6e6e2da..437f123 100644 --- a/src/software/openlmi/software/yumdb/jobs.py +++ b/src/software/openlmi/software/yumdb/jobs.py @@ -117,23 +117,42 @@ class YumGetPackageList(YumJob): #pylint: disable=R0903 in result for single (name, arch) of package differing in their version + sort - whether to sort 
packages by nevra + + include_repos - either a string passable to RepoStorage.enableRepo() + or a list of repository names, that will be temporared enabled before + listing packages; this is applied after disabling of repositories + + exclude_repos - either a string passable to RepoStorage.disableRepo() + or a list of repository names, that will be temporared disabled before + listing packages; this is applied before enabling of repositories + Worker replies with [pkg1, pkg2, ...]. """ - __slots__ = ('kind', 'allow_duplicates', 'sort') + __slots__ = ('kind', 'allow_duplicates', 'sort', 'include_repos', + 'exclude_repos') SUPPORTED_KINDS = ( 'installed', 'available', 'avail_reinst' , 'avail_notinst', 'all') - def __init__(self, kind, allow_duplicates, sort=False): + def __init__(self, kind, allow_duplicates, sort=False, + include_repos=None, exclude_repos=None): YumJob.__init__(self) if not isinstance(kind, basestring): raise TypeError("kind must be a string") if not kind in self.SUPPORTED_KINDS: raise ValueError("kind must be one of {%s}" % ", ".join(self.SUPPORTED_KINDS)) + for arg in ('include_repos', 'exclude_repos'): + val = locals()[arg] + if ( not val is None + and not isinstance(arg, (tuple, list, basestring))): + raise TypeError("expected list or string for %s" % arg) self.kind = kind self.allow_duplicates = bool(allow_duplicates) self.sort = bool(sort) + self.include_repos = include_repos + self.exclude_repos = exclude_repos class YumFilterPackages(YumGetPackageList): #pylint: disable=R0903 """ @@ -149,21 +168,23 @@ class YumFilterPackages(YumGetPackageList): #pylint: disable=R0903 """ __slots__ = ( 'name', 'epoch', 'version', 'release', 'arch', - 'nevra', 'envra', 'evra') + 'nevra', 'envra', 'evra', 'repoid') def __init__(self, kind, allow_duplicates, - sort=False, + sort=False, include_repos=None, exclude_repos=None, name=None, epoch=None, version=None, release=None, arch=None, nevra=None, evra=None, - envra=None): + envra=None, + repoid=None): if nevra 
is not None and not util.RE_NEVRA.match(nevra): raise ValueError("Invalid nevra: %s" % nevra) if evra is not None and not util.RE_EVRA.match(evra): raise ValueError("Invalid evra: %s" % evra) if envra is not None and not util.RE_ENVRA.match(evra): raise ValueError("Invalid envra: %s" % envra) - YumGetPackageList.__init__(self, kind, allow_duplicates, sort) + YumGetPackageList.__init__(self, kind, allow_duplicates, sort, + include_repos=include_repos, exclude_repos=exclude_repos) self.name = name self.epoch = None if epoch is None else str(epoch) self.version = version @@ -172,6 +193,7 @@ class YumFilterPackages(YumGetPackageList): #pylint: disable=R0903 self.nevra = nevra self.evra = evra self.envra = envra + self.repoid = repoid class YumSpecificPackageJob(YumJob): #pylint: disable=R0903 """ @@ -278,12 +300,12 @@ class YumFilterRepositories(YumGetRepositoryList): Worker replies with [repo1, repo2, ...]. """ - __slots__ = ('name', 'gpg_check', 'repo_gpg_check') + __slots__ = ('repoid', 'gpg_check', 'repo_gpg_check') def __init__(self, kind, - name=None, gpg_check=None, repo_gpg_check=None): + repoid=None, gpg_check=None, repo_gpg_check=None): YumGetRepositoryList.__init__(self, kind) - self.name = name + self.repoid = repoid self.gpg_check = None if gpg_check is None else bool(gpg_check) self.repo_gpg_check = ( None if repo_gpg_check is None else bool(repo_gpg_check)) @@ -292,14 +314,14 @@ class YumSpecificRepositoryJob(YumJob): #pylint: disable=R0903 """ Abstract job taking instance of yumdb.Repository as argument. 
Arguments: - repo - plays different role depending on job subclass + repoid - plays different role depending on job subclass """ - __slots__ = ('repo', ) - def __init__(self, repo): - if not isinstance(repo, Repository): - raise TypeError("repo must be instance of yumdb.Repository") + __slots__ = ('repoid', ) + def __init__(self, repoid): + if not isinstance(repoid, Repository): + raise TypeError("repoid must be instance of yumdb.Repository") YumJob.__init__(self) - self.repo = repo + self.repoid = repoid class YumSetRepositoryEnabled(YumSpecificRepositoryJob):#pylint: disable=R0903 """ @@ -308,7 +330,7 @@ class YumSetRepositoryEnabled(YumSpecificRepositoryJob):#pylint: disable=R0903 enable - boolean representing next state """ __slots__ = ('enable', ) - def __init__(self, pkg, enable): - YumSpecificRepositoryJob.__init__(self, pkg) + def __init__(self, repoid, enable): + YumSpecificRepositoryJob.__init__(self, repoid) self.enable = bool(enable) diff --git a/src/software/openlmi/software/yumdb/packagecheck.py b/src/software/openlmi/software/yumdb/packagecheck.py index fbb5b21..d63c725 100644 --- a/src/software/openlmi/software/yumdb/packagecheck.py +++ b/src/software/openlmi/software/yumdb/packagecheck.py @@ -89,17 +89,17 @@ class PackageCheck(object): Metadata for package concerning verification. It contains metadata for each file installed in "files" attribute. 
""" - __slots__ = ("pkgid", "file_checksum_type", "files") + __slots__ = ("objid", "file_checksum_type", "files") - def __init__(self, pkgid, file_checksum_type, files=None): + def __init__(self, objid, file_checksum_type, files=None): """ - @param pkgid is an in of original yum package object, which is used + @param objid is an in of original yum package object, which is used by server for subsequent operations on this package requested by client """ if files is not None and not isinstance( files, (list, tuple, set, dict)): raise TypeError("files must be an iterable container") - self.pkgid = pkgid + self.objid = objid self.file_checksum_type = file_checksum_type if not isinstance(files, dict): self.files = OrderedDict() diff --git a/src/software/openlmi/software/yumdb/packageinfo.py b/src/software/openlmi/software/yumdb/packageinfo.py index b2cd2b8..42ec3c4 100644 --- a/src/software/openlmi/software/yumdb/packageinfo.py +++ b/src/software/openlmi/software/yumdb/packageinfo.py @@ -36,23 +36,23 @@ class PackageInfo(object): -- results in segfaults. To speed up looking up of original yum package object on server, an - atribute "pkgid" is provided. + atribute "objid" is provided. 
""" __slots__ = ( - "pkgid", + "objid", "name", "epoch", "version", "release", "architecture", 'summary', 'description', 'license', 'group', 'vendor', - 'size', + "repoid", 'size', 'installed', # boolean 'install_time' # datetime instance ) - def __init__(self, pkgid, name, epoch, version, release, arch, **kwargs): + def __init__(self, objid, name, epoch, version, release, arch, **kwargs): """ - @param pkgid is an in of original yum package object, which is used + @param objid is an in of original yum package object, which is used by server for subsequent operations on this package requested by client """ - self.pkgid = pkgid + self.objid = objid self.name = name self.epoch = epoch self.version = version @@ -63,6 +63,7 @@ class PackageInfo(object): self.license = kwargs.pop('license', None) self.group = kwargs.pop('group', None) self.vendor = kwargs.pop('vendor', None) + self.repoid = kwargs.pop("repoid", None) self.size = kwargs.pop('size', None) if self.size is not None and not isinstance(self.size, (int, long)): raise TypeError('size must be an integer') @@ -113,7 +114,7 @@ class PackageInfo(object): that uniquelly identify package in database """ return dict((k, getattr(self, k)) for k in ( - 'name', 'epoch', 'version', 'release', 'arch')) + 'name', 'epoch', 'version', 'release', 'arch', 'repoid')) # ************************************************************************* # Public methods @@ -158,8 +159,8 @@ def make_package_from_db(pkg): @return instance of PackageInfo """ metadata = dict((k, getattr(pkg, k)) for k in ( - 'summary', 'description', 'license', 'group', 'vendor', - 'size')) + 'summary', 'description', 'license', 'group', 'vendor', 'size', + 'repoid')) if isinstance(pkg, yum.rpmsack.RPMInstalledPackage): metadata['installed'] = True metadata['install_time'] = datetime.fromtimestamp(pkg.installtime) diff --git a/src/software/openlmi/software/yumdb/process.py b/src/software/openlmi/software/yumdb/process.py index faa0711..96c471e 100644 --- 
a/src/software/openlmi/software/yumdb/process.py +++ b/src/software/openlmi/software/yumdb/process.py @@ -75,44 +75,96 @@ def _get_package_filter_function(filters): filters = dict((k, value) for k, value in filters.items() if value is not None) + match = None if "nevra" in filters: - def _cmp_nevra(pkg): - """@return True if pkg matches nevra filter""" - value = '%s-%s:%s-%s.%s' % ( - pkg.name, - "0" if not pkg.epoch or pkg.epoch == "(none)" - else pkg.epoch, - pkg.version, pkg.release, pkg.arch) - return value == filters["nevra"] - return _cmp_nevra - + match = util.RE_NEVRA.match(filters["nevra"]) elif "envra" in filters: - def _cmp_envra(pkg): - """@return True if pkg matches envra filter""" - value = '%s:%s-%s-%s.%s' % ( - "0" if not pkg.epoch or pkg.epoch == "(none)" - else pkg.epoch, - pkg.name, - pkg.version, pkg.release, pkg.arch) - return value == filters["envra"] - return _cmp_envra - - else: - if "evra" in filters: - for prop_name in ("epoch", "version", "release", "epoch"): - filters.pop(prop_name, None) - filter_list = [] - # properties are sorted by their filtering ability - # (the most unprobable property, that can match, comes first) - for prop_name in ("evra", "name", "version", "epoch", - "release", "arch"): - if not prop_name in filters: - continue - filter_list.append((prop_name, filters.pop(prop_name))) - def _cmp_props(pkg): - """@return True if pkg matches properies filter""" - return all(getattr(pkg, p) == v for p, v in filter_list) - return _cmp_props + match = util.RE_ENVRA.match(filters["envra"]) + if match is not None: + for attr in ("name", "epoch", "version", "release", "arch"): + match_attr = attr + if attr in {'version', 'release'}: + match_attr = attr[:3] + filters[attr] = match.group(match_attr) + filters.pop('nevra', None) + filters.pop('envra', None) + elif "evra" in filters: + for prop_name in ("epoch", "version", "release", "epoch"): + filters.pop(prop_name, None) + filter_list = [] + # properties are sorted by their filtering 
ability + # (the most unprobable property, that can match, comes first) + for prop_name in ("evra", "name", "version", "epoch", + "release", "repoid", "arch"): + if not prop_name in filters: + continue + filter_list.append((prop_name, filters.pop(prop_name))) + def _cmp_props(pkg): + """@return True if pkg matches properies filter""" + return all(getattr(pkg, p) == v for p, v in filter_list) + return _cmp_props + +class RepoFilterSetter(object): + """ + A context manager, that will set a repository filter lasting + as long as the object itself. + """ + def __init__(self, yum_base, include_repos=None, exclude_repos=None): + if not isinstance(yum_base, yum.YumBase): + raise TypeError("yum_base must be a YumBase instance") + self._yum_base = yum_base + self._include = include_repos + self._exclude = exclude_repos + # after __enter__ this will be dictionary containing ( + # repoid, enabled) pairs + self._prev_states = None + + def __enter__(self): + self._prev_states = { r.id: r.enabled + for r in self._yum_base.repos.repos.values()} + if isinstance(self._exclude, (list, tuple, set)): + exclude = ",".join(self._exclude) + else: + exclude = self._exclude + # set of repositories, that were affected + repos = set() + if exclude: + repos.update(self._yum_base.repos.disableRepo(exclude)) + _logger().info('disabling repositories: [%s]', ", ".join(repos)) + if isinstance(self._include, (list, tuple, set)): + include = ",".join(self._include) + else: + include = self._include + if include: + affected = self._yum_base.repos.enableRepo(include) + _logger().info('enabling repositories: [%s]', ", ".join(affected)) + repos.update(affected) + for repoid, prev_enabled in self._prev_states.items(): + if ( repoid not in repos + or ( bool(prev_enabled) + is bool(self._yum_base.repos.getRepo(repoid).enabled))): + # keep only manipulated repositories + del self._prev_states[repoid] + if len(self._prev_states): + for repoid in (r for r, v in self._prev_states.items() if v): + 
self._yum_base.pkgSack.sacks.pop(repoid, None) + self._yum_base.repos.populateSack() + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + # restore previous repository states + if len(self._prev_states): + _logger().info('restoring repositories: [%s]', + ", ".join(self._prev_states.keys())) + for repoid, enabled in self._prev_states.items(): + repo = self._yum_base.repos.getRepo(repoid) + if enabled: + repo.enable() + else: + repo.disable() + for repoid in (r for r, v in self._prev_states.items() if not v): + self._yum_base.pkgSack.sacks.pop(repoid, None) + self._yum_base.repos.populateSack() # ***************************************************************************** # Decorators @@ -204,7 +256,7 @@ class YumWorker(Process): yum_args=None, yum_kwargs=None, logging_config=None): - Process.__init__(self) + Process.__init__(self, name="YumWorker") self._queue_in = queue_in self._queue_out = queue_out self._session_level = 0 @@ -304,7 +356,7 @@ class YumWorker(Process): for orig in packages: pkg = packageinfo.make_package_from_db(orig) if cache_packages is True: - self._pkg_cache[pkg.pkgid] = orig + self._pkg_cache[pkg.objid] = orig res.append(pkg) return res @@ -336,15 +388,15 @@ class YumWorker(Process): if not isinstance(pkg, packageinfo.PackageInfo): raise TypeError("pkg must be instance of PackageInfo") _logger().debug("looking up yum package %s with id=%d", - pkg, pkg.pkgid) + pkg, pkg.objid) try: - result = self._pkg_cache[pkg.pkgid] + result = self._pkg_cache[pkg.objid] _logger().debug("lookup successful") except KeyError: _logger().warn("lookup of package %s with id=%d failed, trying" - " to query database", pkg, pkg.pkgid) + " to query database", pkg, pkg.objid) result = self._handle_filter_packages( - 'installed' if pkg.installed else 'avail_reinst', + 'installed' if pkg.installed else 'available', allow_duplicates=False, sort=False, transform=False, @@ -363,8 +415,10 @@ class YumWorker(Process): last modification times. 
""" if self._yum_base is not None: - self._yum_base.repos.close() + for repoid in self._yum_base.repos.repos.keys(): + self._yum_base.repos.delete(repoid) del self._yum_base.repos + del self._yum_base.pkgSack self._repodir_mtimes.clear() @_trace_function @@ -388,16 +442,17 @@ class YumWorker(Process): for repo in self._yum_base.repos.repos.values(): filename = repo.repofile if ( not os.path.exists(filename) - or os.stat(filename).st_mtime > repo.repo_config_age): - _logger().info('config file of repository "%s"' - ' changed', repo.id) + or ( int(os.stat(filename).st_mtime) + > repo.repo_config_age)): + _logger().info('config file of repository "%s" changed', + repo.id) dirty = True break if dirty is True: _logger().info("repository cache is dirty, cleaning up ...") self._clear_repository_cache() - if dirty is True or not self._repodir_mtimes: self._yum_base.getReposFromConfig() + if dirty is True or not self._repodir_mtimes: self._update_repodir_mtimes() @_trace_function @@ -478,14 +533,14 @@ class YumWorker(Process): Handler for session end job. """ _logger().info("ending session level %d", self._session_level) - self._session_level -= 1 + self._session_level = max(self._session_level - 1, 0) if self._session_level == 0: self._unlock_database() self._session_ended = True @_needs_database def _handle_get_package_list(self, kind, allow_duplicates, sort, - transform=True): + include_repos=None, exclude_repos=None, transform=True): """ Handler for listing packages job. 
@param transform says, whether to return just a package abstractions @@ -500,10 +555,11 @@ class YumWorker(Process): what = 'all' else: what = kind - _logger().debug("calling YumBase.doPackageLists(%s, showdups=%s)", - what, allow_duplicates) - pkglist = self._yum_base.doPackageLists(what, showdups=allow_duplicates) - _logger().debug("YumBase.doPackageLists() finished") + with RepoFilterSetter(self._yum_base, include_repos, exclude_repos): + _logger().debug("calling YumBase.doPackageLists(%s, showdups=%s)", + what, allow_duplicates) + pkglist = self._yum_base.doPackageLists(what, + showdups=allow_duplicates) if kind == 'all': result = pkglist.available + pkglist.installed elif kind == 'available': @@ -519,12 +575,14 @@ class YumWorker(Process): @_needs_database def _handle_filter_packages(self, kind, allow_duplicates, sort, + include_repos=None, exclude_repos=None, transform=True, **filters): """ Handler for filtering packages job. @return [pkg1, pkg2, ...] """ pkglist = self._handle_get_package_list(kind, allow_duplicates, False, + include_repos=include_repos, exclude_repos=exclude_repos, transform=False) matches = _get_package_filter_function(filters) result = [p for p in pkglist if matches(p)] @@ -644,24 +702,24 @@ class YumWorker(Process): @return list of yumdb.Repository instances -- filtered """ filters = dict((k, v) for k, v in filters.items() if v is not None) - if 'name' in filters: + if 'repoid' in filters: self._check_repository_configs() try: repo = repository.make_repository_from_db( - self._yum_base.repos.getRepo(filters["name"])) + self._yum_base.repos.getRepo(filters["repoid"])) if ( (kind == "enabled" and not repo.enabled) or (kind == "disabled" and repo.enabled)): _logger().warn( 'no such repository with id="%s"matching filters', - filters['name']) + filters['repoid']) return [] _logger().debug( "exactly one repository matching filters found") return [repo] except (KeyError, yum.Errors.RepoError): _logger().warn('repository with id="%s" could not 
be found', - filters['name']) - raise errors.RepositoryNotFound(filters['name']) + filters['repoid']) + raise errors.RepositoryNotFound(filters['repoid']) repos = self._handle_get_repository_list(kind, transform=False) result = [] for repo in repos: @@ -681,9 +739,9 @@ class YumWorker(Process): @return previous enabled state """ try: - repo = self._yum_base.repos.getRepo(repo.name) + repo = self._yum_base.repos.getRepo(repo.repoid) except (KeyError, yum.Errors.RepoError): - raise errors.RepositoryNotFound(repo.name) + raise errors.RepositoryNotFound(repo.repoid) res = repo.enabled try: if enable ^ res: diff --git a/src/software/openlmi/software/yumdb/repository.py b/src/software/openlmi/software/yumdb/repository.py index ee38f53..1bf7d82 100644 --- a/src/software/openlmi/software/yumdb/repository.py +++ b/src/software/openlmi/software/yumdb/repository.py @@ -29,9 +29,8 @@ import yum # maps names of Repository properties to their corresponding property # names in YumRepository object PROPERTY_NAME_MAP = { - "name" : "id", + "repoid" : "id", "base_urls" : "baseurl", - "caption" : "name", "config_file" : "repofile", "cost" : "cost", "enabled" : "enabled", @@ -39,6 +38,7 @@ PROPERTY_NAME_MAP = { "last_edit" : "repo_config_age", "mirror_list" : "mirrorlist", "mirror_urls" : "mirrorurls", + "name" : "name", "pkg_dir" : "pkgdir", "ready" : "ready", "repo_gpg_check" : "repo_gpgcheck", @@ -87,8 +87,8 @@ class Repository(object): vice-versa. 
""" __slots__ = ( - "repoid", # [int] id of python object on server process - "name", # [string] repository id name + "objid", # [int] id of python object on server process + "repoid", # [string] repository id name # (name of config file) "arch", # [string] architecture of packages @@ -96,7 +96,7 @@ class Repository(object): "base_urls", # [list] base urls as strings #"cache", #"cache_dir", - "caption", # [string] repository descriptive name + "name", # [string] repository descriptive name "config_file", # [string] file path to corresponding # config file "cost", # [int] cost of repository @@ -120,9 +120,9 @@ class Repository(object): "timeout", # timeout for requests ) - def __init__(self, repoid, name, arch, basearch, base_urls, caption, + def __init__(self, objid, repoid, arch, basearch, base_urls, config_file, cost, enabled, gpg_check, last_edit, last_update, - pkg_dir, ready, releasever, repo_gpg_check, revision, + name, pkg_dir, ready, releasever, repo_gpg_check, revision, timeout, mirror_list=None, mirror_urls=None): for arg in ('last_edit', 'last_update'): if ( locals()[arg] is not None @@ -134,12 +134,11 @@ class Repository(object): if ( locals()[arg] is not None and not isinstance(locals()[arg], (int, long))): raise TypeError("%s must be an integer" % arg) + self.objid = objid self.repoid = repoid - self.name = name self.arch = arch self.basearch = basearch self.base_urls = list(base_urls) - self.caption = caption self.config_file = config_file self.cost = cost self.enabled = bool(enabled) @@ -148,6 +147,7 @@ class Repository(object): self.last_update = last_update self.mirror_list = "" if not mirror_list else mirror_list self.mirror_urls = [] if not mirror_urls else list(mirror_urls) + self.name = name #self.pkg_count = pkg_count self.pkg_dir = pkg_dir self.ready = bool(ready) @@ -157,7 +157,7 @@ class Repository(object): self.timeout = timeout def __str__(self): - return self.name + return self.repoid def __getstate__(self): """ diff --git 
a/src/software/test/base.py b/src/software/test/base.py index 318f47c..6359203 100644 --- a/src/software/test/base.py +++ b/src/software/test/base.py @@ -217,22 +217,25 @@ class SoftwareBaseTestCase(unittest.TestCase): #pylint: disable=R0904 os.makedirs(cls.cache_dir) # rpm packages are expected to be in CWD os.chdir(cls.cache_dir) - if cls.needs_pkgdb(): - cls.safe_pkgs, cls.dangerous_pkgs = rpmcache.get_pkg_database( - use_cache=use_cache, - dangerous=cls.run_dangerous, - repolist=cls.test_repos, - cache_dir=cls.cache_dir if use_cache else None) + if not hasattr(cls, 'safe_pkgs') or not hasattr(cls, 'dangerous_pkgs'): + if cls.needs_pkgdb(): + safe, dangerous = rpmcache.get_pkg_database( + use_cache=use_cache, + dangerous=cls.run_dangerous, + repolist=cls.test_repos, + cache_dir=cls.cache_dir if use_cache else None) + SoftwareBaseTestCase.safe_pkgs = safe + SoftwareBaseTestCase.dangerous_pkgs = dangerous + else: + cls.safe_pkgs = [] + cls.dangerous_pkgs = [] + if cls.needs_pkgdb_files() and not hasattr(cls, 'pkgdb_files'): for pkg in cls.dangerous_pkgs: if not rpmcache.is_pkg_installed(pkg.name): rpmcache.install_pkg(pkg, repolist=cls.test_repos) - if cls.needs_pkgdb_files(): - cls.pkgdb_files = cls.get_pkgdb_files() - else: - cls.safe_pkgs = [] - cls.dangerous_pkgs = [] - if cls.needs_repodb(): - cls.repodb = cls.get_repodb() + SoftwareBaseTestCase.pkgdb_files = cls.get_pkgdb_files() + if cls.needs_repodb() and not hasattr(cls, 'repodb'): + SoftwareBaseTestCase.repodb = cls.get_repodb() @classmethod def tearDownClass(cls): diff --git a/src/software/test/test_hosted_software_collection.py b/src/software/test/test_hosted_software_collection.py index bdac158..365fd5d 100755 --- a/src/software/test/test_hosted_software_collection.py +++ b/src/software/test/test_hosted_software_collection.py @@ -124,16 +124,14 @@ class TestHostedSoftwareCollection(base.SoftwareBaseTestCase): Test ReferenceNames for ComputerSystem. 
""" objpath = self.make_op() - refs = self.conn.ReferenceNames( + refs = self.conn.AssociatorNames( Role="Antecedent", - ObjectName=objpath["Antecedent"]) - self.assertGreater(len(refs), 0) - refs = [ r for r in refs if "Dependent" in r - and r["Dependent"].classname == \ - "LMI_SystemSoftwareCollection" ] - self.assertEqual(1, len(refs)) + ObjectName=objpath["Antecedent"], + ResultRole="Dependent", + ResultClass='LMI_SystemSoftwareCollection') + self.assertEqual(len(refs), 1) ref = refs[0] - self.assertEqual(ref, objpath) + self.assertEqual(objpath["Dependent"], ref) @base.mark_dangerous def test_get_dependent_referents(self): diff --git a/src/software/test/test_installed_software_identity.py b/src/software/test/test_installed_software_identity.py index f3dec9c..1065c20 100644..100755 --- a/src/software/test/test_installed_software_identity.py +++ b/src/software/test/test_installed_software_identity.py @@ -225,7 +225,6 @@ class TestInstalledSoftwareIdentity(base.SoftwareBaseTestCase): self.assertTrue(ref["InstanceID"].startswith("LMI:PKG:")) nevra_set = set(i["InstanceID"] for i in refs) - # NOTE: installed packages might not be available for pkg in self.safe_pkgs: nevra = 'LMI:PKG:'+pkg.get_nevra(with_epoch="ALWAYS") self.assertTrue(nevra in nevra_set, diff --git a/src/software/test/test_member_of_software_collection.py b/src/software/test/test_member_of_software_collection.py index 4d375c4..df259fe 100755 --- a/src/software/test/test_member_of_software_collection.py +++ b/src/software/test/test_member_of_software_collection.py @@ -69,25 +69,25 @@ class TestMemberOfSoftwareCollection(base.SoftwareBaseTestCase): "OP is missing \"%s\" key for package %s"%(key, pkg)) self.assertEqual(objpath, inst.path) - @base.mark_tedious - def test_enum_instance_names(self): - """ - Tests EnumInstanceNames call on installed packages. 
- """ - inames = self.conn.EnumerateInstanceNames(ClassName=self.CLASS_NAME) - self.assertGreater(len(inames), 0) - objpath = self.make_op(self.safe_pkgs[0]) - for iname in inames: - self.assertIsInstance(iname, pywbem.CIMInstanceName) - self.assertEqual(iname.namespace, 'root/cimv2') - self.assertEqual(iname.classname, self.CLASS_NAME) - self.assertEqual(sorted(iname.keys()), sorted(self.KEYS)) - self.assertEqual(objpath["Collection"], iname["Collection"]) - nevra_set = set(i["Member"]["InstanceID"] for i in inames) - for pkg in self.safe_pkgs: - nevra = 'LMI:PKG:'+pkg.get_nevra(with_epoch="ALWAYS") - self.assertTrue(nevra in nevra_set, - 'Missing nevra "%s".' % nevra) +# @base.mark_tedious +# def test_enum_instance_names(self): +# """ +# Tests EnumInstanceNames call on installed packages. +# """ +# inames = self.conn.EnumerateInstanceNames(ClassName=self.CLASS_NAME) +# self.assertGreater(len(inames), 0) +# objpath = self.make_op(self.safe_pkgs[0]) +# for iname in inames: +# self.assertIsInstance(iname, pywbem.CIMInstanceName) +# self.assertEqual(iname.namespace, 'root/cimv2') +# self.assertEqual(iname.classname, self.CLASS_NAME) +# self.assertEqual(sorted(iname.keys()), sorted(self.KEYS)) +# self.assertEqual(objpath["Collection"], iname["Collection"]) +# nevra_set = set(i["Member"]["InstanceID"] for i in inames) +# for pkg in self.safe_pkgs: +# nevra = 'LMI:PKG:'+pkg.get_nevra(with_epoch="ALWAYS") +# self.assertTrue(nevra in nevra_set, +# 'Missing nevra "%s".' % nevra) # @base.mark_tedious # def test_enum_instances(self): @@ -106,31 +106,31 @@ class TestMemberOfSoftwareCollection(base.SoftwareBaseTestCase): # for pkg in self.safe_pkgs: # self.assertIn(pkg.get_nevra(with_epoch="ALWAYS"), nevra_set) - @base.mark_tedious - def test_get_collection_referents(self): - """ - Test ReferenceNames for SystemSoftwareCollection. 
- """ - objpath = self.make_op(self.safe_pkgs[0]) - refs = self.conn.AssociatorNames( - Role="Collection", - ObjectName=objpath["Collection"], - ResultRole="Member", - ResultClass="LMI_SoftwareIdentity") - self.assertGreater(len(refs), 0) - for ref in refs: - self.assertIsInstance(ref, pywbem.CIMInstanceName) - self.assertEqual(ref.namespace, 'root/cimv2') - self.assertEqual(ref.classname, "LMI_SoftwareIdentity") - self.assertEqual(sorted(ref.keys()), ["InstanceID"]) - self.assertTrue(ref["InstanceID"].startswith("LMI:PKG:")) - nevra_set = set(i["InstanceID"] for i in refs) - # NOTE: installed packages might not be available - for pkg in self.safe_pkgs: - nevra = 'LMI:PKG:'+pkg.get_nevra(with_epoch="ALWAYS") - self.assertTrue(nevra in nevra_set, - 'Missing nevra "%s".' % nevra) - +# @base.mark_tedious +# def test_get_collection_referents(self): +# """ +# Test ReferenceNames for SystemSoftwareCollection. +# """ +# objpath = self.make_op(self.safe_pkgs[0]) +# refs = self.conn.AssociatorNames( +# Role="Collection", +# ObjectName=objpath["Collection"], +# ResultRole="Member", +# ResultClass="LMI_SoftwareIdentity") +# self.assertGreater(len(refs), 0) +# for ref in refs: +# self.assertIsInstance(ref, pywbem.CIMInstanceName) +# self.assertEqual(ref.namespace, 'root/cimv2') +# self.assertEqual(ref.classname, "LMI_SoftwareIdentity") +# self.assertEqual(sorted(ref.keys()), ["InstanceID"]) +# self.assertTrue(ref["InstanceID"].startswith("LMI:PKG:")) +# nevra_set = set(i["InstanceID"] for i in refs) +# # NOTE: installed packages might not be available +# for pkg in self.dangerous_pkgs: +# nevra = 'LMI:PKG:'+pkg.get_nevra(with_epoch="ALWAYS") +# self.assertTrue(nevra in nevra_set, +# 'Missing nevra "%s".' % nevra) +# def test_get_member_referents(self): """ Test ReferenceNames for SoftwareIdentity. 
diff --git a/src/software/test/test_software_identity_resource.py b/src/software/test/test_software_identity_resource.py index f728095..13f05de 100755 --- a/src/software/test/test_software_identity_resource.py +++ b/src/software/test/test_software_identity_resource.py @@ -99,8 +99,7 @@ class TestSoftwareIdentityResource( "EnabledState does not match for repo %s" % repo.repoid) self.assertEqual(3, inst["ExtendedResourceType"]) if repo.revision is None: - self.assertTrue(isinstance(inst["Generation"], (int, long)) - or inst["Generation"] is None) + self.assertIsNone(inst["Generation"]) else: self.assertEqual(repo.revision, inst["Generation"], "Generation does not match for repo %s" % repo.repoid) @@ -125,9 +124,7 @@ class TestSoftwareIdentityResource( self.assertIsInstance(inst["StatusDescriptions"], list) self.assertEqual(1, len(inst["StatusDescriptions"])) if repo.last_updated is None: - self.assertTrue( - isinstance(inst["TimeOfLastUpdate"], pywbem.CIMDateTime) or - inst["TimeOfLastUpdate"] is None) + self.assertIsNone(inst["TimeOfLastUpdate"]) else: time_stamp = repo.last_updated.replace( microsecond=0, tzinfo=pywbem.cim_types.MinutesFromUTC(0)) |