summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJonathan Dieter <jdieter@gmail.com>2007-03-21 16:21:23 +0200
committerJonathan Dieter <jdieter@gmail.com>2007-03-21 16:21:23 +0200
commitafbedc06abf48f63ba8aee113103a0b360ed1c63 (patch)
tree7468431a27860afd19f1ca1356b9b862104f3972
parenteee8adadcd4fc3c0d7deea4a8ada73b4278c61f4 (diff)
downloadpresto-afbedc06abf48f63ba8aee113103a0b360ed1c63.zip
presto-afbedc06abf48f63ba8aee113103a0b360ed1c63.tar.gz
presto-afbedc06abf48f63ba8aee113103a0b360ed1c63.tar.xz
Massive rewrite of whole system. Now uses yum-like XML data so we don't waste time on 404 messages.
Signed-off-by: Jonathan Dieter <jdieter@gmail.com>
-rw-r--r--README25
-rw-r--r--deltarpm.py513
-rwxr-xr-xmakerepo/createprestorepo.py308
-rwxr-xr-xmakerepo/dumpMetadata.py782
-rw-r--r--presto.conf3
-rw-r--r--presto.py127
-rwxr-xr-xserver.py179
-rw-r--r--share-presto/deltarpm.py86
-rw-r--r--share-presto/prestoRepo.py588
-rw-r--r--share-presto/prestoTransaction.py65
-rw-r--r--share-presto/prestomdparser.py167
-rw-r--r--yum-deltarpm.conf4
-rw-r--r--yum-deltarpm.py76
13 files changed, 2144 insertions, 779 deletions
diff --git a/README b/README
index 3e7853e..e37f369 100644
--- a/README
+++ b/README
@@ -1,16 +1,27 @@
Presto: A project to add delta rpm support into yum for Fedora users
https://hosted.fedoraproject.org/projects/presto/wiki/WikiStart
-Most of the code base has been written by Marcel Hild <mhild@redhat.com> as up2date/satellite-server delta rpm support. Code adaptation for yum done by Ahmed Kamal <email.ahmedkamal@googlemail.com>
+Most of the code base has been written by Marcel Hild <mhild@redhat.com> as
+up2date/satellite-server delta rpm support. Code adaptation for yum done by
+Ahmed Kamal <email.ahmedkamal@googlemail.com>. Support for storing repository
+information added by Jonathan Dieter <jdieter@gmail.com>
Installation:
=============
1- Make sure deltarpm is installed on your system (yum -y install deltarpm)
-2- Place the files in the following locations (/usr/share/deltarpm-python/deltarpm.py /usr/lib/yum-plugins/yum-deltarpm.py /etc/yum/pluginconf.d/yum-deltarpm.conf)
-3- Modify the conf file to point to any test repo you have setup
-4- Use the server.py script to generate drpms from different update levels of some rpms in your test repo
-5- Now install an old rpm from your repo using rpm, try updating it using yum. The plugin should kickin, try to download the drpm, reconstruct the full rpm, and yum should install that
+2- Place the files in the following locations:
+ share-presto => /usr/share/presto/*
+ presto.py => /usr/lib/yum-plugins/presto.py
+ presto.conf => /etc/yum/pluginconf.d/presto.conf)
+3- Use makerepo/createdeltarepo.py to create a Presto repository on top of a
+ normal yum repository.
+ NOTE: createdeltarepo.py will NOT change any files created by createrepo
+4- In your repository conf file, set deltaurl to point to your Presto
+ repository (in most cases, it will be the same as baseurl).
+5- Now install an old rpm from your repo using rpm, then try updating it using
+ yum. The plugin should kick in, try to download the drpm, reconstruct the
+ full rpm, and yum should install that
Notes:
=====
-- The code is in a *very* early stage. Minimal testing has been done.
-- I (Ahmed) will probably be helping/maintaining this project, but probably I will not be the lead developer behind this
+- The code is in an early stage. Minimal testing has been done.
+
diff --git a/deltarpm.py b/deltarpm.py
deleted file mode 100644
index aac0c02..0000000
--- a/deltarpm.py
+++ /dev/null
@@ -1,513 +0,0 @@
-# author: Lars Herrmann <herrmann@redhat.com>
-# Ahmed Kamal <email.ahmedkamal@googlemail.com>
-# license: GPL (see COPYING file in distribution)
-#
-# this module provides a python wrapper around deltarpm tools written by suse
-#
-# TODO: catch exceptions wherever possible and raise useful ones ;)
-# see TODO lines in methods
-
-MAKE='/usr/bin/makedeltarpm'
-APPLY='/usr/bin/applydeltarpm'
-SEQ_SUFFIX='seq'
-SUFFIX='drpm'
-
-# constants for up2date configuration
-# major flag to enable or disable use of delta rpms , used in patched up2date
-USE_DELTA='useDeltaRpms'
-# directory where to store generated rpms - depending on use up2date storage or temp storage for fetching
-STORAGE='storageDir'
-# directory where to store delta rpm files, normally temporarily
-DELTA_STORAGE='deltaStorageDir'
-# url to fetch delta rpm files from
-DELTA_URL='deltaRpmURL'
-# directory where local copies of oldrpms reside
-DELTA_OLDRPM_REPOSITORY='deltaOldRpmRepository'
-# flag indicates if downloading and verfying sequence files before downloading delta rpms - saves bandwidth
-DELTA_USE_SEQ='deltaUseSequences'
-# flag indicates if local copies of old rpms should be used
-DELTA_USE_OLDRPMS='deltaUseOldRpms'
-SIZELIMIT='deltaRpmSizeLimit'
-
-# default setting for STORAGE - same as with up2date
-DEFAULT_STORAGE='/var/spool/up2date'
-# default setting for DELTA_STORAGE
-DEFAULT_DELTA_STORAGE='/var/spool/up2date/deltarpms'
-
-# enable or disable to see verbose messages on stdout
-DEBUG=0
-
-import popen2
-import string
-import os
-import glob
-
-class Process:
- """wrapper class to execute programs and return exitcode and output (stdout and stderr combined)"""
- def __init__(self):
- self.__stdout=None
- self.__returncode=None
- self.__command=None
- self.__args=None
-
- def run(self, command, *args):
- self.__command=command
- self.__args=args
- cmdline=command+" "+string.join(args, " ")
- if DEBUG:
- print 'DEBUG: %s.%s: executing %s' % (self.__class__, 'run', cmdline)
- pipe = popen2.Popen4(cmdline)
- self.__stdout=pipe.fromchild.read()
- retcode = pipe.wait()
- if os.WIFEXITED(retcode):
- self.__returncode = os.WEXITSTATUS(retcode)
- else:
- self.__returncode = retcode
- # fallback to old implementation - works better ?
- #stdoutp = os.popen(cmdline,'r',1)
- #self.__stdout = stdoutp.read()
- #retcode = stdoutp.close()
- #if retcode is None:
- # self.__returncode = 0
- #else:
- # self.__returncode = retcode
-
- def getOutput(self):
- return self.__stdout
-
- def returnCode(self):
- return self.__returncode
-
-class RpmDescription:
- """Wrapper class to encapsulate RPM attributes"""
-
- def __init__(self, name,version,release,arch, epoch=''):
- """constructor: provide major attributes in correct order - epoch optional"""
- self.name=name
- self.version=version
- self.release=release
- self.epoch=epoch
- self.arch=arch
- if DEBUG:
- print 'DEBUG: %s.%s: created: %s' % (self.__class__, '__init__', self)
-
- def rhnFileName(self):
- """return file name in e:nvr.a notation"""
- if self.epoch:
- return "%s:%s-%s-%s.%s" % (self.epoch, self.name, self.version, self.release, self.arch)
- else:
- return "%s-%s-%s.%s" % (self.name, self.version, self.release, self.arch)
-
- def evr(self):
- """return file name in e:vr notation as used in Satellite repository"""
- if self.epoch:
- return "%s:%s-%s" % (self.epoch, self.version, self.release)
- else:
- return "%s-%s" % (self.version, self.release)
-
- def satellitePath(self):
- """return file path as used in Satellite repository - relative to /var/satellite"""
- return "%s/%s/%s/%s.rpm" % (self.name, self.evr(), self.arch, self.fileName())
-
- def fileName(self):
- """return file name in nvr.a notation as used in up2date storageDir"""
- return "%s-%s-%s.%s" % (self.name, self.version, self.release, self.arch)
-
- def __str__(self):
- return self.rhnFileName()
-
-class DeltaRpmWrapper:
- """wrapper around deltarpm binaries - implement methods for create, apply and verify delta rpms
- - raises exceptions if exitcode of binaries was != 0"""
-
- def __init__(self, storageDir, oldRpmDir=None):
- """constructor - params: storageDir=path of delta rpm storage, oldRpmDir = path of full rpm repository (optional)"""
- self.storageDir = storageDir
- self.oldRpmDir = oldRpmDir
- if DEBUG:
- print 'DEBUG: %s.%s: created: %s' % (self.__class__, '__init__', self)
-
- def __str__(self):
- return "%s: storageDir=%s, oldRpmDir=%s" % (self.__class__, self.storageDir, self.oldRpmDir)
-
- def create(self, oldrpm, newrpm):
- """wraps execution of makedeltarpm -s seqfile oldrpm newrpm deltarpm
- constructs file names and paths based on given RpmDescription and instance settings for directories"""
- if DEBUG:
- print 'DEBUG: %s.create(%s,%s)' % (self.__class__,oldrpm, newrpm)
-
- # contruct filenames in satellite repository
- oldrpmfile = "%s/%s" % (self.oldRpmDir, oldrpm.satellitePath())
- newrpmfile = "%s/%s" % (self.oldRpmDir, newrpm.satellitePath())
-
- # check if file exists
- # this is an ugly workaround, where the epoch is 0, but satellite
- # stores it as 0:package, so we try it with epoch = string "0"
- # we do glob.glob here, because satellite path could also contain
- # shell wildcards
- if not glob.glob(oldrpmfile):
- if not oldrpm.epoch:
- oldrpm.epoch = '0'
- oldrpmfile = "%s/%s" % (self.oldRpmDir, oldrpm.satellitePath())
- if not glob.glob(newrpmfile):
- if not newrpm.epoch:
- newrpm.epoch = '0'
- newrpmfile = "%s/%s" % (self.oldRpmDir, newrpm.satellitePath())
-
- # construct filenames in deltarpm repository:
- # /root/oldrpm/newrpm.{seq|rpm}
- # with oldrpm|newrpm in e:nvr.a notation
- deltarpm = newrpm.rhnFileName()
- # files should go to /root/oldrpm
- deltadir = "%s/%s" % (self.storageDir, oldrpm.rhnFileName())
- # TODO check if is a directory
- if not os.access(deltadir, os.F_OK):
- if DEBUG:
- print 'DEBUG: %s.create: mkdir(%s)' % (__name__, deltadir)
- os.makedirs(deltadir)
- # filenames
- deltarpmfile = "%s/%s" % (deltadir, deltarpm)
- p=Process()
- p.run(MAKE, '-s', "%s.%s" % (deltarpmfile,SEQ_SUFFIX), oldrpmfile, newrpmfile, "%s.rpm" % deltarpmfile)
- # save output into logfile
- logfile = "%s.log" % deltarpmfile
- fd = open(logfile,'w')
- fd.write(p.getOutput())
- print >> fd, "of: %s \nnf: %s" % (oldrpmfile, newrpmfile)
-
- fd.close()
- if p.returnCode():
- raise Exception("%s.create: exitcode was %s - see %s" % (self.__class__,p.returnCode(), logfile))
- return deltarpmfile
-
- def apply(self, oldrpm, newrpm, deltarpmfile, useOldRpms = 0):
- """wraps execution of applydeltarpm [-r oldrpm] deltarpm newrpm -
- constructs file names and paths based on given RpmDescription and instance settings for directories"""
- # args: RpmDescription
- # TODO: test args for type == instance and __class__ == RpmDescription
- # TODO: test without useOldRpms
- if DEBUG:
- print 'DEBUG: %s.apply(%s,%s,%s,%s)' % (self.__class__,oldrpm, newrpm, deltarpmfile, useOldRpms)
- p=Process()
- # targetrpm filename
- newrpmfile = "%s/%s-%s-%s.%s.rpm" % (self.storageDir, newrpm.name, newrpm.version, newrpm.release, newrpm.arch)
- if useOldRpms:
- # TODO: check if self.oldRpmDir is set and exists !
- oldrpmfile = "%s/%s-%s-%s.%s.rpm" % (self.oldRpmDir, oldrpm.name, oldrpm.version, oldrpm.release, oldrpm.arch)
- p.run(APPLY, '-r', oldrpmfile, deltarpmfile, newrpmfile)
- else:
- p.run(APPLY, deltarpmfile, newrpmfile)
- if p.returnCode():
- # in case of error save output into logfile - will not be removed for further inspection
- logfile = "%s.log" % deltarpmfile
- fd = open(logfile,'w')
- fd.write(p.getOutput())
- fd.close()
- raise Exception("%s.apply(%s) exitcode was %d - see %s" % (self.__class__, newrpm, p.returnCode(), logfile))
- return newrpmfile
-
- def verifySequence(self, sequencefile, oldrpm = None, useOldRpms = 0):
- """wraps execution of applydeltarpm [-r oldrpm] -s seqfilecontent -
- constructs file names and paths based on given RpmDescription and instance settings for directories"""
- if DEBUG:
- print 'DEBUG: %s.verify(%s,%s,%s)' % (self.__class__,sequencefile, oldrpm, useOldRpms)
- # read sequencefile
- fd = open(sequencefile)
- # TODO: is strip safe here ? could remove other chars than the linebreak
- content = string.strip(string.join(fd.readlines()))
- fd.close()
- p = Process()
- if useOldRpms:
- oldrpmfile = "%s/%s-%s-%s.%s.rpm" % (self.oldRpmDir, oldrpm.name, oldrpm.version, oldrpm.release, oldrpm.arch)
- p.run(APPLY, '-s', content, '-r', oldrpmfile)
- else:
- p.run(APPLY, '-s', content)
- if p.returnCode():
- # in case of error save output into logfile - will not be removed for further inspection
- logfile = "%s.log" % deltarpmfile
- fd = open(logfile,'w')
- fd.write(p.getOutput())
- fd.close()
- raise Exception("could not verify sequence of delta rpm: %d - see %s" % (p.returnCode(), logfile))
-class Fetcher:
- """ abstract class to be derived from classes implementing fetching seq and rpm files """
-
- def fetchSequence(self, oldrpm, targetrpm):
- pass
-
- def fetchDeltaRpm(self, oldrpm, targetrpm):
- pass
-
-class HttpFetcher(Fetcher):
- """ fetching seq and rpm files via http urls"""
-
- def __init__(self, deltaUrl, destinationDir):
- """constructor - params: deltaUrl = webapp-url, destinationDir=path to store files"""
- self.deltaUrl = deltaUrl
- self.destinationDir = destinationDir
- if DEBUG:
- print 'DEBUG: %s.%s: created: %s' % (self.__class__, '__init__', self)
-
- def fetchSequence(self, oldrpm, targetrpm):
- if DEBUG:
- print 'DEBUG: %s.fetchSequence: : (%s,%s)' % (self.__class__, oldrpm, targetrpm)
- return self.__fetchFile(oldrpm, targetrpm, SEQ_SUFFIX, 1)
-
- # The following method has been disabled by Fedora Infrastructure team, as
- # we will not be using a server side web service, rather, delta rpms will be generated
- # periodically, and client side, will simply download them if applicable
- def __DISABLED__fetchFile(self, oldrpm, targetrpm, suffix, sequence=0):
- """private method - uses private module to do the http request to rely on http return code"""
- import httppost
- data={}
- data['oldname'] = oldrpm.name
- data['oldversion'] = oldrpm.version
- data['oldrelease'] = oldrpm.release
- # avoid that httplib would send 'None' and not empty string
- if oldrpm.epoch:
- data['oldepoch'] = oldrpm.epoch
- else:
- data['oldepoch']=''
- data['oldarch'] = oldrpm.arch
- data['newname'] = targetrpm.name
- data['newversion'] = targetrpm.version
- data['newrelease'] = targetrpm.release
- if targetrpm.epoch:
- data['newepoch'] = targetrpm.epoch
- else:
- data['newepoch'] = ''
- data['newarch'] = targetrpm.arch
- if sequence:
- data['sequence']='1'
-
- fd = httppost.send(self.deltaUrl, data, DEBUG)
- content = fd.read()
- fd.close()
- dest = "%s/%s-%s-%s.%s.%s" % (self.destinationDir, targetrpm.name, targetrpm.version, targetrpm.release, targetrpm.arch, suffix)
- fd=open(dest,'w')
- fd.write(content)
- fd.close
- return dest
- def __fetchFile(self, oldrpm, targetrpm, suffix, sequence=0):
- """private method - uses private module to download delta rpms"""
- import urllib2
-
- if sequence:
- data['sequence']='1'
-
- drpmName = getDrpmName(oldrpm, targetrpm)
- fullUrl = '%s%s.%s' % (self.deltaUrl,drpmName,suffix)
- if DEBUG:
- print 'DEBUG: oldrpm: %s, newrpm: %s, suffix: %s' % (oldrpm, targetrpm, suffix)
- print 'DEBUG: %s.__fetchFile: : (%s)' % (self.__class__, fullUrl)
- try:
- fd = urllib2.urlopen(fullUrl)
- except IOError, e:
- if hasattr(e, 'reason'):
- raise Exception ("Failed to download delta rpm from URL %s, error: %s" % (fullUrl,e.reason))
- elif hasattr(e, 'code'):
- raise Exception ("Failed to download delta rpm from URL %s, error: %s" % (fullUrl,e.code))
- else:
- content = fd.read()
- fd.close()
- dest = "%s/%s-%s-%s.%s.%s" % (self.destinationDir, targetrpm.name, targetrpm.version, targetrpm.release, targetrpm.arch, suffix)
- fd=open(dest,'w')
- fd.write(content)
- fd.close
- return dest
-
- def fetchDeltaRpm(self, oldrpm, targetrpm):
- if DEBUG:
- print 'DEBUG: %s.fetchDeltaRpm: : (%s,%s)' % (self.__class__, oldrpm, targetrpm)
- return self.__fetchFile(oldrpm, targetrpm, SUFFIX, 0)
-
- def __str__(self):
- return "%s: deltaUrl=%s, destinationDir=%s" % (self.__class__, self.deltaUrl, self.destinationDir)
-
-class TestFSFetcher(Fetcher):
- """ fetching seq and rpm files from local filesystem - uses NOT same directory structure as DeltaRpmWrapper.create"""
-
- def __init__(self, sourceDir, destinationDir):
- self.sourceDir = sourceDir
- self.destinationDir = destinationDir
- if DEBUG:
- print 'DEBUG: %s.%s: created: %s' % (self.__class__, '__init__', self)
-
- def __str__(self):
- return "%s: sourceDir=%s, destinationDir=%s" % (self.__class__, self.sourceDir, self.destinationDir)
-
- def __copyFile(self, targetrpm, suffix):
- source = "%s/%s-%s-%s.%s.%s" % (self.sourceDir, targetrpm.name, targetrpm.version, targetrpm.release, targetrpm.arch, suffix)
- # construct new sequence filename
- dest = "%s/%s-%s-%s.%s.%s" % (self.destinationDir, targetrpm.name, targetrpm.version, targetrpm.release, targetrpm.arch, suffix)
- # copy content usind read/write
- fr=open(source,'r')
- content=fr.readlines()
- fr.close()
- fw=open(dest,'w')
- fw.writelines(content)
- fw.close()
- return dest
-
-
- def fetchSequence(self, oldrpm, targetrpm):
- if DEBUG:
- print 'DEBUG: %s.fetchSequence(%s,%s)' % (self.__class__, oldrpm, targetrpm)
- return self.__copyFile(targetrpm, SEQ_SUFFIX)
-
- def fetchDeltaRpm(self, oldrpm, targetrpm):
- if DEBUG:
- print 'DEBUG: %s.fetchDeltaRpm(%s,%s)' % (self.__class__, oldrpm, targetrpm)
- return self.__copyFile(targetrpm, 'rpm')
-
-def getInstalled(targetrpm, sizelimit=0):
- """retrieve description of installed version from rpm database"""
- if DEBUG:
- print 'DEBUG: %s.getInstalled(%s)' % (__name__, targetrpm)
- import rpm
- ts = rpm.TransactionSet()
- # ts.setVSFlags(-1)
- mi = ts.dbMatch('name', targetrpm.name)
- oldrpm = None
- count = 0
- for h in mi:
- oldrpmtmp = RpmDescription( h['name'], h['version'], h['release'], h['arch'], h['epoch'])
- size = h['size']
- #print "sizelimit: %d, size: %d" % (sizelimit, size)
- if sizelimit > 0 and size > sizelimit:
- raise Exception ("package %s bigger than limit (%d, %d)" % (targetrpm.name, size, sizelimit))
- # TODO: add __cmp__ to RpmDescription to determine most current installed
- # does not matter too much - for reconstruction any installed version is good
- oldrpm = oldrpmtmp
- count+=1
- continue
- if oldrpm:
- if oldrpm > oldrpmtmp:
- oldrpm = oldrpmtmp
- else:
- oldrpm = oldrpmtmp
- # cleanup handles to free all rpmdb transactions - avoid db locking
- del mi
- del ts
- if DEBUG:
- print 'DEBUG: %s.getInstalled(%s): %s matches, using %s' % (__name__, targetrpm, count,oldrpm)
- return oldrpm
-
-def getPackageFromDelta(cfg, rpmarray):
- # method to be invoked within up2date
- # return filename of regenerated newrpm
- #
- if DEBUG:
- print 'DEBUG: %s.getPackageFromDelta(%s)' % (__name__, rpmarray)
- sizelimit=0
- if cfg.has_key(SIZELIMIT):
- sizelimit = cfg[SIZELIMIT]
- # 1. retrieve relevant config from rhncfg
- if cfg.has_key(STORAGE):
- storageDir = cfg[STORAGE]
- else:
- storageDir = DEFAULT_STORAGE
- # TODO: check if is directory
- if not os.access(storageDir, os.F_OK):
- if DEBUG:
- print 'DEBUG: %s.getPackageFromDelta: mkdir(%s)' % (__name__, storageDir)
- os.makedirs(storageDir)
- if cfg.has_key(DELTA_STORAGE):
- deltaStorage = cfg[DELTA_STORAGE]
- else:
- deltaStorage = DEFAULT_DELTA_STORAGE
- # TODO: check if is directory
- if not os.access(deltaStorage, os.F_OK):
- if DEBUG:
- print 'DEBUG: %s.getPackageFromDelta: mkdir(%s)' % (__name__, deltaStorage)
- os.makedirs(deltaStorage)
- if cfg.has_key(DELTA_URL):
- deltaUrl = cfg[DELTA_URL]
- else:
- # without URL we can't do anything useful - raise exception and let up2date fall back to its own retrieval
- raise "%s not configured" % DELTA_URL
- oldRpms = None
- if cfg.has_key(DELTA_OLDRPM_REPOSITORY):
- oldRpms = cfg[DELTA_OLDRPM_REPOSITORY]
- # use both config setting where old rpms could be and if they should be used
- if cfg.has_key(DELTA_USE_OLDRPMS):
- useOldRpms = cfg[DELTA_USE_OLDRPMS]
- else:
- # default is to NOT use old rpms
- useOldRpms = 0
- # if old rpms should be used, check if oldRpms is set , warn otherwise
- if useOldRpms:
- if not oldRpms:
- print "warning: configuration inconsistent: cannot use ols rpms without path specified, check %s" % DELTA_OLDRPM_REPOSITORY
- useOldRpms = 0
- if cfg.has_key(DELTA_USE_SEQ):
- useSeq = cfg[DELTA_USE_SEQ]
- else:
- # default is to NOT use sequence files
- useSeq = 0
- # 2. determine old rpm description
- targetrpm = RpmDescription(rpmarray[0], rpmarray[1], rpmarray[2], rpmarray[4], rpmarray[3])
- oldrpm = getInstalled(targetrpm, sizelimit)
-
- # raise exception if package is not installed
- if not oldrpm:
- raise Exception("%s is not installed" % targetrpm.name)
-
- # TODO: determine based on URL setting whih fetcher to use
- fetcher = HttpFetcher(deltaUrl, deltaStorage)
- #fetcher = TestFSFetcher('/tmp/deltasource', deltaStorage)
- # wrapper takes only paths as constructor arguments,
- # flags like useSeq or useOldRpms can be set on every method invocation
- wrapper = DeltaRpmWrapper(storageDir, oldRpms)
- # 3. ifSeq:
- if useSeq:
- # 3.1. download seq
- seqfile = fetcher.fetchSequence(oldrpm, targetrpm)
- if DEBUG:
- print 'DEBUG: %s.getPackageFromDelta: received seq in %s' % (__name__, seqfile)
- # 3.2 verify seq
- wrapper.verifySequence(seqfile, oldrpm, useOldRpms)
- # 4. download deltarpm
- deltafile = fetcher.fetchDeltaRpm(oldrpm, targetrpm)
- if DEBUG:
- print 'DEBUG: %s.getPackageFromDelta: received rpm in %s' % (__name__, deltafile)
- # 5. regenerate newrpm
- newfile = wrapper.apply(oldrpm, targetrpm, deltafile, useOldRpms)
- # output some statistics ;)
- print "successfully reconstructed %s - %d bytes tranferred instead of %d" % (targetrpm, os.stat(deltafile).st_size, os.stat(newfile).st_size)
- # done, cleanup
- # 6. delete seq and deltarpm file if keepAfterInstall is not set
- if cfg.has_key('keepAfterInstall') and cfg['keepAfterInstall']:
- pass
- else:
- # let mkdir operation without try/except as failure would mean that something is really broken
- # up2date would fallback to its retrieval and therefore not rely at all on this code ;)
- if DEBUG:
- print 'DEBUG: %s.getPackageFromDelta: rm(%s)' % (__name__, deltafile)
- os.unlink(deltafile)
- if useSeq:
- if DEBUG:
- print 'DEBUG: %s.getPackageFromDelta: rm(%s)' % (__name__, seqfile)
- os.unlink(seqfile)
- return newfile
-
-def getDrpmName(oldrpm, targetrpm):
- """Get delta rpm name from old, new rpms, and suffix"""
- dver = "_".join([targetrpm.version, oldrpm.version] )
- drel = "_".join([targetrpm.release ,oldrpm.release] )
- drpmName = '%s-%s-%s.%s' % (oldrpm.name, dver, drel, oldrpm.arch)
- return drpmName
-
-if __name__ == '__main__':
- import sys
- arg = sys.argv[1]
- newrpm = RpmDescription(arg,'1.0.6','1.4.1','i386')
- old = getInstalled(newrpm, 0)
- old = getInstalled(newrpm, 10000000)
-
- print old.rhnFileName()
- #p = Process()
- #p.run('find','/var/Satellite','-xdev')
- #print p.getOutput()
- #print p.returnCode()
- print
diff --git a/makerepo/createprestorepo.py b/makerepo/createprestorepo.py
new file mode 100755
index 0000000..0fee5b4
--- /dev/null
+++ b/makerepo/createprestorepo.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python -t
+# -*- mode: Python; indent-tabs-mode: nil; -*-
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+import errno, os, sys, gzip
+import fnmatch, re
+import rpmUtils.transaction, rpmUtils.miscutils
+import commands, libxml2
+import dumpMetadata
+from dumpMetadata import _gzipOpen, getChecksum
+#### import Utils
+
+DEBUG = False
+#### Utils.setdebug(DEBUG)
+
+SUFFIX='drpm'
+DRPMWORTHKEEPINGTHRESH=0.5
+DEBUG=0
+REPODATA="repodata"
+REPOFILE="presto.xml"
+REPOMDFILE="prestomd.xml"
+SUM_TYPE="sha"
+
+def XML_start_newrpm(node, (f, n, e, v, r, a), srcdir_len):
+ newrpm_node = node.newChild(None, "package", None)
+ newrpm_node.newProp("type", "rpm")
+ newrpm_node.newChild(None, "name", n)
+ newrpm_node.newChild(None, "arch", str(a))
+ version = newrpm_node.newChild(None, "version", None)
+ version.newProp("epoch", str(e))
+ version.newProp("ver", str(v))
+ version.newProp("rel", str(r))
+ deltas = newrpm_node.newChild(None, "deltas", None)
+ return deltas
+
+def XML_oldrpm(newrpm_node, drpm_file, oldrpm, newrpm, sequence, size):
+ (f, n, e, v, r, a) = oldrpm
+ (nf, nn, ne, nv, nr, na) = newrpm
+ oldrpm_node = newrpm_node.newChild(None, "oldrpm", None)
+ checksum = getChecksum(SUM_TYPE, drpm_file)
+ if n != nn:
+ oldrpm_node.newChild(None, "name", n)
+ if a != na:
+ oldrpm_node.newChild(None, "arch", str(a))
+ version = oldrpm_node.newChild(None, "version", None)
+ if e != ne:
+ version.newProp("epoch", str(e))
+ if v != nv:
+ version.newProp("ver", str(v))
+ version.newProp("rel", str(r))
+ oldrpm_node.newChild(None, "drpm_filename", drpm_file)
+ oldrpm_node.newChild(None, "size", str(size))
+ oldrpm_node.newChild(None, "sequence", str(sequence))
+ cs_node = oldrpm_node.newChild(None, "checksum", str(checksum))
+ cs_node.newProp("type", SUM_TYPE)
+
+def startXML():
+ basedoc = libxml2.newDoc("1.0")
+ baseroot = basedoc.newChild(None, "metadata", None)
+ basens = baseroot.newNs('http://linux.duke.edu/metadata/common', None)
+ formatns = baseroot.newNs('http://linux.duke.edu/metadata/rpm', 'rpm')
+ baseroot.setNs(basens)
+ return (basedoc, baseroot)
+
+def endXML(xmldoc, filename, srcdir, compressed=True):
+ if compressed:
+ outfile = _gzipOpen("%s%s/%s.gz" % (srcdir, REPODATA, filename), "w")
+ output = xmldoc.serialize('UTF-8', 1)
+ outfile.write(output)
+ outfile.close()
+ else:
+ xmldoc.saveFormatFileEnc("%s%s/%s" % (srcdir, REPODATA, filename), 'UTF-8', 1)
+ xmldoc.freeDoc()
+
+def repoXML(srcdir):
+ """generate the repomd.xml file that stores the info on the other files"""
+ repodoc = libxml2.newDoc("1.0")
+ reporoot = repodoc.newChild(None, "repomd", None)
+ repons = reporoot.newNs('http://linux.duke.edu/metadata/repo', None)
+ reporoot.setNs(repons)
+ repofilepath = "%s%s/%s" % (srcdir, REPODATA, REPOMDFILE)
+ filename = "%s%s/%s.gz" % (srcdir, REPODATA, REPOFILE)
+ filetype = "deltas"
+ zfo = _gzipOpen(filename, "rb")
+ uncsum = getChecksum(SUM_TYPE, zfo)
+ zfo.close()
+ csum = getChecksum(SUM_TYPE, filename)
+ timestamp = os.stat(filename)[8]
+ data = reporoot.newChild(None, 'data', None)
+ data.newProp('type', filetype)
+ location = data.newChild(None, 'location', None)
+ location.newProp('href', "%s/%s.gz" % (REPODATA, REPOFILE))
+ checksum = data.newChild(None, 'checksum', csum)
+ checksum.newProp('type', SUM_TYPE)
+ timestamp = data.newChild(None, 'timestamp', str(timestamp))
+ unchecksum = data.newChild(None, 'open-checksum', uncsum)
+ unchecksum.newProp('type', SUM_TYPE)
+ endXML(repodoc, REPOMDFILE, srcdir, False)
+
+def genDeltaRPM(ts, newrpm, oldrpm, is_new_package, srcdir, dstdir, locroot):
+ (f1,n1,e1,v1,r1,a1) = newrpm
+ (f2,n2,e2,v2,r2,a2) = oldrpm
+ hdr = rpmUtils.miscutils.hdrFromPackage(ts,f1)
+ arch = hdr['arch']
+ v12 = "_".join([v1,v2])
+ r12 = "_".join([r1,r2])
+ deltaRPMName= '%s/%s.%s.%s' % (dstdir, "-".join([n1,v12,r12]), a1, SUFFIX)
+ if DEBUG:
+ print "DEBUG " + deltaCommand
+ # If the drpm doesn't exists, make it, else skip it
+ if os.path.exists("%s%s" % (srcdir, deltaRPMName)):
+ dsize = os.path.getsize("%s%s" % (srcdir, deltaRPMName))
+ if e1 == e2:
+ print 'Using pre-generated delta rpm for %s.%s - %s.%s => %s.%s' % (n1, a1, v2, r2, v1, r1)
+ else:
+ print 'Using pre-generated delta rpm for %s.%s - %s:%s.%s => %s:%s.%s' % (n1, a1, e2, v2, r2, e1, v1, r1)
+ # Get checksum
+ seqfile = open("%s%s.seq" % (srcdir, deltaRPMName), "r")
+ sequence = seqfile.read()[:-1]
+ sequence = sequence[sequence.rfind("-")+1:]
+ seqfile.close()
+ if is_new_package:
+ locroot = XML_start_newrpm(locroot, newrpm, len(srcdir))
+ is_new_package = False
+ XML_oldrpm(locroot, deltaRPMName, oldrpm, newrpm, sequence, dsize)
+ if DEBUG:
+ print "DEBUG skipping %s" % (deltaRPMName)
+ elif os.path.exists("%s%s.dontdelta" % (srcdir, deltaRPMName)) or os.path.getsize(f1) > 70000000:
+ pass
+ else:
+ deltaCommand = 'makedeltarpm -s %s%s.seq %s %s %s%s' % (srcdir, deltaRPMName, f2, f1, srcdir, deltaRPMName)
+ (code, out) = commands.getstatusoutput(deltaCommand)
+ if code:
+ #raise Exception("genDeltaRPM: exitcode was %s - Reported Error: %s" % (code, out))
+ print "Error genDeltaRPM for %s: exitcode was %s - Reported Error: %s" % (n1, code, out)
+
+ # Get size
+ dsize = os.path.getsize("%s%s" % (srcdir, deltaRPMName))
+
+ # Get checksum
+ seqfile = open("%s%s.seq" % (srcdir, deltaRPMName), "r")
+ sequence = seqfile.read()[:-1]
+ sequence = sequence[sequence.rfind("-")+1:]
+ seqfile.close()
+
+ # Check whether or not we should keep the drpm
+ if not drpmIsWorthKeeping(deltaRPMName, f1, srcdir):
+ if DEBUG:
+ print 'deleting %s' % (deltaRPMName)
+ try:
+ os.unlink("%s%s" % (srcdir, deltaRPMName))
+ except Exception, e:
+ print "Error deleting deltarpm %s" % (deltaRPMName), str(e)
+ try:
+ os.unlink("%s%s.seq" % (srcdir, deltaRPMName))
+ except Exception, e:
+ print "Error deleting checksum %s.seq" % (deltaRPMName), str(e)
+ f = open("%s%s.dontdelta" % (srcdir, deltaRPMName), "w")
+ f.close()
+ else:
+ if e1 == e2:
+ print 'Generated delta rpm for %s.%s - %s.%s => %s.%s' % (n1, a1, v2, r2, v1, r1)
+ else:
+ print 'Generated delta rpm for %s.%s - %s:%s.%s => %s:%s.%s' % (n1, a1, e2, v2, r2, e1, v1, r1)
+
+ if is_new_package:
+ locroot = XML_start_newrpm(locroot, newrpm, len(srcdir))
+ is_new_package = False
+ XML_oldrpm(locroot, deltaRPMName, oldrpm, newrpm, sequence, dsize)
+ return (is_new_package, locroot)
+
+def drpmIsWorthKeeping(deltaRPMName, newrpm, srcdir):
+ newsize = os.path.getsize(newrpm)
+ drpmsize = os.path.getsize("%s%s" % (srcdir, deltaRPMName))
+ # Delete the drpm if it's too large
+ if drpmsize > DRPMWORTHKEEPINGTHRESH * newsize:
+ return False
+ return True
+
+def createPrestoRepo(srcdir, dstdir):
+ ts = rpmUtils.transaction.initReadOnlyTransaction()
+ changed = False
+
+ # Create list of .rpm files.
+ # We don't use "glob", so sub-directories are supported.
+ print 'Using source dir: %s' % srcdir
+ print 'Using destination dir: %s' % dstdir
+ if dstdir[-1] == "/":
+ dstdir = dstdir[:-1]
+ srcfiles = []
+ for root, dirs, files in os.walk(srcdir):
+ for f in fnmatch.filter(files,'*.rpm'):
+ srcfiles.append(os.path.join(root,f))
+ if not len(srcfiles):
+ print ' Nothing found.'
+ return changed
+ assert srcfiles[0].startswith(srcdir)
+
+ # Check whether dstdir exists, and if it doesn't, create it
+ if not os.access(dstdir, os.F_OK):
+ os.makedirs(dstdir, 0755)
+ elif not os.access(dstdir, os.W_OK):
+ print 'ERROR: Unable to write to %s' % dstdir
+ sys.exit(1)
+
+ # Check whether REPODATA exists, and if it doesn't, create it
+ if not os.access("%s%s" % (srcdir, REPODATA), os.F_OK):
+ os.makedirs("%s%s" % (srcdir, REPODATA), 0755)
+ elif not os.access(dstdir, os.W_OK):
+ print 'ERROR: Unable to write to %s' % REPODATA
+ sys.exit(1)
+
+ # Create XML document
+# xmldoc = libxml2.newDoc("1.0")
+# xmlroot = xmldoc
+ (xmldoc, xmlroot) = startXML()
+
+ # Create map: rpm %name -> list of tuples (filename,name,e,v,r)
+ newestsrcrpms = {}
+ for f in srcfiles:
+ hdr = rpmUtils.miscutils.hdrFromPackage(ts, f)
+ nm = hdr['name'] + "." + hdr['arch']
+ n = hdr['name']
+ a = hdr['arch']
+ v = hdr['version']
+ r = hdr['release']
+ e = hdr['epoch']
+ if e is None:
+ e = 0
+ newestsrcrpms.setdefault(nm,[])
+ newestsrcrpms[nm].append((f,n,e,v,r,a))
+
+ # Now purge old src.rpm unless their %name matches a white-list pattern.
+ for l in newestsrcrpms.itervalues():
+ x = len(l)
+
+ if x > 1:
+ def sortByEVR(fnevr1, fnevr2):
+ (f1,n1,e1,v1,r1,a1) = fnevr1
+ (f2,n2,e2,v2,r2,a2) = fnevr2
+ rc = rpmUtils.miscutils.compareEVR((e1,v1,r1),(e2,v2,r2))
+ if rc == 0:
+ return 0
+ if rc > 0:
+ return -1
+ if rc < 0:
+ return 1
+
+ l.sort(sortByEVR) # highest first in list
+
+ # Generate delta rpm
+ is_new_package = True
+ locroot = xmlroot
+ for rpm in l[1:]:
+ (is_new_package, locroot) = genDeltaRPM(ts, l[0], rpm, is_new_package, srcdir, dstdir, locroot)
+
+ if not len(srcfiles):
+ print 'WARNING: No .rpms left. Stopping here.'
+ return changed
+
+ # Write out end of deltas.xml file
+ endXML(xmldoc, REPOFILE, srcdir, True)
+ repoXML(srcdir)
+
+ # Examine binary repository directories and remove everything which
+ # is missing its corresponding src.rpm.
+ return changed
+
+
+def main(bin_rpm_path, delta_rpm_path):
+ assert rpmUtils.miscutils.compareEVR((1,2,3),(1,2,0)) > 0
+ assert rpmUtils.miscutils.compareEVR((0,1,2),(0,1,2)) == 0
+ assert rpmUtils.miscutils.compareEVR((1,2,3),(4,0,99)) < 0
+
+ return createPrestoRepo(bin_rpm_path, delta_rpm_path)
+
+
+if __name__ == '__main__':
+ if len(sys.argv) < 2:
+ print 'Usage: %s <bin_rpm_dir> <delta_rpm_dir> \n' % os.path.basename(sys.argv[0])
+ sys.exit(errno.EINVAL)
+ bin_rpm_path = sys.argv[1]
+ delta_rpm_path = sys.argv[2]
+
+ #### cfg = Utils.load_config_module(sys.argv[1])
+
+ #### Utils.signer_gid_check(cfg.signersgid)
+ #### os.umask(cfg.signersumask)
+
+ #### for dist in sys.argv[2:]:
+ #### if not cfg.archdict.has_key(dist):
+ #### print "No distribution release named '%s' found" % dist
+ #### sys.exit(errno.EINVAL)
+ main(bin_rpm_path, delta_rpm_path)
+ sys.exit(0)
diff --git a/makerepo/dumpMetadata.py b/makerepo/dumpMetadata.py
new file mode 100755
index 0000000..4ca0793
--- /dev/null
+++ b/makerepo/dumpMetadata.py
@@ -0,0 +1,782 @@
+#!/usr/bin/python -t
+# base classes and functions for dumping out package Metadata
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Copyright 2004 Duke University
+
+# $Id: dumpMetadata.py,v 1.36 2006/02/21 20:10:08 pnasrat Exp $
+
import exceptions
import hashlib
import md5
import os
import re
import rpm
import sha
import stat
import struct
import types
+
+# done to fix gzip randomly changing the checksum
+import gzip
+from zlib import error as zlibError
+from gzip import write32u, FNAME
+
+__all__ = ["GzipFile","open"]
+
class GzipFile(gzip.GzipFile):
    """gzip.GzipFile that writes a deterministic header.

    The stock _write_gzip_header() embeds the current mtime, so compressing
    identical data at different times yields different bytes (and therefore
    different checksums).  This override writes a fixed zero timestamp so
    the repo metadata checksums are reproducible.
    """
    def _write_gzip_header(self):
        self.fileobj.write('\037\213')          # magic header
        self.fileobj.write('\010')              # compression method (deflate)
        fname = self.filename[:-3]              # stored name with ".gz" stripped
        flags = 0
        if fname:
            flags = FNAME                       # flag: original file name present
        self.fileobj.write(chr(flags))
        write32u(self.fileobj, long(0))         # mtime forced to 0 (see docstring)
        self.fileobj.write('\002')              # XFL: slowest/max compression
        self.fileobj.write('\377')              # OS byte: unknown
        if fname:
            self.fileobj.write(fname + '\000')  # NUL-terminated file name
+
+
def _gzipOpen(filename, mode="rb", compresslevel=9):
    """Open *filename* through the checksum-stable GzipFile subclass above."""
    return GzipFile(filename=filename, mode=mode, compresslevel=compresslevel)
+
+
+
def returnFD(filename):
    """Return a read-only OS-level file descriptor for *filename*.

    Raises MDError if the file cannot be opened.  The caller owns the fd
    and is responsible for closing it.
    """
    try:
        return os.open(filename, os.O_RDONLY)
    except OSError:
        raise MDError("Error opening file")
+
def returnHdr(ts, package):
    """hand back the rpm header or raise an Error if the pkg is fubar"""
    # *package* may be a path string (opened and closed here) or an
    # already-open file descriptor (left open for the caller).
    opened_here = 0
    try:
        if type(package) is types.StringType:
            opened_here = 1
            fdno = os.open(package, os.O_RDONLY)
        else:
            fdno = package # let's assume this is an fdno and go with it :)
    except OSError:
        raise MDError, "Error opening file"
    # Skip signature/md5 verification so unsigned or foreign-key packages
    # can still be read; verification flags are restored to 0 afterwards.
    ts.setVSFlags((rpm._RPMVSF_NOSIGNATURES|rpm.RPMVSF_NOMD5|rpm.RPMVSF_NEEDPAYLOAD))
    try:
        hdr = ts.hdrFromFdno(fdno)
    except rpm.error:
        raise MDError, "Error opening package"
    # hdrFromFdno can hand back non-header objects on some failures.
    if type(hdr) != rpm.hdr:
        raise MDError, "Error opening package"
    ts.setVSFlags(0)

    if opened_here:
        os.close(fdno)
        del fdno

    return hdr
+
def getChecksum(sumtype, file, CHUNK=2**16):
    """takes filename, hand back Checksum of it
    sumtype = md5 or sha
    file = /path/to/file or an open file-like object
    CHUNK=65536 by default

    Raises MDError for an unknown sumtype or an unreadable file.
    """
    # chunking brazenly lifted from Ryan Tomayko
    # Validate the sumtype up front: the old code did this inside a bare
    # "except:" that re-raised *every* error -- including its own
    # wrong-sumtype MDError -- as a misleading "Error opening file" message.
    # hashlib (Python 2.5+) replaces the deprecated md5/sha modules.
    if sumtype == 'md5':
        sum = hashlib.md5()
    elif sumtype == 'sha':
        sum = hashlib.sha1()
    else:
        raise MDError('Error Checksumming file, wrong checksum type %s' % sumtype)

    opened_here = 0
    try:
        if isinstance(file, str):
            opened_here = 1
            fo = open(file, 'rb', CHUNK)
        else:
            fo = file  # assume it's a file-like-object
    except (IOError, OSError):
        raise MDError('Error opening file for checksum: %s' % file)

    try:
        try:
            # Stream in CHUNK-sized pieces.  The old "chunk = fo.read"
            # priming trick worked but read one extra empty chunk and
            # obscured the loop condition.
            while True:
                chunk = fo.read(CHUNK)
                if not chunk:
                    break
                sum.update(chunk)
        except (IOError, OSError):
            raise MDError('Error opening file for checksum: %s' % file)
    finally:
        # Only close what we opened; a caller-supplied object stays open.
        if opened_here:
            fo.close()

    return sum.hexdigest()
+
+
def utf8String(string):
    """hands back a unicoded string"""
    # None becomes '', already-decoded unicode and plain ASCII pass through.
    if string is None:
        return ''
    elif isinstance(string, unicode):
        return string
    try:
        x = unicode(string, 'ascii')
        return string
    except UnicodeError:
        # Try a short list of likely encodings; the round-trip check
        # (x.encode(enc) == string) guards against a lossy decode.  The
        # winner is re-encoded as UTF-8.
        encodings = ['utf-8', 'iso-8859-1', 'iso-8859-15', 'iso-8859-2']
        for enc in encodings:
            try:
                x = unicode(string, enc)
            except UnicodeError:
                pass
            else:
                if x.encode(enc) == string:
                    return x.encode('utf-8')
        # Last resort: replace every non-ASCII byte with '?'.
        newstring = ''
        for char in string:
            if ord(char) > 127:
                newstring = newstring + '?'
            else:
                newstring = newstring + char
        return newstring
+
+
def byteranges(file):
    """takes an rpm file or fileobject and returns byteranges for location of the header

    Returns (hdrstart, hdrend) byte offsets of the rpm's main header
    section.  *file* may be a path (opened and closed here) or an open
    *binary* file-like object (seeked explicitly, left open).
    """
    opened_here = 0
    if not isinstance(file, str):
        fo = file
    else:
        opened_here = 1
        # Open in *binary* mode: the old text-mode 'r' breaks the offset
        # arithmetic on platforms that translate line endings, and breaks
        # struct.unpack on Python 3-style str reads.
        fo = open(file, 'rb')
    #read in past lead and first 8 bytes of sig header
    fo.seek(104)
    # 104 bytes in
    binindex = fo.read(4)
    # 108 bytes in
    (sigindex, ) = struct.unpack('>I', binindex)
    bindata = fo.read(4)
    # 112 bytes in
    (sigdata, ) = struct.unpack('>I', bindata)
    # each index is 4 32bit segments - so each is 16 bytes
    sigindexsize = sigindex * 16
    sigsize = sigdata + sigindexsize
    # we have to round off to the next 8 byte boundary
    disttoboundary = (sigsize % 8)
    if disttoboundary != 0:
        disttoboundary = 8 - disttoboundary
    # 112 bytes - 96 == lead, 8 = magic and reserved, 8 == sig header data
    hdrstart = 112 + sigsize + disttoboundary

    fo.seek(hdrstart) # go to the start of the header
    fo.seek(8,1) # read past the magic number and reserved bytes

    binindex = fo.read(4)
    (hdrindex, ) = struct.unpack('>I', binindex)
    bindata = fo.read(4)
    (hdrdata, ) = struct.unpack('>I', bindata)

    # each index is 4 32bit segments - so each is 16 bytes
    hdrindexsize = hdrindex * 16
    # add 16 to the hdrsize to account for the 16 bytes of misc data b/t the
    # end of the sig and the header.
    hdrsize = hdrdata + hdrindexsize + 16

    # header end is hdrstart + hdrsize
    hdrend = hdrstart + hdrsize
    if opened_here:
        fo.close()
        del fo
    return (hdrstart, hdrend)
+
+
class MDError(exceptions.Exception):
    """Generic error raised throughout this module for metadata failures."""
    def __init__(self, args=None):
        exceptions.Exception.__init__(self)
        # NOTE(review): this rebinds the built-in Exception.args (normally
        # a tuple) to whatever was passed -- usually a bare message string,
        # possibly None.  Confirm callers only treat it as an opaque message.
        self.args = args
+
+
+
class RpmMetaData:
    """each rpm is one object, you pass it an rpm file
    it opens the file, and pulls the information out in bite-sized chunks :)
    """

    # Class-wide cache mapping a raw file mode -> stat.S_ISDIR(mode), so the
    # directory test is computed once per distinct mode across all packages.
    mode_cache = {}

    def __init__(self, ts, basedir, filename, options):
        # ts       -- rpm transaction set used to read the package header
        # basedir  -- directory the package lives in
        # filename -- package path relative to basedir
        # options  -- dict of settings; keys read here: 'baseurl', 'cache',
        #             'cachedir', 'sumtype', 'file-pattern-match',
        #             'dir-pattern-match'
        try:
            stats = os.stat(os.path.join(basedir, filename))
            self.size = stats[6]    # st_size
            self.mtime = stats[8]   # st_mtime (also drives checksum caching)
            del stats
        except OSError, e:
            raise MDError, "Error Stat'ing file %s %s" % (basedir, filename)
        self.options = options
        self.localurl = options['baseurl']
        self.relativepath = filename
        # One fd, three passes: read the header, rewind and checksum the
        # whole file, rewind again and locate the header's byte range.
        fd = returnFD(os.path.join(basedir, filename))
        self.hdr = returnHdr(ts, fd)
        os.lseek(fd, 0, 0)
        fo = os.fdopen(fd, 'rb')
        self.pkgid = self.doChecksumCache(fo)
        fo.seek(0)
        (self.rangestart, self.rangeend) = byteranges(fo)
        fo.close()
        del fo
        del fd

        # setup our regex objects
        fileglobs = options['file-pattern-match']
        #['.*bin\/.*', '^\/etc\/.*', '^\/usr\/lib\/sendmail$']
        dirglobs = options['dir-pattern-match']
        #['.*bin\/.*', '^\/etc\/.*']
        self.dirrc = []
        self.filerc = []
        for glob in fileglobs:
            self.filerc.append(re.compile(glob))

        for glob in dirglobs:
            self.dirrc.append(re.compile(glob))

        self.filenames = []
        self.dirnames = []
        self.ghostnames = []
        self.genFileLists()

    def arch(self):
        """Return the package architecture ('src' for source packages)."""
        if self.tagByName('sourcepackage') == 1:
            return 'src'
        else:
            return self.tagByName('arch')

    def _correctFlags(self, flags):
        """Normalize a dependency-flags header value to a list, keeping only
        the low comparison bits (& 0xf) of each entry."""
        returnflags=[]
        if flags is None:
            return returnflags

        if type(flags) is not types.ListType:
            newflag = flags & 0xf
            returnflags.append(newflag)
        else:
            for flag in flags:
                newflag = flag
                if flag is not None:
                    newflag = flag & 0xf
                returnflags.append(newflag)
        return returnflags

    def _checkPreReq(self, flags):
        """Return a parallel list of 1/0 marking which requires are
        pre-requisites -- flag bit 64 set (presumably RPMSENSE_PREREQ;
        TODO confirm against the target rpm version)."""
        reqs=[]
        if flags is None:
            return reqs

        if type(flags) is not types.ListType:
            flags = [flags]
        for flag in flags:
            newflag = flag
            if flag is not None:
                newflag = flag & 64
                if newflag == 64:
                    reqs.append(1)
                else:
                    reqs.append(0)
        return reqs


    def _correctVersion(self, vers):
        """Normalize a version header value (scalar, list or None) to a list
        of (epoch, version, release) tuples."""
        returnvers = []
        vertuple = (None, None, None)
        if vers is None:
            returnvers.append(vertuple)
            return returnvers

        if type(vers) is not types.ListType:
            if vers is not None:
                vertuple = self._stringToVersion(vers)
            else:
                vertuple = (None, None, None)
            returnvers.append(vertuple)
        else:
            for ver in vers:
                if ver is not None:
                    vertuple = self._stringToVersion(ver)
                else:
                    vertuple = (None, None, None)
                returnvers.append(vertuple)
        return returnvers


    def _stringToVersion(self, strng):
        """Split '[epoch:]version[-release]' into (epoch, version, release).
        Epoch defaults to '0'; empty version/release come back as None.
        (When there is no ':', i is -1 so i + 1 == 0 -- slice from start.)"""
        i = strng.find(':')
        if i != -1:
            epoch = strng[:i]
        else:
            epoch = '0'
        j = strng.find('-')
        if j != -1:
            if strng[i + 1:j] == '':
                version = None
            else:
                version = strng[i + 1:j]
            release = strng[j + 1:]
        else:
            if strng[i + 1:] == '':
                version = None
            else:
                version = strng[i + 1:]
            release = None
        return (epoch, version, release)

    ###########
    # Title: Remove duplicates from a sequence
    # Submitter: Tim Peters
    # From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560

    def _uniq(self,s):
        """Return a list of the elements in s, but without duplicates.

        For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
        unique("abcabc") some permutation of ["a", "b", "c"], and
        unique(([1, 2], [2, 3], [1, 2])) some permutation of
        [[2, 3], [1, 2]].

        For best speed, all sequence elements should be hashable.  Then
        unique() will usually work in linear time.

        If not possible, the sequence elements should enjoy a total
        ordering, and if list(s).sort() doesn't raise TypeError it's
        assumed that they do enjoy a total ordering.  Then unique() will
        usually work in O(N*log2(N)) time.

        If that's not possible either, the sequence elements must support
        equality-testing.  Then unique() will usually work in quadratic
        time.
        """

        n = len(s)
        if n == 0:
            return []

        # Try using a dict first, as that's the fastest and will usually
        # work.  If it doesn't work, it will usually fail quickly, so it
        # usually doesn't cost much to *try* it.  It requires that all the
        # sequence elements be hashable, and support equality comparison.
        u = {}
        try:
            for x in s:
                u[x] = 1
        except TypeError:
            del u  # move on to the next method
        else:
            return u.keys()

        # We can't hash all the elements.  Second fastest is to sort,
        # which brings the equal elements together; then duplicates are
        # easy to weed out in a single pass.
        # NOTE:  Python's list.sort() was designed to be efficient in the
        # presence of many duplicate elements.  This isn't true of all
        # sort functions in all languages or libraries, so this approach
        # is more effective in Python than it may be elsewhere.
        try:
            t = list(s)
            t.sort()
        except TypeError:
            del t  # move on to the next method
        else:
            assert n > 0
            last = t[0]
            lasti = i = 1
            while i < n:
                if t[i] != last:
                    t[lasti] = last = t[i]
                    lasti += 1
                i += 1
            return t[:lasti]

        # Brute force is all that's left.
        u = []
        for x in s:
            if x not in u:
                u.append(x)
        return u

    def tagByName(self, tag):
        """Return a scalar header value; for list-valued tags, the first
        element (or '' when the list is empty)."""
        data = self.hdr[tag]
        if type(data) is types.ListType:
            if len(data) > 0:
                return data[0]
            else:
                return ''
        else:
            return data

    def listTagByName(self, tag):
        """take a tag that should be a list and make sure it is one"""
        lst = []
        data = self.hdr[tag]
        if data is None:
            return lst

        if type(data) is types.ListType:
            lst.extend(data)
        else:
            lst.append(data)
        return lst


    def epoch(self):
        """Return the package epoch, defaulting to 0 when unset."""
        if self.hdr['epoch'] is None:
            return 0
        else:
            return self.tagByName('epoch')

    def genFileLists(self):
        """produces lists of dirs and files for this header in two lists"""

        files = self.listTagByName('filenames')
        fileflags = self.listTagByName('fileflags')
        filemodes = self.listTagByName('filemodes')
        filetuple = zip(files, filemodes, fileflags)
        for (file, mode, flag) in filetuple:
            #garbage checks
            if mode is None or mode == '':
                self.filenames.append(file)
                continue
            if not RpmMetaData.mode_cache.has_key(mode):
                RpmMetaData.mode_cache[mode] = stat.S_ISDIR(mode)
            if RpmMetaData.mode_cache[mode]:
                self.dirnames.append(file)
            else:
                if flag is None:
                    self.filenames.append(file)
                else:
                    # flag bit 64 marks ghost files -- presumably
                    # RPMFILE_GHOST; TODO confirm.
                    if (flag & 64):
                        self.ghostnames.append(file)
                        continue
                    self.filenames.append(file)


    def usefulFiles(self):
        """search for good files"""
        # Returns {filename: 1} for files matching any file-pattern regex.
        returns = {}
        for item in self.filenames:
            if item is None:
                continue
            for glob in self.filerc:
                if glob.match(item):
                    returns[item] = 1
        return returns

    def usefulGhosts(self):
        """search for useful ghost file names"""
        # Same shape as usefulFiles(), applied to ghost entries.
        returns = {}
        for item in self.ghostnames:
            if item is None:
                continue
            for glob in self.filerc:
                if glob.match(item):
                    returns[item] = 1
        return returns


    def usefulDirs(self):
        """search for good dirs"""
        # NOTE(review): returns a list (returns.keys()) while usefulFiles()
        # and usefulGhosts() return the dict itself -- both iterate fine,
        # but the asymmetry is easy to trip over.
        returns = {}
        for item in self.dirnames:
            if item is None:
                continue
            for glob in self.dirrc:
                if glob.match(item):
                    returns[item] = 1
        return returns.keys()


    def depsList(self):
        """returns a list of tuples of dependencies"""
        # these should probably compress down duplicates too
        lst = []
        names = self.hdr[rpm.RPMTAG_REQUIRENAME]
        tmpflags = self.hdr[rpm.RPMTAG_REQUIREFLAGS]
        flags = self._correctFlags(tmpflags)
        prereq = self._checkPreReq(tmpflags)
        ver = self._correctVersion(self.hdr[rpm.RPMTAG_REQUIREVERSION])
        if names is not None:
            lst = zip(names, flags, ver, prereq)
        return self._uniq(lst)

    def obsoletesList(self):
        """Return unique (name, flags, (e,v,r)) tuples of Obsoletes."""
        lst = []
        names = self.hdr[rpm.RPMTAG_OBSOLETENAME]
        tmpflags = self.hdr[rpm.RPMTAG_OBSOLETEFLAGS]
        flags = self._correctFlags(tmpflags)
        ver = self._correctVersion(self.hdr[rpm.RPMTAG_OBSOLETEVERSION])
        if names is not None:
            lst = zip(names, flags, ver)
        return self._uniq(lst)

    def conflictsList(self):
        """Return unique (name, flags, (e,v,r)) tuples of Conflicts."""
        lst = []
        names = self.hdr[rpm.RPMTAG_CONFLICTNAME]
        tmpflags = self.hdr[rpm.RPMTAG_CONFLICTFLAGS]
        flags = self._correctFlags(tmpflags)
        ver = self._correctVersion(self.hdr[rpm.RPMTAG_CONFLICTVERSION])
        if names is not None:
            lst = zip(names, flags, ver)
        return self._uniq(lst)

    def providesList(self):
        """Return unique (name, flags, (e,v,r)) tuples of Provides."""
        lst = []
        names = self.hdr[rpm.RPMTAG_PROVIDENAME]
        tmpflags = self.hdr[rpm.RPMTAG_PROVIDEFLAGS]
        flags = self._correctFlags(tmpflags)
        ver = self._correctVersion(self.hdr[rpm.RPMTAG_PROVIDEVERSION])
        if names is not None:
            lst = zip(names, flags, ver)
        return self._uniq(lst)

    def changelogLists(self):
        """Return (author, time, text) tuples from the changelog tags."""
        lst = []
        names = self.listTagByName('changelogname')
        times = self.listTagByName('changelogtime')
        texts = self.listTagByName('changelogtext')
        if len(names) > 0:
            lst = zip(names, times, texts)
        return lst

    def doChecksumCache(self, fo):
        """return a checksum for a package:
        - check if the checksum cache is enabled
          if not - return the checksum
          if so - check to see if it has a cache file
            if so, open it and return the first line's contents
            if not, grab the checksum and write it to a file for this pkg
        """
        if not self.options['cache']:
            return getChecksum(self.options['sumtype'], fo)

        # Cache key combines the name with the header's SHA1 so a rebuilt
        # package with the same NEVRA still gets re-checksummed.
        csumtag = '%s-%s' % (self.hdr['name'] , self.hdr[rpm.RPMTAG_SHA1HEADER])
        csumfile = '%s/%s' % (self.options['cachedir'], csumtag)
        # A cache entry is only trusted if it is at least as new as the rpm.
        if os.path.exists(csumfile) and self.mtime <= os.stat(csumfile)[8]:
            csumo = open(csumfile, 'r')
            checksum = csumo.readline()
            csumo.close()

        else:
            checksum = getChecksum(self.options['sumtype'], fo)
            csumo = open(csumfile, 'w')
            csumo.write(checksum)
            csumo.close()

        return checksum
+
+
+
def generateXML(doc, node, formatns, rpmObj, sumtype):
    """takes an xml doc object and a package metadata entry node, populates a
    package node with the md information"""
    # NOTE(review): the *doc* parameter is unused -- every element is
    # created via node.newChild().
    ns = node.ns()
    pkgNode = node.newChild(None, "package", None)
    pkgNode.newProp('type', 'rpm')
    pkgNode.newChild(None, 'name', rpmObj.tagByName('name'))
    pkgNode.newChild(None, 'arch', rpmObj.arch())
    version = pkgNode.newChild(None, 'version', None)
    version.newProp('epoch', str(rpmObj.epoch()))
    version.newProp('ver', str(rpmObj.tagByName('version')))
    version.newProp('rel', str(rpmObj.tagByName('release')))
    csum = pkgNode.newChild(None, 'checksum', rpmObj.pkgid)
    csum.newProp('type', sumtype)
    csum.newProp('pkgid', 'YES')
    # Free-text tags: re-encode to UTF-8 and strip one trailing newline.
    for tag in ['summary', 'description', 'packager', 'url']:
        value = rpmObj.tagByName(tag)
        value = utf8String(value)
        value = re.sub("\n$", '', value)
        entry = pkgNode.newChild(None, tag, None)
        entry.addContent(value)

    time = pkgNode.newChild(None, 'time', None)
    time.newProp('file', str(rpmObj.mtime))
    time.newProp('build', str(rpmObj.tagByName('buildtime')))
    size = pkgNode.newChild(None, 'size', None)
    size.newProp('package', str(rpmObj.size))
    size.newProp('installed', str(rpmObj.tagByName('size')))
    size.newProp('archive', str(rpmObj.tagByName('archivesize')))
    location = pkgNode.newChild(None, 'location', None)
    if rpmObj.localurl is not None:
        location.newProp('xml:base', rpmObj.localurl)
    location.newProp('href', rpmObj.relativepath)
    format = pkgNode.newChild(ns, 'format', None)
    for tag in ['license', 'vendor', 'group', 'buildhost', 'sourcerpm']:
        value = rpmObj.tagByName(tag)
        value = utf8String(value)
        value = re.sub("\n$", '', value)
        entry = format.newChild(formatns, tag, None)
        entry.addContent(value)

    # Byte range of the rpm header (lets clients fetch just the header).
    hr = format.newChild(formatns, 'header-range', None)
    hr.newProp('start', str(rpmObj.rangestart))
    hr.newProp('end', str(rpmObj.rangeend))
    for (lst, nodename) in [(rpmObj.providesList(), 'provides'),
                            (rpmObj.conflictsList(), 'conflicts'),
                            (rpmObj.obsoletesList(), 'obsoletes')]:
        if len(lst) > 0:
            rpconode = format.newChild(formatns, nodename, None)
            for (name, flags, (e,v,r)) in lst:
                entry = rpconode.newChild(formatns, 'entry', None)
                entry.newProp('name', name)
                if flags != 0:
                    # Map masked dependency-sense bits to comparison strings.
                    # NOTE(review): if flags is nonzero but not one of
                    # 2/4/8/10/12, 'arg' is left unbound -> NameError below.
                    if flags == 2: arg = 'LT'
                    if flags == 4: arg = 'GT'
                    if flags == 8: arg = 'EQ'
                    if flags == 10: arg = 'LE'
                    if flags == 12: arg = 'GE'
                    entry.newProp('flags', arg)
                    # if we've got a flag we've got a version, I hope :)
                    if e:
                        entry.newProp('epoch', str(e))
                    if v:
                        entry.newProp('ver', str(v))
                    if r:
                        entry.newProp('rel', str(r))

    depsList = rpmObj.depsList()
    if len(depsList) > 0:
        rpconode = format.newChild(formatns, 'requires', None)
        for (name, flags, (e,v,r), prereq) in depsList:
            entry = rpconode.newChild(formatns, 'entry', None)
            entry.newProp('name', name)
            if flags != 0:
                # Same flag mapping (and same unbound-'arg' caveat) as above.
                if flags == 2: arg = 'LT'
                if flags == 4: arg = 'GT'
                if flags == 8: arg = 'EQ'
                if flags == 10: arg = 'LE'
                if flags == 12: arg = 'GE'
                entry.newProp('flags', arg)
                # if we've got a flag we've got a version, I hope :)
                if e:
                    entry.newProp('epoch', str(e))
                if v:
                    entry.newProp('ver', str(v))
                if r:
                    entry.newProp('rel', str(r))
            if prereq == 1:
                entry.newProp('pre', str(prereq))

    # Only "useful" (pattern-matched) files appear in primary metadata;
    # the full list goes into filelists via fileListXML().
    for file in rpmObj.usefulFiles():
        files = format.newChild(None, 'file', None)
        file = utf8String(file)
        files.addContent(file)
    for directory in rpmObj.usefulDirs():
        files = format.newChild(None, 'file', None)
        directory = utf8String(directory)
        files.addContent(directory)
        files.newProp('type', 'dir')
    for directory in rpmObj.usefulGhosts():
        files = format.newChild(None, 'file', None)
        directory = utf8String(directory)
        files.addContent(directory)
        files.newProp('type', 'ghost')

    return pkgNode
+
def fileListXML(doc, node, rpmObj):
    """Append a <package> entry (pkgid, name, arch, version and the full
    file list) to the filelists document and return the new node."""
    pkg = node.newChild(None, 'package', None)
    pkg.newProp('pkgid', rpmObj.pkgid)
    pkg.newProp('name', rpmObj.tagByName('name'))
    pkg.newProp('arch', rpmObj.arch())
    version = pkg.newChild(None, 'version', None)
    version.newProp('epoch', str(rpmObj.epoch()))
    version.newProp('ver', str(rpmObj.tagByName('version')))
    version.newProp('rel', str(rpmObj.tagByName('release')))
    # Emit plain files first, then dirs, then ghosts -- same order as the
    # three hand-written loops this replaces.
    for entries, ftype in ((rpmObj.filenames, None),
                           (rpmObj.dirnames, 'dir'),
                           (rpmObj.ghostnames, 'ghost')):
        for name in entries:
            elem = pkg.newChild(None, 'file', None)
            elem.addContent(utf8String(name))
            if ftype is not None:
                elem.newProp('type', ftype)
    return pkg
+
def otherXML(doc, node, rpmObj):
    """Append a <package> entry carrying the package's changelog records to
    the "other" metadata document and return the new node."""
    pkg = node.newChild(None, 'package', None)
    pkg.newProp('pkgid', rpmObj.pkgid)
    pkg.newProp('name', rpmObj.tagByName('name'))
    pkg.newProp('arch', rpmObj.arch())
    vnode = pkg.newChild(None, 'version', None)
    vnode.newProp('epoch', str(rpmObj.epoch()))
    vnode.newProp('ver', str(rpmObj.tagByName('version')))
    vnode.newProp('rel', str(rpmObj.tagByName('release')))
    # One <changelog> element per (author, timestamp, text) record.
    for author, date, text in rpmObj.changelogLists():
        clog = pkg.newChild(None, 'changelog', None)
        clog.addContent(utf8String(text))
        clog.newProp('author', utf8String(author))
        clog.newProp('date', str(date))
    return pkg
+
def repoXML(node, cmds):
    """generate the repomd.xml file that stores the info on the other files"""
    # For each generated metadata file, record its location, on-disk
    # checksum, mtime and uncompressed ("open") checksum under a <data>
    # element of the given repomd node.
    sumtype = cmds['sumtype']
    workfiles = [(cmds['otherfile'], 'other',),
                 (cmds['filelistsfile'], 'filelists'),
                 (cmds['primaryfile'], 'primary')]


    for (file, ftype) in workfiles:
        # Checksum of the uncompressed content (read back through gzip) ...
        zfo = _gzipOpen(os.path.join(cmds['outputdir'], cmds['tempdir'], file))
        uncsum = getChecksum(sumtype, zfo)
        zfo.close()
        # ... and of the compressed file exactly as it sits on disk.
        csum = getChecksum(sumtype, os.path.join(cmds['outputdir'], cmds['tempdir'], file))
        timestamp = os.stat(os.path.join(cmds['outputdir'], cmds['tempdir'], file))[8]
        data = node.newChild(None, 'data', None)
        data.newProp('type', ftype)
        location = data.newChild(None, 'location', None)
        if cmds['baseurl'] is not None:
            location.newProp('xml:base', cmds['baseurl'])
        location.newProp('href', os.path.join(cmds['finaldir'], file))
        checksum = data.newChild(None, 'checksum', csum)
        checksum.newProp('type', sumtype)
        # 'timestamp' is deliberately rebound from the stat value to the
        # new XML element here -- the int has already been serialized.
        timestamp = data.newChild(None, 'timestamp', str(timestamp))
        unchecksum = data.newChild(None, 'open-checksum', uncsum)
        unchecksum.newProp('type', sumtype)

    # if we've got a group file then checksum it once and be done
    if cmds['groupfile'] is not None:
        grpfile = cmds['groupfile']
        timestamp = os.stat(grpfile)[8]
        sfile = os.path.basename(grpfile)
        # Copy the group file into the temp output dir, then checksum the
        # same open handle after rewinding it.
        fo = open(grpfile, 'r')
        output = open(os.path.join(cmds['outputdir'], cmds['tempdir'], sfile), 'w')
        output.write(fo.read())
        output.close()
        fo.seek(0)
        csum = getChecksum(sumtype, fo)
        fo.close()

        data = node.newChild(None, 'data', None)
        data.newProp('type', 'group')
        location = data.newChild(None, 'location', None)
        if cmds['baseurl'] is not None:
            location.newProp('xml:base', cmds['baseurl'])
        location.newProp('href', os.path.join(cmds['finaldir'], sfile))
        checksum = data.newChild(None, 'checksum', csum)
        checksum.newProp('type', sumtype)
        timestamp = data.newChild(None, 'timestamp', str(timestamp))
diff --git a/presto.conf b/presto.conf
new file mode 100644
index 0000000..ea7df4c
--- /dev/null
+++ b/presto.conf
@@ -0,0 +1,3 @@
+[main]
+enabled=1
+neverkeepdeltas=0
diff --git a/presto.py b/presto.py
new file mode 100644
index 0000000..454d372
--- /dev/null
+++ b/presto.py
@@ -0,0 +1,127 @@
+# author: Jonathan Dieter <jdieter@gmail.com>
+#
+# heavily modified from yum-deltarpm.py created by
+# Lars Herrmann <herrmann@redhat.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Copyright 2005 Duke University
+
+from yum.plugins import TYPE_INTERACTIVE
+from yum import config
+
+import os
+import sys
+
+sys.path.append("/usr/share/presto")
+import deltarpm
+from prestoRepo import PrestoRepository
+from prestomdparser import PrestoMDParser
+import prestoTransaction
+
+requires_api_version = '2.1'
+plugin_type = (TYPE_INTERACTIVE,)
+
+# Setup repository specific deltarpm url and mirrorlist
def config_hook(conduit):
    """Register per-repo 'deltaurl' and 'deltamirrorlist' options on yum's
    RepoConf class so .repo files can point at delta-rpm metadata."""
    config.RepoConf.deltaurl = config.UrlListOption()
    config.RepoConf.deltamirrorlist = config.UrlOption()
+
+# Set up Presto repositories
def prereposetup_hook(conduit):
    """Attach a PrestoRepository to every enabled yum repo, then parse each
    repo's delta metadata into active_repo.p_repo.deltalist."""
    conduit.info(2, 'Setting up Presto')
    for active_repo in conduit.getRepos().listEnabled():
        p_repo = PrestoRepository(active_repo, conduit)
        p_repo.setup(conduit.getConf().cache)

    conduit.info(2, 'Reading Presto metadata in from local files')
    for active_repo in conduit.getRepos().listEnabled():
        xml = active_repo.p_repo.getPrestoXML()
        if active_repo.p_repo.enabled:
            xmldata = active_repo.p_repo.repoXML.getData('deltas')
            # NOTE(review): the checksum is unpacked but never verified
            # here -- confirm the 'deltas' data is validated elsewhere.
            (ctype, csum) = xmldata.checksum
            parser = PrestoMDParser(xml)
            active_repo.p_repo.deltalist = parser.getDeltaList()

    conduit.info(2, 'Setting up repositories')
+
+
def postresolve_hook(conduit):
    """After depsolving: for each transaction member, look for a usable
    delta rpm and, if found, rewrite the package object's size, path and
    checksum so the downloader fetches the .drpm instead of the full rpm.
    The original values are stashed in real* attributes and restored by
    postdownload_hook."""
    # Cycle through packages to see if there's a deltarpm available
    for newpkg in conduit.getTsInfo():
        if newpkg.ts_state != "e":
            chosen_drpm = prestoTransaction.find_available_drpms(conduit, newpkg)

            # If a drpm was found, change certain package information so it reflects
            # the drpm, not the rpm.
            if chosen_drpm != None:
                newpkg.po.hasdrpm = True
                newpkg.po.simple['realpackagesize'] = newpkg.po.simple['packagesize']
                newpkg.po.simple['packagesize'] = chosen_drpm['size']
                newpkg.po.simple['realrelativepath'] = newpkg.po.simple['relativepath']
                newpkg.po.simple['relativepath'] = chosen_drpm['drpm_filename']
                newpkg.po.reallocalpath = newpkg.po.localpath
                newpkg.po.localpath = newpkg.po.repo.deltasdir + "/" + os.path.basename(chosen_drpm['drpm_filename'])
                newpkg.po.to = newpkg
                newpkg.realpkgtup = newpkg.pkgtup
                newpkg.pkgtup = (newpkg.name + " *", newpkg.arch, newpkg.epoch, newpkg.version, newpkg.release)
                # Swap the pkgid checksum for the drpm's checksum.
                # NOTE(review): this removes from _checksums while iterating
                # it -- works only because at most one csumid entry exists.
                for (csum_type, csum, csumid) in newpkg.po._checksums:
                    if csumid:
                        newpkg.po._realchecksum = (csum_type, csum, csumid)
                        newpkg.po._checksums.remove((csum_type, csum, csumid))
                        csum_type = chosen_drpm['checksum_type']
                        csum = chosen_drpm['checksum']
                        newpkg.po._checksums.append((csum_type, csum, csumid))

                conduit.info(2, "Found deltarpm update for %s.%s %s:%s.%s" % (newpkg.name, newpkg.arch, newpkg.epoch, newpkg.version, newpkg.release))
            else:
                newpkg.po.hasdrpm = False
        # NOTE(review): this return exits after the FIRST transaction
        # member, so later packages are never checked and the cleanup loop
        # below never runs -- looks like a leftover; confirm intent before
        # removing it.
        return

    # Free up memory used by deleting Presto repositories
    for active_repo in conduit.getRepos().listEnabled():
        if active_repo.p_repo.enabled:
            del active_repo.p_repo
+
def postdownload_hook(conduit):
    """After download: rebuild the real rpm from any downloaded delta rpm
    and restore the package object's original (full-rpm) metadata.

    Fix: the keep/delete logic used to sit at loop level, outside the
    'if pkg.hasdrpm:' guard, so it ran for *every* package -- for a package
    without a drpm, drpm_path was either unbound (NameError on the first
    such package) or stale (re-unlinking a previous package's drpm).  It
    now runs only for packages that actually had a drpm applied.
    """
    # Cycle through packages to see if we've downloaded a deltarpm
    for pkg in conduit.getDownloadPackages():
        if pkg.hasdrpm:
            # Apply deltarpm and save where rpm would have been saved
            drpm = deltarpm.DeltaRpmWrapper(conduit)
            drpm.apply(pkg.reallocalpath, pkg.localpath)
            drpm_path = pkg.localpath

            # Change package information to reflect original rpm information
            pkg.to.pkgtup = pkg.to.realpkgtup
            pkg.localpath = pkg.reallocalpath
            pkg.simple['packagesize'] = pkg.simple['realpackagesize']
            for (csum_type, csum, csumid) in pkg._checksums:
                if csumid:
                    pkg._checksums.remove((csum_type, csum, csumid))
            pkg._checksums.append(pkg._realchecksum)

            # Check to see whether or not we should keep the drpms
            # FIXME: Is there any way to see whether or not a Boolean option was not set?
            if conduit.confBool('main', 'neverkeepdeltas'):
                delete = True
            elif conduit.confBool('main', 'keepdeltas'):
                delete = False
            elif conduit.getConf().keepcache != 0:
                delete = False
            else:
                delete = True

            if delete:
                os.unlink(drpm_path)
diff --git a/server.py b/server.py
deleted file mode 100755
index c3651fc..0000000
--- a/server.py
+++ /dev/null
@@ -1,179 +0,0 @@
-#!/usr/bin/python -t
-# -*- mode: Python; indent-tabs-mode: nil; -*-
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
-import errno, os, sys
-import fnmatch, re
-import rpmUtils.transaction, rpmUtils.miscutils
-import commands
-
-#### import Utils
-
-DEBUG = False
-#### Utils.setdebug(DEBUG)
-
-SUFFIX='drpm'
-DRPMWORTHKEEPINGTHRESH=0.5
-DEBUG=0
-
-def genDeltaRPM(ts, newrpm, oldrpm):
- (f1,n1,e1,v1,r1) = newrpm
- (f2,n2,e2,v2,r2) = oldrpm
- hdr = rpmUtils.miscutils.hdrFromPackage(ts,f1)
- arch = hdr['arch']
- print 'Generating delta rpm for %s' % n1
- v12 = "_".join([v1,v2])
- r12 = "_".join([r1,r2])
- deltaRPMName= '%s.%s.%s' % ("-".join([n1,v12,r12]), arch, SUFFIX)
- deltaCommand = 'makedeltarpm %s %s %s' % (f2, f1, deltaRPMName)
- if DEBUG:
- print "DEBUG " + deltaCommand
- # If the drpm doesn't exists, make it, else skip it
- if not os.path.exists(deltaRPMName):
- (code, out) = commands.getstatusoutput(deltaCommand)
- if code:
- #raise Exception("genDeltaRPM: exitcode was %s - Reported Error: %s" % (code, out))
- print "Error genDeltaRPM for %s: exitcode was %s - Reported Error: %s" % (n1, code, out)
- # Check whether or not we should keep the drpm
- if not drpmIsWorthKeeping(deltaRPMName, f1):
- if DEBUG:
- print 'deleting %s' % (deltaRPMName)
- try:
- os.unlink(deltaRPMName)
- except Exception, e:
- print "Error deleting deltarpm %s" % (deltaRPMName), str(e)
- else:
- if DEBUG:
- print "DEBUG skipping %s" % (deltaRPMName)
-
-def drpmIsWorthKeeping(deltaRPMName, newrpm):
- newsize = os.path.getsize(newrpm)
- drpmsize = os.path.getsize(deltaRPMName)
- # Delete the drpm if it's too fat
- if drpmsize > DRPMWORTHKEEPINGTHRESH * newsize:
- return 0
- return 1
-
-def pruneRepo(keep,whitelist,srcdir):
- ts = rpmUtils.transaction.initReadOnlyTransaction()
- changed = False
-
- # Create list of .rpm files.
- # We don't use "glob", so sub-directories are supported.
- print 'Expiring (keep=%d):' % keep, srcdir
- srcfiles = []
- for root, dirs, files in os.walk(srcdir):
- for f in fnmatch.filter(files,'*.rpm'):
- srcfiles.append(os.path.join(root,f))
- if not len(srcfiles):
- print ' Nothing found.'
- return changed
- assert srcfiles[0].startswith(srcdir)
-
- # Create map: rpm %name -> list of tuples (filename,name,e,v,r)
- newestsrcrpms = {}
- for f in srcfiles:
- hdr = rpmUtils.miscutils.hdrFromPackage(ts,f)
- n = hdr['name']
- v = hdr['version']
- r = hdr['release']
- e = hdr['epoch']
- if e is None:
- e = 0
- newestsrcrpms.setdefault(n,[])
- newestsrcrpms[n].append((f,n,e,v,r))
-
- # Now purge old src.rpm unless their %name matches a white-list pattern.
- for l in newestsrcrpms.values():
- x = len(l)
-
- if x > 1:
- # White-listing.
- (f,n,e,v,r) = l[0]
- keepthis = False
- for r in whitelist:
- if re.compile(r).search(n):
- keepthis = True
- break
- if keepthis:
- print ' Skipping',n
- continue
-
- def sortByEVR(fnevr1, fnevr2):
- (f1,n1,e1,v1,r1) = fnevr1
- (f2,n2,e2,v2,r2) = fnevr2
- rc = rpmUtils.miscutils.compareEVR((e1,v1,r1),(e2,v2,r2))
- if rc == 0:
- return 0
- if rc > 0:
- return -1
- if rc < 0:
- return 1
-
- l.sort(sortByEVR) # highest first in list
- # Generate delta rpm
- genDeltaRPM(ts, l[0],l[1])
-
- oldies = []
- if len(l) > abs(keep):
- oldies = l[keep:]
- for (f,n,e,v,r) in oldies:
- print ' Removing', os.path.basename(f)
- srcfiles.remove(f)
- if not DEBUG:
- os.remove(f)
- print "not removing\n"
- changed = True
-
- if not len(srcfiles):
- print 'WARNING: No .rpms left. Stopping here.'
- return changed
-
- # Examine binary repository directories and remove everything which
- # is missing its corresponding src.rpm.
- return changed
-
-
-def main(bin_rpm_path):
- assert rpmUtils.miscutils.compareEVR((1,2,3),(1,2,0)) > 0
- assert rpmUtils.miscutils.compareEVR((0,1,2),(0,1,2)) == 0
- assert rpmUtils.miscutils.compareEVR((1,2,3),(4,0,99)) < 0
-
- #### keep = (dist == 'development') and 1 or 2
- keep = 2
- #### whitelist = cfg.repoprune_keepdict[dist]
- whitelist = ""
-
- return pruneRepo(keep,whitelist,bin_rpm_path)
-
-
-if __name__ == '__main__':
- if len(sys.argv) < 2:
- print 'Usage: %s <bin_rpm_dir> \n' % os.path.basename(sys.argv[0])
- sys.exit(errno.EINVAL)
- bin_rpm_path = sys.argv[1]
-
- #### cfg = Utils.load_config_module(sys.argv[1])
-
- #### Utils.signer_gid_check(cfg.signersgid)
- #### os.umask(cfg.signersumask)
-
- #### for dist in sys.argv[2:]:
- #### if not cfg.archdict.has_key(dist):
- #### print "No distribution release named '%s' found" % dist
- #### sys.exit(errno.EINVAL)
- main(bin_rpm_path)
- sys.exit(0)
diff --git a/share-presto/deltarpm.py b/share-presto/deltarpm.py
new file mode 100644
index 0000000..710a8bb
--- /dev/null
+++ b/share-presto/deltarpm.py
@@ -0,0 +1,86 @@
+# author: Jonathan Dieter <jdieter@gmail.com>
+#
+# mostly taken from deltarpm.py created by
+# Lars Herrmann <herrmann@redhat.com>
+# and modified for Presto by
+# Ahmed Kamal <email.ahmedkamal@googlemail.com>
+#
+# license: GPL (see COPYING file in distribution)
+#
+# this module provides a python wrapper around deltarpm tools written by suse
+#
+# TODO: catch exceptions wherever possible and raise useful ones ;)
+# see TODO lines in methods
+
+APPLY='/usr/bin/applydeltarpm'
+
+import popen2
+import string
+import os
+
+class Process:
+ """wrapper class to execute programs and return exitcode and output (stdout and stderr combined)"""
+ def __init__(self, conduit):
+ self.__stdout=None
+ self.__returncode=None
+ self.__command=None
+ self.__args=None
+ self.conduit = conduit
+
+ def run(self, command, *args):
+ self.__command=command
+ self.__args=args
+ cmdline=command+" "+string.join(args, " ")
+ self.conduit.info(7, '%s.%s: executing %s' % (self.__class__, 'run', cmdline))
+ pipe = popen2.Popen4(cmdline)
+ self.__stdout=pipe.fromchild.read()
+ retcode = pipe.wait()
+ if os.WIFEXITED(retcode):
+ self.__returncode = os.WEXITSTATUS(retcode)
+ else:
+ self.__returncode = retcode
+ # fallback to old implementation - works better ?
+ #stdoutp = os.popen(cmdline,'r',1)
+ #self.__stdout = stdoutp.read()
+ #retcode = stdoutp.close()
+ #if retcode is None:
+ # self.__returncode = 0
+ #else:
+ # self.__returncode = retcode
+
+ def getOutput(self):
+ return self.__stdout
+
+ def returnCode(self):
+ return self.__returncode
+
+class DeltaRpmWrapper:
+ """wrapper around deltarpm binaries - implement methods for applying and verifying delta rpms
+ - raises exceptions if exitcode of binaries was != 0"""
+
+ def __init__(self, conduit):
+ self.conduit = conduit
+ self.conduit.info(7, '%s.%s: created' % (self.__class__, '__init__'))
+
+ def apply(self, newrpmfile, deltarpmfile):
+        """wraps execution of applydeltarpm deltarpm newrpm -
+        returns the path of the reconstructed rpm on success"""
+ # TODO: test args for type == instance and __class__ == RpmDescription
+ self.conduit.info(7, '%s.apply(%s,%s)' % (self.__class__, newrpmfile, deltarpmfile))
+ p=Process(self.conduit)
+ # targetrpm filename
+ p.run(APPLY, deltarpmfile, newrpmfile)
+ if p.returnCode():
+ # in case of error, raise exception
+ raise Exception("Could not apply deltarpm: %d" % (p.returnCode()))
+ return newrpmfile
+
+ def verifySequence(self, sequence):
+        """wraps execution of applydeltarpm -s <sequence> -
+        checks that the given delta sequence could be applied cleanly on this system"""
+ self.conduit.info(7, '%s.verify(%s)' % (self.__class__, sequence))
+ p = Process(self.conduit)
+ p.run(APPLY, '-s', sequence)
+ if p.returnCode():
+ # in case of error, raise exception
+ raise Exception("Could not verify sequence of deltarpm: %d" % (p.returnCode()))
diff --git a/share-presto/prestoRepo.py b/share-presto/prestoRepo.py
new file mode 100644
index 0000000..3b62caa
--- /dev/null
+++ b/share-presto/prestoRepo.py
@@ -0,0 +1,588 @@
+# author: Jonathan Dieter <jdieter@gmail.com>
+#
+# mostly taken from yumRepo.py (part of yum) with a few minor modifications
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Copyright 2005 Duke University
+
+import os
+import re
+import time
+import types
+import urlparse
+
+from yum import Errors
+from urlgrabber.grabber import URLGrabber
+import urlgrabber.mirror
+from urlgrabber.grabber import URLGrabError
+from yum.repos import Repository
+from yum import repoMDObject
+from yum import parser
+from yum import config
+from yum import misc
+
+class PrestoRepository(Repository):
+ """
+ This is an actual repository object
+
+ Configuration attributes are pulled in from config.RepoConf.
+ """
+
+ def __init__(self, repo, conduit):
+ Repository.__init__(self, repo.id)
+
+ # If there's a specific deltarpm url, use that
+ is_different = False
+ if repo.deltaurl != []:
+ self.baseurl = repo.deltaurl
+ is_different = True
+ conduit.info(5, 'Manual url set: %s' % self.baseurl)
+ else:
+ self.baseurl = repo.baseurl
+
+ # If there's a specific mirrorlist, use that
+ if repo.deltamirrorlist != None:
+ self.mirrorlist = repo.deltamirrorlist
+ is_different = True
+ conduit.info(5, 'Manual mirrorlist set: %s' % self.mirrorlist)
+ else:
+ self.mirrorlist = repo.mirrorlist
+
+ self.conduit = conduit
+ self.urls = []
+ self.is_different = is_different
+ if is_different:
+ self.repoMDFile = 'repodata/prestomd.xml'
+ self.metadata_cookie_fn = 'presto_cachecookie'
+ else:
+ self.repoMDFile = 'repodata/repomd.xml'
+ self.metadata_cookie_fn = 'cachecookie'
+ self.repoXML = None
+ self.cache = 0
+ self.mirrorlistparsed = 0
+ self.yumvar = {} # empty dict of yumvariables for $string replacement
+ self._proxy_dict = {}
+ self.http_headers = {}
+
+ # throw in some stubs for things that will be set by the config class
+ self.basecachedir = ""
+ self.cachedir = ""
+ self.pkgdir = ""
+ self.hdrdir = ""
+ self.enabled = True
+
+ # holder for stuff we've grabbed
+ self.retrieved = { 'deltas':0 }
+
+ # callbacks
+ self.keepalive = repo.keepalive
+ self.bandwidth = repo.bandwidth
+ self.retries = repo.retries
+ self.throttle = repo.throttle
+ self.proxy = repo.proxy
+ self.proxy_username = repo.proxy_username
+ self.proxy_password = repo.proxy_password
+ self.timeout = repo.timeout
+ self.http_caching = repo.http_caching
+ self.failovermethod = repo.failovermethod
+ self.metadata_expire = repo.metadata_expire
+ self.basecachedir = repo.basecachedir
+ self.callback = repo.callback
+ self.failure_obj = None
+ self.mirror_failure_obj = None
+ self.interrupt_callback = None
+ self.drpm_list = {}
+ self.parent = repo
+ repo.p_repo = self
+
+
+ def __getProxyDict(self):
+ self.doProxyDict()
+ if self._proxy_dict:
+ return self._proxy_dict
+ return None
+
+ # consistent access to how proxy information should look (and ensuring
+ # that it's actually determined for the repo)
+ proxy_dict = property(__getProxyDict)
+
+ def ready(self):
+ """Returns true if this repository is setup and ready for use."""
+ return self.repoXML is not None
+
+ def __cmp__(self, other):
+ if self.id > other.id:
+ return 1
+ elif self.id < other.id:
+ return -1
+ else:
+ return 0
+
+ def __str__(self):
+ return self.id
+
+ def _checksum(self, sumtype, file, CHUNK=2**16):
+        """takes a filename, hands back its checksum
+ sumtype = md5 or sha
+ filename = /path/to/file
+ CHUNK=65536 by default"""
+ try:
+ return misc.checksum(sumtype, file, CHUNK)
+ except (Errors.MiscError, EnvironmentError), e:
+ raise Errors.RepoError, 'Error opening file for checksum: %s' % e
+
+ def dump(self):
+ output = '[%s]\n' % self.id
+ vars = ['name', 'bandwidth', 'enabled',
+ 'keepalive', 'proxy',
+ 'proxy_password', 'proxy_username',
+ 'retries', 'throttle', 'timeout', 'mirrorlist',
+ 'cachedir', 'deltasdir' ]
+ vars.sort()
+ for attr in vars:
+ output = output + '%s = %s\n' % (attr, getattr(self, attr))
+ output = output + 'baseurl ='
+ for url in self.urls:
+ output = output + ' %s\n' % url
+
+ return output
+
+ def check(self):
+ """self-check the repo information - if we don't have enough to move
+ on then raise a repo error"""
+ if len(self.urls) < 1:
+ raise Errors.RepoError, \
+ 'Cannot find a valid deltaurl for repo: %s' % self.id
+
+ def doProxyDict(self):
+ if self._proxy_dict:
+ return
+
+ self._proxy_dict = {} # zap it
+ proxy_string = None
+ if self.proxy not in [None, '_none_']:
+ proxy_string = '%s' % self.proxy
+ if self.proxy_username is not None:
+ proxy_parsed = urlparse.urlsplit(self.proxy, allow_fragments=0)
+ proxy_proto = proxy_parsed[0]
+ proxy_host = proxy_parsed[1]
+ proxy_rest = proxy_parsed[2] + '?' + proxy_parsed[3]
+ proxy_string = '%s://%s@%s%s' % (proxy_proto,
+ self.proxy_username, proxy_host, proxy_rest)
+
+ if self.proxy_password is not None:
+ proxy_string = '%s://%s:%s@%s%s' % (proxy_proto,
+ self.proxy_username, self.proxy_password,
+ proxy_host, proxy_rest)
+
+ if proxy_string is not None:
+ self._proxy_dict['http'] = proxy_string
+ self._proxy_dict['https'] = proxy_string
+ self._proxy_dict['ftp'] = proxy_string
+
+ def __headersListFromDict(self):
+ """Convert our dict of headers to a list of 2-tuples for urlgrabber."""
+ headers = []
+
+ keys = self.http_headers.keys()
+ for key in keys:
+ headers.append((key, self.http_headers[key]))
+
+ return headers
+
+ def setupGrab(self):
+ """sets up the grabber functions with the already stocked in urls for
+ the mirror groups"""
+
+ if self.failovermethod == 'roundrobin':
+ mgclass = urlgrabber.mirror.MGRandomOrder
+ else:
+ mgclass = urlgrabber.mirror.MirrorGroup
+
+ headers = tuple(self.__headersListFromDict())
+
+ self.grabfunc = URLGrabber(keepalive=self.keepalive,
+ bandwidth=self.bandwidth,
+ retry=self.retries,
+ throttle=self.throttle,
+ progress_obj=self.callback,
+ proxies = self.proxy_dict,
+ failure_callback=self.failure_obj,
+ interrupt_callback=self.interrupt_callback,
+ timeout=self.timeout,
+ http_headers=headers,
+ reget='simple')
+
+
+ self.grab = mgclass(self.grabfunc, self.urls,
+ failure_callback=self.mirror_failure_obj)
+
+ def dirSetup(self):
+ """make the necessary dirs, if possible, raise on failure"""
+
+ cachedir = os.path.join(self.parent.basecachedir, self.id)
+ deltasdir = os.path.join(cachedir, 'deltas')
+ self.parent.setAttribute('deltasdir', deltasdir)
+
+ cookie = cachedir + '/' + self.metadata_cookie_fn
+ self.setAttribute('metadata_cookie', cookie)
+
+ for dir in [cachedir, self.parent.deltasdir]:
+ if self.cache == 0:
+ if os.path.exists(dir) and os.path.isdir(dir):
+ continue
+ else:
+ try:
+ os.makedirs(dir, mode=0755)
+ except OSError, e:
+ raise Errors.RepoError, \
+ "Error making cache directory: %s error was: %s" % (dir, e)
+ else:
+ if not os.path.exists(dir):
+ raise Errors.RepoError, \
+ "Cannot access repository dir %s" % dir
+
+ def baseurlSetup(self):
+ """go through the baseurls and mirrorlists and populate self.urls
+ with valid ones, run self.check() at the end to make sure it worked"""
+
+ goodurls = []
+ if self.mirrorlist and not self.mirrorlistparsed:
+ mirrorurls = getMirrorList(self.mirrorlist, self.proxy_dict)
+ self.mirrorlistparsed = 1
+ for url in mirrorurls:
+ url = parser.varReplace(url, self.yumvar)
+ self.baseurl.append(url)
+
+ for url in self.baseurl:
+ url = parser.varReplace(url, self.yumvar)
+ (s,b,p,q,f,o) = urlparse.urlparse(url)
+ if s not in ['http', 'ftp', 'file', 'https']:
+ print 'not using ftp, http[s], or file for repos, skipping - %s' % (url)
+ continue
+ else:
+ goodurls.append(url)
+
+ self.setAttribute('urls', goodurls)
+ self.check()
+ self.setupGrab() # update the grabber for the urls
+
+ def __get(self, url=None, relative=None, local=None, start=None, end=None,
+ copy_local=0, checkfunc=None, text=None, reget='simple', cache=True):
+ """retrieve file from the mirrorgroup for the repo
+ relative to local, optionally get range from
+ start to end, also optionally retrieve from a specific baseurl"""
+
+ # if local or relative is None: raise an exception b/c that shouldn't happen
+ # if url is not None - then do a grab from the complete url - not through
+ # the mirror, raise errors as need be
+ # if url is None do a grab via the mirror group/grab for the repo
+ # return the path to the local file
+
+ # Turn our dict into a list of 2-tuples
+ headers = self.__headersListFromDict()
+
+ # We will always prefer to send no-cache.
+ if not (cache or self.http_headers.has_key('Pragma')):
+ headers.append(('Pragma', 'no-cache'))
+
+ headers = tuple(headers)
+
+ if local is None or relative is None:
+ raise Errors.RepoError, \
+ "get request for Repo %s, gave no source or dest" % self.id
+
+ if self.cache == 1:
+ if os.path.exists(local): # FIXME - we should figure out a way
+ return local # to run the checkfunc from here
+
+ else: # ain't there - raise
+ raise Errors.RepoError, \
+ "Caching enabled but no local cache of %s from %s" % (local,
+ self)
+
+ if url is not None:
+ ug = URLGrabber(keepalive = self.keepalive,
+ bandwidth = self.bandwidth,
+ retry = self.retries,
+ throttle = self.throttle,
+ progress_obj = self.callback,
+ copy_local = copy_local,
+ reget = reget,
+ proxies = self.proxy_dict,
+ failure_callback = self.failure_obj,
+ interrupt_callback=self.interrupt_callback,
+ timeout=self.timeout,
+ checkfunc=checkfunc,
+ http_headers=headers,
+ )
+
+ remote = url + '/' + relative
+
+ try:
+ result = ug.urlgrab(remote, local,
+ text=text,
+ range=(start, end),
+ )
+ except URLGrabError, e:
+ raise Errors.RepoError, \
+ "failed to retrieve %s from %s\nerror was %s" % (relative, self.id, e)
+
+ else:
+ try:
+ result = self.grab.urlgrab(relative, local,
+ text = text,
+ range = (start, end),
+ copy_local=copy_local,
+ reget = reget,
+ checkfunc=checkfunc,
+ http_headers=headers,
+ )
+ except URLGrabError, e:
+ raise Errors.RepoError, "failure: %s from %s: %s" % (relative, self.id, e)
+
+ return result
+
+
+ def metadataCurrent(self):
+ """Check if there is a metadata_cookie and check its age. If the
+ age of the cookie is less than metadata_expire time then return true
+ else return False"""
+
+ val = False
+ if os.path.exists(self.metadata_cookie):
+ cookie_info = os.stat(self.metadata_cookie)
+ if cookie_info[8] + self.metadata_expire > time.time():
+ val = True
+ # WE ARE FROM THE FUTURE!!!!
+ elif cookie_info[8] > time.time():
+ val = False
+ return val
+
+ def setMetadataCookie(self):
+ """if possible, set touch the metadata_cookie file"""
+
+ check = self.metadata_cookie
+ if not os.path.exists(self.metadata_cookie):
+ check = self.cachedir
+
+ if os.access(check, os.W_OK):
+ fo = open(self.metadata_cookie, 'w+')
+ fo.close()
+ del fo
+
+
+ def setup(self, cache):
+ try:
+ self.cache = cache
+ self.baseurlSetup()
+ self.dirSetup()
+ except Errors.RepoError, e:
+ raise
+
+ try:
+ self._loadRepoXML(text=self)
+ except Errors.RepoError, e:
+ raise Errors.RepoError, ('Cannot open/read %s file for repository: %s' % (self.repoMDFile, self))
+
+
+ def _loadRepoXML(self, text=None):
+ """retrieve/check/read in repomd.xml from the repository"""
+
+ remote = self.repoMDFile
+ if self.is_different:
+ local = self.cachedir + '/prestomd.xml'
+ else:
+ local = self.cachedir + '/repomd.xml'
+
+ if self.repoXML is not None:
+ return
+
+ if self.cache or self.metadataCurrent():
+ if not os.path.exists(local):
+ raise Errors.RepoError, 'Cannot find %s file for %s' % (self.repoMDFile, self)
+ else:
+ result = local
+ else:
+ checkfunc = (self._checkRepoXML, (), {})
+ try:
+ result = self.__get(relative=remote,
+ local=local,
+ copy_local=1,
+ text=text,
+ reget=None,
+ checkfunc=checkfunc,
+ cache=self.http_caching == 'all')
+
+
+ except URLGrabError, e:
+ raise Errors.RepoError, 'Error downloading file %s: %s' % (local, e)
+ # if we have a 'fresh' repomd.xml then update the cookie
+ self.setMetadataCookie()
+
+ try:
+ self.repoXML = repoMDObject.RepoMD(self.id, result)
+ except Errors.RepoMDError, e:
+ raise Errors.RepoError, 'Error importing %s from %s: %s' % (self.repoMDFile, self, e)
+
+ def _checkRepoXML(self, fo):
+ if type(fo) is types.InstanceType:
+ filepath = fo.filename
+ else:
+ filepath = fo
+
+ try:
+ repoMDObject.RepoMD(self.id, filepath)
+ except Errors.RepoMDError, e:
+ raise URLGrabError(-1, 'Error importing %s for %s: %s' % (self.repoMDFile, self, e))
+
+
+ def checkMD(self, fn, mdtype):
+ """check the metadata type against its checksum"""
+
+ thisdata = self.repoXML.getData(mdtype)
+
+ (r_ctype, r_csum) = thisdata.checksum # get the remote checksum
+
+ if type(fn) == types.InstanceType: # this is an urlgrabber check
+ file = fn.filename
+ else:
+ file = fn
+
+ try:
+ l_csum = self._checksum(r_ctype, file) # get the local checksum
+ except Errors.RepoError, e:
+ raise URLGrabError(-3, 'Error performing checksum')
+
+ if l_csum == r_csum:
+ return 1
+ else:
+ raise URLGrabError(-1, 'Metadata file does not match checksum')
+
+
+
+ def retrieveMD(self, mdtype):
+ """base function to retrieve metadata files from the remote url
+ returns the path to the local metadata file of a 'mdtype'
+ mdtype must be 'deltas'."""
+ try:
+ thisdata = self.repoXML.getData(mdtype)
+ except Errors.RepoMDError:
+ self.enabled = False
+ self.conduit.info(5, "No drpms available for %s" % self.id)
+ return
+
+ (r_base, remote) = thisdata.location
+ fname = os.path.basename(remote)
+ local = self.cachedir + '/' + fname
+
+ if self.retrieved.has_key(mdtype):
+ if self.retrieved[mdtype]: # got it, move along
+ return local
+
+ if self.cache == 1:
+ if os.path.exists(local):
+ try:
+ self.checkMD(local, mdtype)
+ except URLGrabError, e:
+ raise Errors.RepoError, \
+ "Caching enabled and local cache: %s does not match checksum" % local
+ else:
+ return local
+
+ else: # ain't there - raise
+ raise Errors.RepoError, \
+ "Caching enabled but no local cache of %s from %s" % (local,
+ self)
+
+ if os.path.exists(local):
+ try:
+ self.checkMD(local, mdtype)
+ except URLGrabError, e:
+ pass
+ else:
+ self.retrieved[mdtype] = 1
+ return local # it's the same return the local one
+
+ try:
+ checkfunc = (self.checkMD, (mdtype,), {})
+ local = self.__get(relative=remote, local=local, copy_local=1,
+ checkfunc=checkfunc, reget=None,
+ cache=self.http_caching == 'all')
+ except URLGrabError, e:
+ raise Errors.RepoError, \
+ "Could not retrieve %s matching remote checksum from %s" % (local, self)
+ else:
+ self.retrieved[mdtype] = 1
+ return local
+
+
+ def getPrestoXML(self):
+        """this gets you the path to the deltas metadata file, retrieving it if we
+           need a new one"""
+
+ return self.retrieveMD('deltas')
+
+ def setCallback(self, callback):
+ self.callback = callback
+ self.setupGrab()
+
+ def setFailureObj(self, failure_obj):
+ self.failure_obj = failure_obj
+ self.setupGrab()
+
+ def setMirrorFailureObj(self, failure_obj):
+ self.mirror_failure_obj = failure_obj
+ self.setupGrab()
+
+ def setInterruptCallback(self, callback):
+ self.interrupt_callback = callback
+ self.setupGrab()
+
+def getMirrorList(mirrorlist, pdict = None):
+ """retrieve an up2date-style mirrorlist file from a url,
+ we also s/$ARCH/$BASEARCH/ and move along
+ returns a list of the urls from that file"""
+
+ returnlist = []
+ if hasattr(urlgrabber.grabber, 'urlopen'):
+ urlresolver = urlgrabber.grabber
+ else:
+ import urllib
+ urlresolver = urllib
+
+ scheme = urlparse.urlparse(mirrorlist)[0]
+ if scheme == '':
+ url = 'file://' + mirrorlist
+ else:
+ url = mirrorlist
+
+ try:
+ fo = urlresolver.urlopen(url, proxies=pdict)
+ except urlgrabber.grabber.URLGrabError, e:
+ print "Could not retrieve mirrorlist %s error was\n%s" % (url, e)
+ fo = None
+
+ if fo is not None:
+ content = fo.readlines()
+ for line in content:
+ if re.match('^\s*\#.*', line) or re.match('^\s*$', line):
+ continue
+ mirror = re.sub('\n$', '', line) # no more trailing \n's
+ (mirror, count) = re.subn('\$ARCH', '$BASEARCH', mirror)
+ returnlist.append(mirror)
+
+ return returnlist
+
diff --git a/share-presto/prestoTransaction.py b/share-presto/prestoTransaction.py
new file mode 100644
index 0000000..5b7e3d4
--- /dev/null
+++ b/share-presto/prestoTransaction.py
@@ -0,0 +1,65 @@
+# author: Jonathan Dieter <jdieter@gmail.com>
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Copyright 2005 Duke University
+
+import os
+import deltarpm
+
+def find_available_drpms(conduit, newpkg):
+ """Find any applicable drpms for newpkg
+ newpkg is a TransactionMember"""
+
+ rpmdb = conduit.getRpmDB()
+
+ # Saves us some typing
+ p_repo = newpkg.po.repo.p_repo
+ chosen_drpm = None
+
+ if p_repo.enabled:
+ # Don't try to download deltarpm if full rpm already exists
+ if not os.path.exists(newpkg.po.localpath):
+ # First part of key when matching drpms
+ key1 = "%s*%s*%i*%s*%s" % (newpkg.name, newpkg.arch, int(newpkg.epoch), newpkg.version, newpkg.release)
+
+ # Find any installed packages that match the ones we want to download
+ installed = rpmdb.searchNevra(newpkg.name, None, None, None, newpkg.arch)
+
+ for oldpkg in installed:
+ # Generate second part of key for matching drpms, then full key
+ key2 = "%s*%s*%i*%s*%s" % (oldpkg.name, oldpkg.arch, int(oldpkg.epoch), oldpkg.version, oldpkg.release)
+ key = "%s!!%s" % (key1, key2)
+
+ # Check whether we have a matching drpm
+ if p_repo.deltalist.has_key(key):
+                    # If we already have a matching drpm, choose the smaller of the two
+ if chosen_drpm == None or p_repo.deltalist[key]['size'] < chosen_drpm['size']:
+
+ # Get sequence code for drpm
+ sequence = p_repo.deltalist[key]['sequence']
+ if int(oldpkg.epoch) == 0:
+ seq = "%s-%s-%s-%s" % (oldpkg.name, oldpkg.version, oldpkg.release, sequence)
+ else:
+ seq = "%s-%i:%s-%s-%s" % (oldpkg.name, int(oldpkg.epoch), oldpkg.version, oldpkg.release, sequence)
+ drpm = deltarpm.DeltaRpmWrapper(conduit)
+
+ # Attempt to apply sequence code for drpm. If this fails, drpm will not apply cleanly, so
+ # don't even try to download it.
+ try:
+ drpm.verifySequence(seq)
+ chosen_drpm = p_repo.deltalist[key]
+ except:
+ conduit.info(5, "Verification of %s failed" % seq)
+ return chosen_drpm
diff --git a/share-presto/prestomdparser.py b/share-presto/prestomdparser.py
new file mode 100644
index 0000000..1713531
--- /dev/null
+++ b/share-presto/prestomdparser.py
@@ -0,0 +1,167 @@
+#!/usr/bin/python -t
+#
+# author: Jonathan Dieter <jdieter@gmail.com>
+#
+# mostly taken from mdparser.py (part of yum) with a few minor modifications
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Library General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+# Copyright 2005 Duke University
+
+import gzip
+from cElementTree import iterparse
+
+from cStringIO import StringIO
+
+#TODO: document everything here
+
+class PrestoMDParser:
+
+ def __init__(self, filename):
+
+ # Set up mapping of meta types to handler classes
+ handlers = {
+ '{http://linux.duke.edu/metadata/common}metadata': DeltasEntry,
+ }
+
+ self.total = None
+ self.count = 0
+ self._handlercls = None
+
+ # Read in type, set package node handler and get total number of
+ # packages
+ if filename[-3:] == '.gz': fh = gzip.open(filename, 'r')
+ else: fh = open(filename, 'r')
+ parser = iterparse(fh, events=('start', 'end'))
+ self.reader = parser.__iter__()
+ event, elem = self.reader.next()
+ self._handlercls = handlers.get(elem.tag, None)
+ if not self._handlercls:
+ raise ValueError('Unknown repodata type "%s" in %s' % (
+ elem.tag, filename))
+
+ def getDeltaList(self):
+ for event, elem in self.reader:
+ if event == 'end' and elem.tag == '{http://linux.duke.edu/metadata/common}metadata':
+ return self._handlercls(elem)
+
+
+class BaseEntry:
+ def __init__(self, elem):
+ self._p = {}
+
+ def __getitem__(self, k):
+ return self._p[k]
+
+ def keys(self):
+ return self._p.keys()
+
+ def values(self):
+ return self._p.values()
+
+ def has_key(self, k):
+ return self._p.has_key(k)
+
+ def __str__(self):
+ out = StringIO()
+ keys = self.keys()
+ keys.sort()
+ for k in keys:
+ line = u'%s=%s\n' % (k, self[k])
+ out.write(line.encode('utf8'))
+ return out.getvalue()
+
+ def _bn(self, qn):
+ if qn.find('}') == -1: return qn
+ return qn.split('}')[1]
+
+ def _prefixprops(self, elem, prefix):
+ ret = {}
+ for key in elem.attrib.keys():
+ ret[prefix + '_' + self._bn(key)] = elem.attrib[key]
+ return ret
+
+class DeltasEntry(BaseEntry):
+ def __init__(self, deltas):
+ BaseEntry.__init__(self, deltas)
+ # Avoid excess typing :)
+ p = self._p
+
+ for elem in deltas:
+ temp = {}
+ key1 = ""
+ key2 = ""
+ for child in elem:
+ name = self._bn(child.tag)
+ if name in ('name', 'arch'):
+ temp[name] = child.text
+
+ elif name == 'version':
+ attrib = child.attrib
+ try:
+ attrib['epoch'] = int(attrib['epoch'])
+ except:
+ attrib['epoch'] = 0
+ key1 = "%s*%s*%i*%s*%s" % (temp['name'], temp['arch'], attrib['epoch'], attrib['ver'], attrib['rel'])
+
+ elif name == 'deltas':
+ for oldrpm in child:
+ temp2 = {}
+ value = {}
+ key = None
+ for oldrpm_child in oldrpm:
+ name = self._bn(oldrpm_child.tag)
+ if name in ('name', 'arch'):
+ temp2[name] = oldrpm_child.text
+
+ elif name == 'version':
+ ch_attrib = oldrpm_child.attrib
+ try:
+ ch_attrib['epoch'] = int(ch_attrib['epoch'])
+ except:
+ ch_attrib['epoch'] = attrib['epoch']
+ try:
+ ch_attrib['ver'] = ch_attrib['ver']
+ except:
+ ch_attrib['ver'] = attrib['ver']
+ if not temp2.has_key('name'):
+ temp2['name'] = temp['name']
+ if not temp2.has_key('arch'):
+ temp2['arch'] = temp['arch']
+ key2 = "%s*%s*%i*%s*%s" % (temp2['name'], temp2['arch'], ch_attrib['epoch'], ch_attrib['ver'], ch_attrib['rel'])
+ key = "%s!!%s" % (key1, key2)
+ p[key] = {}
+
+ if name in ('sequence', 'drpm_filename', 'size'):
+ p[key][name] = oldrpm_child.text
+
+ if name == "checksum":
+ p[key][name] = oldrpm_child.text
+ p[key]["%s_type" % name] = oldrpm_child.attrib['type']
+ deltas.clear()
+
+def test():
+ import sys
+
+ parser = PrestoMDParser(sys.argv[1])
+
+ deltalist = parser.getDeltaList()
+
+ print '-' * 40
+ print deltalist
+
+ print 'read: %s deltarpms ' % (len(deltalist.keys()))
+
+if __name__ == '__main__':
+ test()
diff --git a/yum-deltarpm.conf b/yum-deltarpm.conf
deleted file mode 100644
index f3badd5..0000000
--- a/yum-deltarpm.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-[main]
-enabled=1
-deltaRpmURL=http://localhost/delta/
-deltaStorageDir=/tmp/deltas/
diff --git a/yum-deltarpm.py b/yum-deltarpm.py
deleted file mode 100644
index 4b2ad10..0000000
--- a/yum-deltarpm.py
+++ /dev/null
@@ -1,76 +0,0 @@
-# author: Lars Herrmann <herrmann@redhat.com>
-# license: GPL (see COPYING file in distribution)
-#
-
-from yum.plugins import PluginYumExit, TYPE_INTERACTIVE
-
-import os
-import sys
-
-sys.path.append("/usr/share/deltarpm-python")
-import deltarpm
-
-deltarpm.DEBUG = 1
-
-requires_api_version = '2.1'
-plugin_type = (TYPE_INTERACTIVE,)
-
-
-def predownload_hook(conduit):
- rpmdb = conduit.getRpmDB()
- up2date_cfg={}
-
- # Read configuration
- conduit.info(2, 'Reading deltarpm configuration')
- delta_rpm_url = conduit.confString('main', 'deltaRpmURL')
-
- if not delta_rpm_url:
- conduit.error(2, 'deltaRpmURL not set in deltarpm plugin config')
-
- # set delta storage dir if provide, else fallback to default
- delta_storage_dir = conduit.confString('main', 'deltaStorageDir')
- if delta_storage_dir:
- up2date_cfg[deltarpm.DELTA_STORAGE] = delta_storage_dir
-
- # get up2date config and adjust some values for use with yum
- ## up2date_cfg = config.initUp2dateConfig()
- up2date_cfg[deltarpm.USE_DELTA] = 1
- up2date_cfg[deltarpm.DELTA_URL] = delta_rpm_url
-
- for p in conduit.getDownloadPackages():
- # 0. Maybe rpm is already there?
- #if os.path.exists(p.localpath):
- # conduit.info(2, "target rpm already exists in cache - continue")
- # continue
-
- # 1. Is the package already installed?
- installed = rpmdb.returnTupleByKeyword(name=p.name)
- if not installed:
- # Package not installed so fetching delta makes no sense
- continue
-
- # 2. Try to fetch deltarpm
- filename = os.path.basename(p.localpath)
- # Set the storage path for this package
- up2date_cfg[deltarpm.STORAGE] = os.path.dirname(p.localpath)
-
- try:
- # invoke getPackageFromDelta method
- # this method does all the work: reading config, fetching delta
- # and reconstructing the new rpm. if something goes wrong, an
- # exception is thrown
- package = [p.name, p.version, p.release, p.epoch, p.arch]
- # FIXME: handle this in deltarpm.py
- if (package[3] == '0'):
- package[3] = 0
- deltarpm.getPackageFromDelta(up2date_cfg, package)
-
- except Exception, e:
- # Warn about failure
- print "could not use delta rpm for %s: %s" % (filename, e)
- # And remove anything we might have created
- try:
- os.unlink(p.localpath)
- except OSError, msg:
- pass
- #raise PluginYumExit('exiting for testing in predownload hook ')