Diffstat (limited to 'server/debuginfofs-mirror')
 server/debuginfofs-mirror | 352 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 352 insertions(+), 0 deletions(-)
diff --git a/server/debuginfofs-mirror b/server/debuginfofs-mirror
new file mode 100755
index 0000000..695cef2
--- /dev/null
+++ b/server/debuginfofs-mirror
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+# Given Fedora arch and version, fetch all debuginfo packages from the mirrors
+# Copyright 2009 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Author: Will Woods <wwoods@redhat.com>
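+#
+# Typical invocation (a sketch; "fedora-debuginfo" is a hypothetical repo ID,
+# i.e. a section other than [general] defined in /etc/debuginfofs.conf):
+#
+#   debuginfofs-mirror --releasever 10 --basearch i386 fedora-debuginfo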
+
+import os
+import sys
+import stat
+import errno
+import shutil
+import tempfile
+import optparse
+import ConfigParser
+import rpmUtils.arch
+# Why, yes, I do like to arrange imports by line length
+from subprocess import Popen, PIPE
+# Yum stuff
+sys.path.insert(0,'/usr/share/yum-cli')
+import yum.misc
+import yum.Errors
+from yum import _
+from utils import YumUtilBase
+from yum.parser import varReplace
+from yum.yumRepo import YumRepository
+
+configfile = '/etc/debuginfofs.conf'
+c = ConfigParser.RawConfigParser()
+try:
+ c.read(configfile)
+ conf = dict(c.items('general'))
+except (IOError,
+ ConfigParser.MissingSectionHeaderError,
+ ConfigParser.NoSectionError):
+ print "Error: no 'cachedir' in %s" % configfile
+ sys.exit(1)
+
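+# For illustration, /etc/debuginfofs.conf is assumed to look roughly like the
+# sketch below; the [fedora-debuginfo] section name and the mirrorlist URL are
+# hypothetical, but 'cachedir', 'exportdir' and the per-repo 'mirrorlist' key
+# are what this script actually reads:
+#
+#   [general]
+#   cachedir = /var/cache/debuginfofs
+#   exportdir = /var/www/debuginfofs
+#
+#   [fedora-debuginfo]
+#   mirrorlist = http://example.org/mirrorlist?repo=fedora-debug-$releasever&arch=$basearch
+#
+# Every section other than [general] is a repo ID that may be passed on the
+# command line; $releasever and $basearch are filled in from the options.
+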
+class DebuginfoFSDownloader(YumUtilBase):
+ NAME = 'debuginfofs-mirror'
+ VERSION = '0.1'
+ USAGE = 'usage: %s REPOID [REPOID..]' % NAME
+ def __init__(self):
+ YumUtilBase.__init__(self,
+ DebuginfoFSDownloader.NAME,
+ DebuginfoFSDownloader.VERSION,
+ DebuginfoFSDownloader.USAGE)
+ self.optparser = self.getOptionParser()
+
+ def modify_options(self):
+ o = self.optparser
+ # FIXME: remove some inappropriate options (--enablerepo etc)
+ o.add_option("--cachedir", default=conf['cachedir'],
+ help="directory to use for caching RPMs and metadata")
+ o.add_option("--exportdir", default=conf['exportdir'],
+ help="toplevel directory for debuginfofs exported data")
+ # TODO
+ #o.add_option("--distro", default="Fedora",
+ # help="Use debuginfo repos for this distro.")
+ # TODO: validate releasever/basearch
+ o.add_option("--releasever", default="10",
+ help="download debuginfo for this version of the distro.")
+ o.add_option("--basearch", default="i386",
+ help="download debuginfo for this arch")
+ o.add_option("--savecache", default=False, action="store_true",
+ help="don't delete downloaded RPMs after unpacking them")
+ o.add_option("--force-download", default=False, action="store_true",
+ help="download and unpack RPMs even if they already exist")
+ o.add_option("--download-only", default=False, action="store_true",
+ help="download RPMs but don't bother unpacking them")
+ o.add_option("--keep-src", default=False, action="store_true",
+ help="keep the contents of /usr/src/debug from each package")
+
+ def setup(self):
+ self.modify_options()
+ try:
+ # sets self.cmds
+ opt = self.doUtilConfigSetup()
+ except yum.Errors.RepoError, e:
+ self.logger.error(str(e))
+ sys.exit(50)
+        # Kinda wasteful: doUtilConfigSetup() configures every repo, and we
+        # immediately disable them all; only the repos requested on the
+        # command line get added back in main().
+ self.repos.disableRepo('*')
+ self.repos.setCacheDir(opt.cachedir)
+ # No yum cache needed, since we're saving the packages ourselves
+ self.conf.cache = 0
+ self.repos.setCache(0)
+
+ return opt
+
+ def listDownloads(self, packages):
+ '''Returns a string representation of the packages to be downloaded'''
+ n_wid = s_wid = 1
+ coldata = []
+ for p in packages:
+ nevra = pkg_to_nevra(p)
+ size = self.format_number(int(p.returnSimple('packagesize')))
+ if len(nevra) > n_wid:
+ n_wid = len(nevra)
+ if len(size) > s_wid:
+ s_wid = len(size)
+ coldata.append((nevra, size))
+ hr = '=' * self.term.columns
+ head = self.fmtColumns(((_('Package'),-n_wid),(_('Size'),-s_wid)), u" ")
+ out = u"""%s\n%s\n%s""" % (hr, head, hr)
+ for nevra, size in coldata:
+ cols = ((nevra,-n_wid),(size, s_wid))
+ line = self.fmtColumns(cols, u" ", u"\n")
+ out += line
+ return out
+
+def pkg_to_nevra(p):
+ '''return NEVRA string for package object p'''
+ (n,a,e,v,r) = p.pkgtup
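+    # e.g. ('coreutils', 'i386', '0', '6.12', '18.fc10') -> 'coreutils-0:6.12-18.fc10.i386'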
+ return "%s-%s:%s-%s.%s" % (n,e,v,r,a)
+
+def fix_perms(targetdir):
+ '''Make all files readable, and all directories read+execute'''
+ def chmod_or(f, mask):
+        mode = os.lstat(f).st_mode
+ if not stat.S_ISLNK(mode):
+ newmode = mode | mask
+ if newmode != mode:
+ os.chmod(f, newmode)
+ for top, dirs, files in os.walk(targetdir):
+ chmod_or(top, 0555)
+ for f in files:
+ chmod_or(os.path.join(top, f), 0444)
+
+def _unpack_rpm(rpm, targetdir, includepat=None, excludepat=None):
+ '''Unpack the given rpm into the given directory'''
+ if not os.path.isdir(targetdir):
+ os.makedirs(targetdir,mode=0755)
+ os.chdir(targetdir)
+ cpio = ['cpio','--quiet','-iud']
+ if excludepat:
+ cpio += ['-f', excludepat]
+ if includepat:
+ cpio += [includepat]
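+    # Equivalent shell pipeline:
+    #   rpm2cpio PACKAGE.rpm | cpio --quiet -iud [-f EXCLUDEPAT] [INCLUDEPAT]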
+ p1 = Popen(['rpm2cpio',rpm], stdout=PIPE)
+ p2 = Popen(cpio, stdin=p1.stdout, stdout=PIPE)
+ output = p2.communicate()[0] # should be empty
+ if p2.returncode != 0:
+        raise OSError, "cpio failed (exit code %s), output:\n%s" % (p2.returncode, output)
+ # Fix perms so all files are readable
+ fix_perms(targetdir)
+
+def unpack_rpm(rpm, targetdir, includepat=None, excludepat=None):
+ '''Unpack the given rpm into a temporary directory alongside the targetdir,
+ then rename the temp dir once finished.'''
+ dir = os.path.dirname(targetdir)
+ if not os.path.isdir(dir):
+        os.makedirs(dir, mode=0755)
+ tmpdir = tempfile.mkdtemp(dir=dir)
+ try:
+ _unpack_rpm(rpm, tmpdir, includepat, excludepat)
+ if os.path.isdir(targetdir):
+ shutil.rmtree(targetdir)
+ os.rename(tmpdir,targetdir)
+ return True
+ except (OSError, IOError), e:
+ print "unpack_rpm failed: %s" % str(e)
+ shutil.rmtree(tmpdir)
+ return False
+
+def mkdebuginfolinks(sourcedir, targetdir, warn_on_collision=False):
+ '''hardlink debuginfo from sourcedir into targetdir'''
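+    # Debuginfo RPMs ship /usr/lib/debug/.build-id/<xx>/<rest>.debug entries
+    # (typically symlinks) pointing at the real .debug files; each one is
+    # resolved with realpath() and hardlinked into targetdir under the same
+    # <xx>/<rest>.debug path.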
+ count = 0
+ for top, dirs, files in os.walk(sourcedir, topdown=True):
+ if '/usr/lib/debug/.build-id/' not in top:
+ continue
+ for u in [os.path.join(top,f) for f in files if f.endswith('.debug')]:
+ target = os.path.realpath(u)
+ linkname = u.split('/usr/lib/debug/.build-id/')[1]
+ newlink = os.path.join(targetdir,linkname)
+            try:
+                os.makedirs(os.path.dirname(newlink),mode=0755)
+            except OSError, e:
+                if e.errno != errno.EEXIST:
+                    raise
+ if os.path.exists(newlink):
+ if warn_on_collision and (os.stat(newlink) != os.stat(target)):
+ # TODO: better logging here
+ print "WARNING: collision or damaged debuginfo detected"
+ print "%s exists - removing" % (newlink,)
+ os.unlink(newlink)
+ os.link(target,newlink)
+ count += 1
+ return count
+
+# TODO: break this up into more logical chunks
+def main():
+ y = DebuginfoFSDownloader()
+ opt = y.setup()
+ # FIXME this is stupid - figure out how --verbose works
+ if opt.verbose:
+ y.logger = y.verbose_logger
+ # Validate some of the opts
+ # Check permissions on exportdir/cachedir
+ perms_ok = True
+ for d in (opt.exportdir, opt.cachedir):
+ if not os.access(d,os.W_OK):
+ perms_ok = False
+ y.logger.error("you don't have write permission on %s" % d)
+ if not perms_ok:
+ sys.exit(1)
+
+ if opt.basearch not in rpmUtils.arch.arches:
+ y.logger.error("Unrecognized arch %s" % opt.basearch)
+ sys.exit(1)
+
+ # kind of a cheap hack - use values from conf and opt to re-parse URLs
+ my_yumvar = y.conf.yumvar
+ my_yumvar.update(conf)
+ for a in 'basearch', 'releasever':
+ v = getattr(opt,a,None)
+ if v:
+ my_yumvar[a] = v
+ # Another cheap hack - lie about our arch
+ if opt.basearch == 'i386':
+ # hacks upon hacks. blerg.
+ rpmUtils.arch.canonArch = 'i686'
+ else:
+ rpmUtils.arch.canonArch = opt.basearch
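+    # (yum uses canonArch to decide which package arches are compatible, so
+    # overriding it makes the package sack return packages for the requested
+    # --basearch instead of the arch of the machine running this script)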
+
+ # Set up the requested repos
+ for repoid in y.cmds:
+ try:
+ d = dict([(k,varReplace(v,my_yumvar)) for k,v in c.items(repoid)])
+ except ConfigParser.NoSectionError:
+ y.logger.error("Unknown repo ID %s" % repoid)
+ continue
+ if 'mirrorlist' in d:
+ y.logger.info("Adding repo %s with mirrorlist %s" % (repoid,d['mirrorlist']))
+ repo = YumRepository('%s-%s-%s-debuginfofs' % \
+ (repoid, opt.releasever, opt.basearch))
+ repo.name = repo.id
+ repo.mirrorlist = d['mirrorlist']
+ repo.cache = 0
+ repo.enable()
+ repo.basecachedir = opt.cachedir
+ repo.cachedir = repo.id
+ y.repos.add(repo)
+ y.repos.doSetup(thisrepo=repo.id)
+ # FIXME: locking - don't setup a repo that's in use by another process
+
+ repolist = y.repos.listEnabled()
+ if repolist:
+ # Set up progress callbacks for new repos
+ y.setupProgressCallbacks()
+ for r in repolist:
+ try:
+ r.ready()
+ except yum.Errors.RepoError, e:
+ y.logger.error(str(e))
+ sys.exit(1)
+ else:
+ y.logger.error("Couldn't find any repos to set up.")
+ repos = [s for s in c.sections() if s != 'general']
+ y.logger.error("Known repos: %s" % ", ".join(repos))
+ sys.exit(1)
+
+ # Figure out which packages we need to fetch
+ download_packages = []
+ unpack_packages = []
+ for p in y.pkgSack.returnPackages():
+ repo = y.repos.getRepo(p.repoid)
+ remote = p.returnSimple('relativepath')
+ local = os.path.join(opt.cachedir,os.path.basename(remote))
+ p.localpath = local
+ n = p.pkgtup[0]
+ nevra = pkg_to_nevra(p)
+ #/var/www/debuginfofs/packages/c/coreutils/coreutils-0:6.12-18.fc10.i386
+ newdir = os.path.join(n[0],n,nevra)
+ targetdir = os.path.join(opt.exportdir,'packages',newdir)
+ # Don't bother checking - just add everything to the list
+ if opt.force_download:
+ download_packages.append(p)
+ continue
+
+ # Skip it if it's already unpacked
+ if os.path.isdir(targetdir):
+ y.logger.info("Already unpacked %s" % nevra)
+ os.utime(targetdir, None)
+ continue
+
+ # XXX does y.downloadPackages handle this for us?
+ if (os.path.exists(local) and
+ os.path.getsize(local) == int(p.returnSimple('packagesize'))):
+ y.logger.info("Already downloaded %s" % nevra)
+ unpack_packages.append(p)
+ else:
+ download_packages.append(p)
+
+ if not (unpack_packages or download_packages):
+ y.verbose_logger.info(_('Nothing to do!'))
+ sys.exit(0)
+
+ # Show what will be downloaded and get confirmation
+ y.verbose_logger.info(y.listDownloads(download_packages))
+ y.reportDownloadSize(download_packages)
+ # FIXME: show some info on unpack_packages
+ y.verbose_logger.info("Downloading to cachedir %s" % opt.cachedir)
+ y.verbose_logger.info("Unpacking to exportdir %s" % opt.exportdir)
+ if y._promptWanted():
+ if not y.userconfirm():
+ y.logger.info(_('Exiting on user Command'))
+ sys.exit(1)
+
+ # Download, unpack, and hardlink debuginfo data
+ excludepat=None
+ if not opt.keep_src:
+ excludepat="*/usr/src/debug/*"
+ for p in unpack_packages + download_packages:
+ if p in download_packages:
+ repo = y.repos.getRepo(p.repoid)
+ # FIXME handle possible errors here
+ repo.getPackage(p)
+ if opt.download_only:
+ continue
+ local = p.localpath
+ n = p.pkgtup[0]
+ nevra = pkg_to_nevra(p)
+ newdir = os.path.join(n[0],n,nevra)
+ targetdir = os.path.join(opt.exportdir,'packages',newdir)
+
+ y.verbose_logger.info("Unpacking %s" % nevra)
+ unpack_rpm(local, targetdir, excludepat=excludepat)
+ # Remove cached package now that we've unpacked it
+ if not opt.savecache:
+ os.unlink(local)
+ # Make hardlinks
+ r = mkdebuginfolinks(targetdir, os.path.join(opt.exportdir,'build-id'))
+ y.logger.info("Linked %3i debuginfo file%s" % (r, r != 1 and "s" or ""))
+
+if __name__ == '__main__':
+ try:
+ main()
+ except KeyboardInterrupt:
+ print "Exiting on keyboard interrupt"