summaryrefslogtreecommitdiffstats
path: root/server
diff options
context:
space:
mode:
authorWill Woods <wwoods@redhat.com>2009-03-05 18:05:34 -0500
committerWill Woods <wwoods@redhat.com>2009-03-05 18:05:34 -0500
commit87db0232f329e722f9f8cd97ec50285b14069f04 (patch)
tree787a4d4f72f2d5e8a376a64fe8e894287de5d83c /server
parentc9e5a68ad5f6febaed4093817fcce2b9a8d80a5b (diff)
downloaddebuginfofs-87db0232f329e722f9f8cd97ec50285b14069f04.tar.gz
debuginfofs-87db0232f329e722f9f8cd97ec50285b14069f04.tar.xz
debuginfofs-87db0232f329e722f9f8cd97ec50285b14069f04.zip
Rearrange files a bit, and add Requires(post/preun) for semanage
Diffstat (limited to 'server')
-rw-r--r--server/dav-debuginfo.conf52
-rwxr-xr-xserver/debuginfofs-mirror352
-rwxr-xr-xserver/debuginfofs-server.init89
-rw-r--r--server/debuginfofs.conf21
-rwxr-xr-xserver/repofs.py373
5 files changed, 887 insertions, 0 deletions
diff --git a/server/dav-debuginfo.conf b/server/dav-debuginfo.conf
new file mode 100644
index 0000000..c66d3b9
--- /dev/null
+++ b/server/dav-debuginfo.conf
@@ -0,0 +1,52 @@
+# Export debuginfo as a readonly DAV share
+# See also: /usr/share/gnome-user-share/dav_user.conf
+ServerRoot ${DEBUGINFOFS_EXPORTDIR}
+DocumentRoot ${DEBUGINFOFS_EXPORTDIR}
+PidFile /var/run/debuginfofs-server.pid
+LockFile /var/lock/debuginfofs-server
+DAVLockDB /var/lock/debuginfofs-server.davlock
+LogLevel crit
+#LogLevel info
+ErrorLog /var/log/debuginfofs-server.log
+# I had this idea on March 3 2009, so... 3309
+Listen 3309
+# XXX maybe we should have a special debuginfofs user for this?
+User nobody
+
+LimitXMLRequestBody 100000
+
+LoadModule dav_module /etc/httpd/modules/mod_dav.so
+LoadModule dav_fs_module /etc/httpd/modules/mod_dav_fs.so
+LoadModule authz_host_module /etc/httpd/modules/mod_authz_host.so
+
+<Directory "${DEBUGINFOFS_EXPORTDIR}">
+ DAV On
+ <LimitExcept GET HEAD OPTIONS PROPFIND>
+ Order allow,deny
+ Deny from all
+ </LimitExcept>
+ #If you enable Indexes on a public site, the first spider that crawls
+ #this filesystem could unpack every RPM you have. Use with caution.
+ #Options Indexes
+</Directory>
+
+# These may need tuning for a public instance..
+StartServers 1
+MaxClients 3
+
+<IfModule mpm_prefork_module>
+ MinSpareServers 1
+ MaxSpareServers 1
+</IfModule>
+
+<IfModule mpm_worker_module>
+ MinSpareThreads 1
+ MaxSpareThreads 1
+ ThreadsPerChild 3
+</IfModule>
+
+<IfModule mpm_event_module>
+ MinSpareThreads 1
+ MaxSpareThreads 1
+ ThreadsPerChild 3
+</IfModule>
diff --git a/server/debuginfofs-mirror b/server/debuginfofs-mirror
new file mode 100755
index 0000000..695cef2
--- /dev/null
+++ b/server/debuginfofs-mirror
@@ -0,0 +1,352 @@
+#!/usr/bin/python
+# Given Fedora arch and version, fetch all debuginfo packages from the mirrors
+# Copyright 2009 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Author: Will Woods <wwoods@redhat.com>
+
+import os
+import sys
+import stat
+import shutil
+import tempfile
+import optparse
+import ConfigParser
+import rpmUtils.arch
+# Why, yes, I do like to arrange imports by line length
+from subprocess import Popen, PIPE
+# Yum stuff
+sys.path.insert(0,'/usr/share/yum-cli')
+import yum.misc
+import yum.Errors
+from yum import _
+from utils import YumUtilBase
+from yum.parser import varReplace
+from yum.yumRepo import YumRepository
+
# Load the [general] settings (cachedir, exportdir, mirrorlist base URL)
# from the system-wide config file.  The result is the module-level 'conf'
# dict used for option defaults below.
configfile = '/etc/debuginfofs.conf'
c = ConfigParser.RawConfigParser()
try:
    c.read(configfile)
    conf = dict(c.items('general'))
except (IOError,
        ConfigParser.MissingSectionHeaderError,
        ConfigParser.NoSectionError):
    # NOTE(review): RawConfigParser.read() silently skips unreadable files
    # (so the IOError arm is likely dead), and a missing 'cachedir' key
    # raises KeyError later rather than being caught here -- the error
    # message is an approximation.  Confirm intended behavior.
    print "Error: no 'cachedir' in %s" % configfile
    sys.exit(1)
+
class DebuginfoFSDownloader(YumUtilBase):
    '''yum-cli utility that fetches debuginfo packages for a given
    distro release/arch so the rest of this script can unpack them
    under the debuginfofs export directory.'''
    NAME = 'debuginfofs-mirror'
    VERSION = '0.1'
    USAGE = 'usage: %s REPOID [REPOID..]' % NAME

    def __init__(self):
        YumUtilBase.__init__(self,
                             DebuginfoFSDownloader.NAME,
                             DebuginfoFSDownloader.VERSION,
                             DebuginfoFSDownloader.USAGE)
        self.optparser = self.getOptionParser()

    def modify_options(self):
        '''Add debuginfofs-specific options to the yum option parser.
        Defaults for --cachedir/--exportdir come from the module-level
        'conf' dict read from /etc/debuginfofs.conf.'''
        o = self.optparser
        # FIXME: remove some inappropriate options (--enablerepo etc)
        o.add_option("--cachedir", default=conf['cachedir'],
            help="directory to use for caching RPMs and metadata")
        o.add_option("--exportdir", default=conf['exportdir'],
            help="toplevel directory for debuginfofs exported data")
        # TODO
        #o.add_option("--distro", default="Fedora",
        #    help="Use debuginfo repos for this distro.")
        # TODO: validate releasever/basearch
        o.add_option("--releasever", default="10",
            help="download debuginfo for this version of the distro.")
        o.add_option("--basearch", default="i386",
            help="download debuginfo for this arch")
        o.add_option("--savecache", default=False, action="store_true",
            help="don't delete downloaded RPMs after unpacking them")
        o.add_option("--force-download", default=False, action="store_true",
            help="download and unpack RPMs even if they already exist")
        o.add_option("--download-only", default=False, action="store_true",
            help="download RPMs but don't bother unpacking them")
        o.add_option("--keep-src", default=False, action="store_true",
            help="keep the contents of /usr/src/debug from each package")

    def setup(self):
        '''Run yum's config/option setup, then disable every stock repo so
        that only repos named on the command line get enabled later in
        main().  Returns the parsed option object.  Exits with status 50
        on repo setup errors.'''
        self.modify_options()
        try:
            # sets self.cmds
            opt = self.doUtilConfigSetup()
        except yum.Errors.RepoError, e:
            self.logger.error(str(e))
            sys.exit(50)
        # Kinda stupid, setting up and then immediately tearing down all repos..
        self.repos.disableRepo('*')
        self.repos.setCacheDir(opt.cachedir)
        # No yum cache needed, since we're saving the packages ourselves
        self.conf.cache = 0
        self.repos.setCache(0)

        return opt

    def listDownloads(self, packages):
        '''Returns a string representation of the packages to be downloaded'''
        # Column widths grow to fit the widest NEVRA and size strings
        n_wid = s_wid = 1
        coldata = []
        for p in packages:
            nevra = pkg_to_nevra(p)
            size = self.format_number(int(p.returnSimple('packagesize')))
            if len(nevra) > n_wid:
                n_wid = len(nevra)
            if len(size) > s_wid:
                s_wid = len(size)
            coldata.append((nevra, size))
        hr = '=' * self.term.columns
        head = self.fmtColumns(((_('Package'),-n_wid),(_('Size'),-s_wid)), u" ")
        out = u"""%s\n%s\n%s""" % (hr, head, hr)
        for nevra, size in coldata:
            # NOTE(review): the size column uses +s_wid here but -s_wid in
            # the header -- presumably right- vs left-justified; confirm.
            cols = ((nevra,-n_wid),(size, s_wid))
            line = self.fmtColumns(cols, u" ", u"\n")
            out += line
        return out
+
def pkg_to_nevra(p):
    '''Format package object p's pkgtup as a NEVRA string:
    "name-epoch:version-release.arch".'''
    name, arch, epoch, version, release = p.pkgtup
    return "%s-%s:%s-%s.%s" % (name, epoch, version, release, arch)
+
def fix_perms(targetdir):
    '''Make all files readable, and all directories read+execute.

    Walks targetdir top-down, OR-ing read bits (0o444) onto every regular
    file and read+execute bits (0o555) onto every directory.  Symlinks are
    left untouched.  Only calls chmod when a bit is actually missing.

    FIX: use 0oNNN octal literals -- the old 0555/0444 spelling is a
    syntax error on Python 3; 0o works on Python 2.6+ as well.'''
    def chmod_or(f, mask):
        # lstat so we never follow (and never chmod through) a symlink
        mode = os.lstat(f)[0]
        if not stat.S_ISLNK(mode):
            newmode = mode | mask
            if newmode != mode:
                os.chmod(f, newmode)
    # top-down order means each directory becomes traversable before we
    # descend into it
    for top, dirs, files in os.walk(targetdir):
        chmod_or(top, 0o555)
        for f in files:
            chmod_or(os.path.join(top, f), 0o444)
+
def _unpack_rpm(rpm, targetdir, includepat=None, excludepat=None):
    '''Unpack the given rpm into the given directory'''
    # NOTE: os.chdir() changes the CWD of the whole process -- cpio
    # extracts relative to the CWD, so this is load-bearing.
    if not os.path.isdir(targetdir):
        os.makedirs(targetdir,mode=0755)
    os.chdir(targetdir)
    cpio = ['cpio','--quiet','-iud']
    if excludepat:
        # cpio -f PATTERN: copy everything *except* names matching PATTERN
        cpio += ['-f', excludepat]
    if includepat:
        # bare pattern args to cpio -i select only matching names
        cpio += [includepat]
    # rpm2cpio RPM | cpio ... ; cpio's stdout should be empty
    p1 = Popen(['rpm2cpio',rpm], stdout=PIPE)
    p2 = Popen(cpio, stdin=p1.stdout, stdout=PIPE)
    output = p2.communicate()[0] # should be empty
    if p2.returncode != 0:
        raise OSError, "cpio failed: %s output:\n%s" % (str(p2.returncode),output)
    # Fix perms so all files are readable
    fix_perms(targetdir)
+
+def unpack_rpm(rpm, targetdir, includepat=None, excludepat=None):
+ '''Unpack the given rpm into a temporary directory alongside the targetdir,
+ then rename the temp dir once finished.'''
+ dir = os.path.dirname(targetdir)
+ if not os.path.isdir(dir):
+ os.makedirs(targetdir,mode=0755)
+ tmpdir = tempfile.mkdtemp(dir=dir)
+ try:
+ _unpack_rpm(rpm, tmpdir, includepat, excludepat)
+ if os.path.isdir(targetdir):
+ shutil.rmtree(targetdir)
+ os.rename(tmpdir,targetdir)
+ return True
+ except (OSError, IOError), e:
+ print "unpack_rpm failed: %s" % str(e)
+ shutil.rmtree(tmpdir)
+ return False
+
def mkdebuginfolinks(sourcedir, targetdir, warn_on_collision=False):
    '''hardlink debuginfo from sourcedir into targetdir

    Walks the unpacked package tree under sourcedir looking at the
    /usr/lib/debug/.build-id/ symlink farm, and hardlinks each real
    *.debug file into targetdir under its build-id-derived name.
    Returns the number of links made.'''
    count = 0
    for top, dirs, files in os.walk(sourcedir, topdown=True):
        # Only the build-id directories matter; skip everything else
        if '/usr/lib/debug/.build-id/' not in top:
            continue
        for u in [os.path.join(top,f) for f in files if f.endswith('.debug')]:
            # realpath resolves the build-id symlink to the actual file,
            # so the hardlink below points at real data
            target = os.path.realpath(u)
            linkname = u.split('/usr/lib/debug/.build-id/')[1]
            newlink = os.path.join(targetdir,linkname)
            try:
                os.makedirs(os.path.dirname(newlink),mode=0755)
            except OSError, e:
                # errno 17 == EEXIST: directory already present is fine
                if e.errno != 17:
                    raise e
            if os.path.exists(newlink):
                # Differing stat info means two packages claim the same
                # build-id (collision) or the data is damaged
                if warn_on_collision and (os.stat(newlink) != os.stat(target)):
                    # TODO: better logging here
                    print "WARNING: collision or damaged debuginfo detected"
                print "%s exists - removing" % (newlink,)
                os.unlink(newlink)
            os.link(target,newlink)
            count += 1
    return count
+
+# TODO: break this up into more logical chunks
+def main():
+ y = DebuginfoFSDownloader()
+ opt = y.setup()
+ # FIXME this is stupid - figure out how --verbose works
+ if opt.verbose:
+ y.logger = y.verbose_logger
+ # Validate some of the opts
+ # Check permissions on exportdir/cachedir
+ perms_ok = True
+ for d in (opt.exportdir, opt.cachedir):
+ if not os.access(d,os.W_OK):
+ perms_ok = False
+ y.logger.error("you don't have write permission on %s" % d)
+ if not perms_ok:
+ sys.exit(1)
+
+ if opt.basearch not in rpmUtils.arch.arches:
+ y.logger.error("Unrecognized arch %s" % opt.basearch)
+ sys.exit(1)
+
+ # kind of a cheap hack - use values from conf and opt to re-parse URLs
+ my_yumvar = y.conf.yumvar
+ my_yumvar.update(conf)
+ for a in 'basearch', 'releasever':
+ v = getattr(opt,a,None)
+ if v:
+ my_yumvar[a] = v
+ # Another cheap hack - lie about our arch
+ if opt.basearch == 'i386':
+ # hacks upon hacks. blerg.
+ rpmUtils.arch.canonArch = 'i686'
+ else:
+ rpmUtils.arch.canonArch = opt.basearch
+
+ # Set up the requested repos
+ for repoid in y.cmds:
+ try:
+ d = dict([(k,varReplace(v,my_yumvar)) for k,v in c.items(repoid)])
+ except ConfigParser.NoSectionError:
+ y.logger.error("Unknown repo ID %s" % repoid)
+ continue
+ if 'mirrorlist' in d:
+ y.logger.info("Adding repo %s with mirrorlist %s" % (repoid,d['mirrorlist']))
+ repo = YumRepository('%s-%s-%s-debuginfofs' % \
+ (repoid, opt.releasever, opt.basearch))
+ repo.name = repo.id
+ repo.mirrorlist = d['mirrorlist']
+ repo.cache = 0
+ repo.enable()
+ repo.basecachedir = opt.cachedir
+ repo.cachedir = repo.id
+ y.repos.add(repo)
+ y.repos.doSetup(thisrepo=repo.id)
+ # FIXME: locking - don't setup a repo that's in use by another process
+
+ repolist = y.repos.listEnabled()
+ if repolist:
+ # Set up progress callbacks for new repos
+ y.setupProgressCallbacks()
+ for r in repolist:
+ try:
+ r.ready()
+ except yum.Errors.RepoError, e:
+ y.logger.error(str(e))
+ sys.exit(1)
+ else:
+ y.logger.error("Couldn't find any repos to set up.")
+ repos = [s for s in c.sections() if s != 'general']
+ y.logger.error("Known repos: %s" % ", ".join(repos))
+ sys.exit(1)
+
+ # Figure out which packages we need to fetch
+ download_packages = []
+ unpack_packages = []
+ for p in y.pkgSack.returnPackages():
+ repo = y.repos.getRepo(p.repoid)
+ remote = p.returnSimple('relativepath')
+ local = os.path.join(opt.cachedir,os.path.basename(remote))
+ p.localpath = local
+ n = p.pkgtup[0]
+ nevra = pkg_to_nevra(p)
+ #/var/www/debuginfofs/packages/c/coreutils/coreutils-0:6.12-18.fc10.i386
+ newdir = os.path.join(n[0],n,nevra)
+ targetdir = os.path.join(opt.exportdir,'packages',newdir)
+ # Don't bother checking - just add everything to the list
+ if opt.force_download:
+ download_packages.append(p)
+ continue
+
+ # Skip it if it's already unpacked
+ if os.path.isdir(targetdir):
+ y.logger.info("Already unpacked %s" % nevra)
+ os.utime(targetdir, None)
+ continue
+
+ # XXX does y.downloadPackages handle this for us?
+ if (os.path.exists(local) and
+ os.path.getsize(local) == int(p.returnSimple('packagesize'))):
+ y.logger.info("Already downloaded %s" % nevra)
+ unpack_packages.append(p)
+ else:
+ download_packages.append(p)
+
+ if not (unpack_packages or download_packages):
+ y.verbose_logger.info(_('Nothing to do!'))
+ sys.exit(0)
+
+ # Show what will be downloaded and get confirmation
+ y.verbose_logger.info(y.listDownloads(download_packages))
+ y.reportDownloadSize(download_packages)
+ # FIXME: show some info on unpack_packages
+ y.verbose_logger.info("Downloading to cachedir %s" % opt.cachedir)
+ y.verbose_logger.info("Unpacking to exportdir %s" % opt.exportdir)
+ if y._promptWanted():
+ if not y.userconfirm():
+ y.logger.info(_('Exiting on user Command'))
+ sys.exit(1)
+
+ # Download, unpack, and hardlink debuginfo data
+ excludepat=None
+ if not opt.keep_src:
+ excludepat="*/usr/src/debug/*"
+ for p in unpack_packages + download_packages:
+ if p in download_packages:
+ repo = y.repos.getRepo(p.repoid)
+ # FIXME handle possible errors here
+ repo.getPackage(p)
+ if opt.download_only:
+ continue
+ local = p.localpath
+ n = p.pkgtup[0]
+ nevra = pkg_to_nevra(p)
+ newdir = os.path.join(n[0],n,nevra)
+ targetdir = os.path.join(opt.exportdir,'packages',newdir)
+
+ y.verbose_logger.info("Unpacking %s" % nevra)
+ unpack_rpm(local, targetdir, excludepat=excludepat)
+ # Remove cached package now that we've unpacked it
+ if not opt.savecache:
+ os.unlink(local)
+ # Make hardlinks
+ r = mkdebuginfolinks(targetdir, os.path.join(opt.exportdir,'build-id'))
+ y.logger.info("Linked %3i debuginfo file%s" % (r, r != 1 and "s" or ""))
+
if __name__ == '__main__':
    # Exit quietly (no traceback) on ^C
    try:
        main()
    except KeyboardInterrupt:
        print "Exiting on keyboard interrupt"
diff --git a/server/debuginfofs-server.init b/server/debuginfofs-server.init
new file mode 100755
index 0000000..ac294bc
--- /dev/null
+++ b/server/debuginfofs-server.init
@@ -0,0 +1,89 @@
+#!/bin/bash
+#
+### BEGIN INIT INFO
+# Provides: debuginfofs-server
+# Required-Start: $local_fs $network $named $remote_fs $syslog
+# Required-Stop: $local_fs $network $named $remote_fs $syslog
+# Short-Description: Remote debuginfo filesystem server
+# Description: Export a filesystem with debuginfo for use with debugging tools
+### END INIT INFO
+
+# debuginfofs Starts/stops the debuginfofs WebDAV server
+#
+# chkconfig: - 85 15
+# description: debuginfofs-server starts a WebDAV server which exports the \
+# debuginfo data fetched with debuginfofs-mirror.
+# config: /etc/debuginfofs.conf
+
# Source function library.
. /etc/rc.d/init.d/functions

# Source networking configuration.
. /etc/sysconfig/network

# Read our configuration
# (sed extracts the value of the "exportdir = ..." line from the INI file;
# the captured value keeps any text after the '=' verbatim)
export DEBUGINFOFS_EXPORTDIR="$(sed -ne 's/ *exportdir *=\(.*\)$/\1/p' /etc/debuginfofs.conf 2>/dev/null)"

RETVAL=0
prog="debuginfofs-server"
pidfile="/var/run/$prog.pid"
+
start() {
    # Check that networking is up.
    # NOTE(review): ${NETWORKING} is unquoted -- if it is empty/unset the
    # test becomes "[ = no ]" and errors instead of comparing; confirm.
    [ ${NETWORKING} = "no" ] && exit 1

    # Sanity check binary and configuration
    [ -x /usr/sbin/httpd ] || exit 1
    [ -d "$DEBUGINFOFS_EXPORTDIR" ] || mkdir -p "$DEBUGINFOFS_EXPORTDIR"
    [ -r "$DEBUGINFOFS_EXPORTDIR" ] || exit 1

    echo -n $"Starting debuginfofs-server: "
    # The Apache config reads DEBUGINFOFS_EXPORTDIR (exported above)
    /usr/sbin/httpd -f /usr/share/debuginfofs/dav-debuginfo.conf
    RETVAL=$?

    [ $RETVAL -eq 0 ] && touch /var/lock/subsys/debuginfofs-server
    [ $RETVAL -eq 0 ] && success $"$prog startup" || failure $"$prog startup"
    echo
    return $RETVAL
}
+
stop() {
    echo -n $"Stopping debuginfofs: "
    # Ask the httpd instance started from our config file to shut down
    /usr/sbin/httpd -f /usr/share/debuginfofs/dav-debuginfo.conf -k stop
    RETVAL=$?
    rm -f /var/lock/subsys/debuginfofs-server
    [ $RETVAL -eq 0 ] && success $"$prog shutdown" || failure $"$prog shutdown"
    echo
    return $RETVAL
}
+
# See how we were called.
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart|reload|force-reload)
        stop
        start
        RETVAL=$?
        ;;
    condrestart)
        # Only restart if the service is currently running
        if [ -f /var/lock/subsys/debuginfofs-server ]; then
            stop
            start
            RETVAL=$?
        fi
        ;;
    status)
        status $prog
        RETVAL=$?
        ;;
    *)
        echo $"Usage: $0 {start|stop|restart|condrestart|status}"
        exit 1
esac

exit $RETVAL
diff --git a/server/debuginfofs.conf b/server/debuginfofs.conf
new file mode 100644
index 0000000..29e2f40
--- /dev/null
+++ b/server/debuginfofs.conf
@@ -0,0 +1,21 @@
+# Configuration for debuginfofs
+
+[general]
+# Base URL for the fedora mirrorlist CGI.
+fedora_mirrorlist=http://mirrors.fedoraproject.org/mirrorlist
+# Directory for temporary files (downloaded RPMs, etc)
+cachedir=/var/cache/debuginfofs
+# Directory for the filesystem structure to be exported
+exportdir=/var/www/debuginfofs
+
+[rawhide]
+mirrorlist=$fedora_mirrorlist?repo=rawhide-debug&arch=$basearch
+
+[fedora]
+mirrorlist=$fedora_mirrorlist?repo=fedora-debug-$releasever&arch=$basearch
+
+[fedora-updates]
+mirrorlist=$fedora_mirrorlist?repo=updates-released-debug-f$releasever&arch=$basearch
+
+[fedora-updates-testing]
+mirrorlist=$fedora_mirrorlist?repo=updates-testing-debug-f$releasever&arch=$basearch
diff --git a/server/repofs.py b/server/repofs.py
new file mode 100755
index 0000000..6a0259b
--- /dev/null
+++ b/server/repofs.py
@@ -0,0 +1,373 @@
+#!/usr/bin/python
+# repofs.py - Export the contents of a package repo as a readonly filesystem.
+# Copyright 2009 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+#
+# Author: Will Woods <wwoods@redhat.com>
+#
+# Exports the contents of the repo(s) in a filesystem that looks like this:
+# $PACKAGE_UID/[package contents]
+# where $PACKAGE_UID is some unique package identifier (e.g. ENVRA in RPM)
+#
+# Known bugs:
+# - SimpleYumRepo needs a way to reload metadata
+# - SimpleYumRepo.filecache never shrinks
+# - Stuff goes all to heck if you run multithreaded
+# - Probably need some locking or something to keep threads from fighting
+# over the filecache
+#
+# TODO:
+# - Actually frickin' implement open() and read()
+# - Test mem/disk use with actual repos and actual users
+# - Rewrite unpack() to use rpm2cpio
+# - Rewrite stupid log() method to use logging module
+
+import os
+import glob
+from stat import *
+import errno
+import subprocess
+import time
+
+import fuse
+fuse.fuse_python_api = (0, 2)
+fuse.feature_assert('stateful_files', 'has_init')
+from fuse import Fuse
+
+from sqlite3 import dbapi2 as sqlite
+
+import bz2, gzip
+
+import yum.repoMDObject
+
def bunzip(infile, outfile):
    '''Decompress the bzip2 file `infile` to `outfile`, creating the
    output directory if needed.  Streams in 4KB chunks to bound memory.

    FIX: open the output in binary mode ("wb") -- BZ2File.read() returns
    bytes, and text mode would corrupt the data on Windows and fail
    outright on Python 3.  Also guard against an outfile with no
    directory component (os.makedirs("") raises).'''
    (p, f) = os.path.split(outfile)
    if p and not os.path.isdir(p):
        os.makedirs(p)
    outf = open(outfile, "wb")
    try:
        inf = bz2.BZ2File(infile)
        try:
            data = inf.read(4096)
            while data:
                outf.write(data)
                data = inf.read(4096)
        finally:
            inf.close()
    finally:
        outf.close()
+
def flag2mode(flags):
    '''Map open(2)-style flag bits to a stdio fopen()-style mode string:
    O_RDONLY -> "r", O_WRONLY -> "w", O_RDWR -> "w+", with O_APPEND
    turning "w" into "a" (and "w+" into "a+").'''
    md = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'}
    m = md[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)]
    # BUGFIX: test the O_APPEND bit with '&'.  The old code used
    # 'flags | os.O_APPEND', which is always nonzero, so every writable
    # mode silently became append mode.
    if flags & os.O_APPEND:
        m = m.replace('w', 'a', 1)
    return m
+
def fuseclonestat(stat):
    '''Build a fuse.Stat whose ten standard fields are copied from the
    given stat tuple (mode, ino, dev, nlink, uid, gid, size, atime,
    mtime, ctime).'''
    (mode, ino, dev, nlink, uid, gid, size, atime, mtime, ctime) = stat
    s = fuse.Stat()
    s.st_mode = mode
    s.st_ino = ino
    s.st_dev = dev
    s.st_nlink = nlink
    s.st_uid = uid
    s.st_gid = gid
    s.st_size = size
    s.st_atime = atime
    s.st_mtime = mtime
    s.st_ctime = ctime
    return s
+
def leading_paths(dir):
    '''Return dir followed by each of its ancestor directories, e.g.
    "/a/b" -> ["/a/b", "/a", "/"].  An empty string yields [].'''
    result = []
    current = dir
    while current:
        result.append(current)
        # "/" is its own parent; stop here to avoid looping forever
        if current == '/':
            break
        current = os.path.split(current)[0]
    return result
+
class SimpleYumRepo(object):
    '''Minimal read-only view of a yum repo's sqlite metadata: maps
    package NEVRA strings to their file lists without using the full
    yum stack.'''
    def __init__(self, path=None, cachedir=None):
        self.path = path
        self.cachedir = cachedir
        # {packageuid (NEVRA string): sqlite pkgKey}
        self.pkgkey = {}
        # memo cache for files_for_package(); see the XXX notes below
        self.filecache = {}
        if path:
            self.parse_repomd()

    def parse_repomd(self):
        '''Read repodata/repomd.xml and open sqlite connections to the
        primary and filelists databases, decompressing .bz2 databases
        into cachedir/.repodata first.  Sets self.primary_db,
        self.filelists_db and self.pkgkey.'''
        repomd = os.path.join(self.path, "repodata/repomd.xml")
        repoXML = yum.repoMDObject.RepoMD(self.path, repomd)
        for t in ('primary_db', 'filelists_db'):
            if t in repoXML.fileTypes():
                d = repoXML.getData(t)
                (base,dbpath) = d.location
                dbfile = os.path.join(self.path,dbpath)
                # TODO check for existing db file
                if dbfile.endswith(".bz2"):
                    outfile = os.path.join(self.cachedir,".repodata",
                                           os.path.basename(dbfile)[:-4])
                    bunzip(dbfile,outfile)
                    dbfile = outfile
                # TODO: elif .gz, else...
                con = sqlite.connect(dbfile)
                # exposes self.primary_db / self.filelists_db
                setattr(self,t,con)
        self.pkgkey = self.package_keys_from_db()

    # TODO: need a refresh method to check the repodata and reload it

    def package_uids(self):
        '''return a list of unique identifiers for every package in the repo'''
        return self.pkgkey.keys()

    def package_keys_from_db(self):
        '''Return a dict of {packageuid:dbkey,...}'''
        c = self.primary_db.cursor()
        c.execute("SELECT epoch, name, version, release, arch, pkgKey FROM packages")
        pkgkey = {}
        for (e,n,v,r,a,key) in c:
            nevra = "%s-%s:%s-%s.%s" % (n,e,v,r,a)
            pkgkey[nevra] = key
        return pkgkey

    # Cache filelist data pulled from the database.
    # XXX: Can we make FUSE cache this info instead?
    # XXX: Seriously this is going to expand forever and consume gobs of memory.
    # XXX: Then again, "gobs" might turn out to be, like, several dozen MB
    # (i.e. No Big Deal). Need more testing here!
    def files_for_package(self, packageuid):
        # Memoizing front-end for files_for_package_from_db()
        if packageuid not in self.filecache:
            self.filecache[packageuid] = self.files_for_package_from_db(packageuid)
        return self.filecache[packageuid]

    def files_for_package_from_db(self, packageuid):
        '''Return a list of info about files in the given packageuid.
        Each item is a tuple of the form (abspath,type) where type is
        'd' or 'f' (dir/file).'''
        c = self.filelists_db.cursor()
        key = self.pkgkey.get(packageuid)
        filelist = []
        if not key:
            # Unknown package: empty list rather than an error
            return filelist
        c.execute("SELECT dirname, filenames, filetypes FROM filelist "
                  "WHERE pkgKey=?",(key,))
        dirs = []
        for (dir,names,types) in c:
            if dir not in dirs:
                # Synthesize entries for every ancestor directory too
                for d in leading_paths(dir):
                    if d not in dirs:
                        dirs.append(d)
                        filelist.append((d,'d'))
            # 'filenames' is '/'-separated; 'filetypes' has one 'd'/'f'
            # character per name, in the same order
            for (n,t) in zip(names.split('/'),types):
                f = os.path.join(dir,n)
                if t == 'd' and f in dirs:
                    continue
                else:
                    dirs.append(f)
                    filelist.append((f,t))
        return filelist

    def unpack(self,rpm,targetdir=None):
        '''Unpack an rpm into targetdir (default: self.cachedir) by
        scanning for the gzip magic in the file and piping the payload
        through cpio.'''
        if targetdir:
            if not os.path.isdir(targetdir):
                os.makedirs(targetdir)
        else:
            targetdir = self.cachedir

        inf = open(rpm)
        # Find RPM header and read compression algorithm
        # Skip forward to gzipped CPIO archive
        # FIXME: Awful. Just awful. At least use rpm2cpio.
        # NOTE(review): assumes the gzip payload starts within the first
        # 400KB and that "\x1f\x8b" doesn't appear earlier by chance.
        header = inf.read(409600)
        offset = header.index("\x1f\x8b")
        del header
        inf.seek(offset)
        gz = gzip.GzipFile(fileobj=inf, mode="rb")
        # Open a pipe to "cpio -iumd --quiet"
        cpio = subprocess.Popen(args=["cpio","-iumd","--quiet"],
                                cwd=targetdir, stdin=subprocess.PIPE)
        data = gz.read(4096)
        while data:
            cpio.stdin.write(data)
            data = gz.read(4096)
        gz.close()
        inf.close()
        cpio.stdin.close()
        cpio.wait()
+
class FileStat(fuse.Stat):
    '''Stat entry for a read-only regular file (mode r--r--r--).'''
    def __init__(self, **kw):
        fuse.Stat.__init__(self, **kw)
        # 0o444 == S_IRUSR|S_IRGRP|S_IROTH
        self.st_mode = S_IFREG | 0o444
        self.st_nlink = 1
+
class DirStat(fuse.Stat):
    '''Stat entry for a read-only directory (mode r-xr-xr-x).'''
    def __init__(self, **kw):
        fuse.Stat.__init__(self, **kw)
        # 0o555 == S_IRUSR|S_IRGRP|S_IROTH | S_IXUSR|S_IXGRP|S_IXOTH
        self.st_mode = S_IFDIR | 0o555
        self.st_nlink = 2
+
class FuseRO(Fuse):
    '''A Fuse subclass for implementing readonly filesystems.'''
    def __rofs(self, *args):
        '''Raises OSError(EROFS,"Read-only filesystem")'''
        raise OSError(errno.EROFS, "Read-only filesystem")
    # Every mutating operation shares the EROFS handler above.  (The
    # bare '__rofs' references below work because both the def and the
    # references are name-mangled to _FuseRO__rofs in the class body.)
    chmod = __rofs
    chown = __rofs
    ftruncate = __rofs
    link = __rofs
    mkdir = __rofs
    mknod = __rofs
    removexattr = __rofs
    rename = __rofs
    rmdir = __rofs
    setxattr = __rofs
    symlink = __rofs
    truncate = __rofs
    unlink = __rofs

    def write(self, *args):
        '''write() function that raises IOError(EBADF)
        You can't open files for writing; this is a readonly filesystem!'''
        raise IOError(errno.EBADF, "write() on readonly filesystem")

    def _check_open(self, flags):
        '''checks the open() flags, and returns False if write access
        was requested. Returns True otherwise.'''
        accmode = os.O_RDONLY | os.O_WRONLY | os.O_RDWR
        return (flags & accmode) == os.O_RDONLY
+
class Repofs(FuseRO):
    '''FUSE filesystem presenting each package in the configured repo(s)
    as a toplevel directory named by its packageuid (NEVRA), containing
    the package's files.'''
    def __init__(self, *args, **kw):
        Fuse.__init__(self, *args, **kw)
        self.do_logging = False

    # FIXME: this logging is terrible
    def log(self,message):
        # Appends to cachedir/.log (opened unbuffered in fsinit) when on
        if self.do_logging:
            self.logfile.write("%s %s\n" % (time.asctime(), message))

    def fsinit(self):
        '''Called by FUSE after mount: create the cachedir and build a
        SimpleYumRepo for each ':'-separated path in self.repopath.'''
        if not os.path.isdir(self.cachedir):
            os.makedirs(self.cachedir)
        if self.do_logging:
            self.logfile = open(os.path.join(self.cachedir,".log"),"a",0)
        self.log("fsinit(path=%s). Hang on.." % self.repopath)
        # TODO: figure out the repo type (Yum, etc) and use the right class.
        # That way we can support other distros. Yay!
        self.repos = []
        for rp in self.repopath.split(":"):
            r = SimpleYumRepo(path=rp, cachedir=self.cachedir)
            self.repos.append(r)
            self.log(" cachedir=%s, repopath=%s" % (r.cachedir, r.path))

    def _splitpath(self, path):
        '''Split an absolute path into (packageuid, path)'''
        path = path.lstrip('/')
        p = path.split('/',1)
        if len(p) == 1:
            # Toplevel entry: in-package path is the package root
            p.append('')
        p[1] = '/' + p[1]
        return p

    def readdir(self, path, offset):
        '''Yield fuse.Direntry objects for the entries under path.'''
        self.log("readdir('%s', %s)" % (path, str(offset)))
        for repo in self.repos:
            if path == "/":
                # Root dir: one subdir per package in the repo
                for uid in repo.package_uids():
                    d = fuse.Direntry(str(uid))
                    d.type = S_IFDIR
                    yield d
            else:
                # NOTE(review): this rebinds 'path', so with more than one
                # repo the second loop iteration re-splits the already
                # split in-package path -- confirm multi-repo behavior.
                (packageuid, path) = self._splitpath(path)
                for (f,t) in repo.files_for_package(packageuid):
                    (dir, basename) = os.path.split(f)
                    # Only direct children of the requested directory
                    if dir == path and basename:
                        d = fuse.Direntry(str(basename))
                        if t == 'd':
                            d.type = S_IFDIR
                        else:
                            d.type = S_IFREG
                        yield d

    def getattr(self, path):
        '''Return a Stat for path, or -ENOENT if no repo knows it.'''
        self.log("getattr('%s')" % path)
        if (path == '/'):
            return DirStat()
        (packageuid, path) = self._splitpath(path)
        for repo in self.repos:
            for (f,t) in repo.files_for_package(packageuid):
                if f == path:
                    if t == 'f':
                        return FileStat()
                    elif t == 'd':
                        return DirStat()
        #raise OSError(errno.ENOENT, "No such file or directory")
        # SourceForge FUSE Python reference says to use this instead:
        return -errno.ENOENT

    def statfs(self):
        #self.log("statfs()")
        # Report the cachedir's filesystem stats for the mount
        local_s = os.statvfs(self.cachedir)
        #s = fuse.StatVFS()
        # FIXME modify s using info from local_s
        return local_s

    # NOTE(review): this nested class is incomplete -- a 'def' with no
    # body is a syntax error, so the file cannot be imported as committed.
    class RepofsFile(object):
        def __init__(self, path, flags, *mode):

    # TODO: fgetattr flush fsdestroy fsync fsyncdir
    #       getxattr listxattr lock read utime utimens
    # NOTE open, opendir, release, releasedir: unused/unneeded.
    # NOTE bmap, readlink: not implemented (doesn't make sense)

    # def access(self, path, mode):
    #     self.log("access('%s',%s)" % (path, oct(mode)))
    #     s = self.getattr(path) # Will raise an exception if ENOENT
    #     if mode & os.W_OK:
    #         self.__rofs() # Raises EROFS
    #     if S_ISREG(s.st_mode) and mode & os.X_OK:
    #         raise OSError(errno.EACCES, "Permission denied")
    #
    # # FIXME: use file objects instead?
    #
    # def open(self, path, flags):
    #     self.log("open(%s,%s)" % (path,flags))
    #     return open(self._cachefile(path),flag2mode(flags))
    #
    # def read(self, path, length, offset, fh=None):
    #     self.log("read(%s,%s,%s)" % (path,length,offset))
    #     fh.seek(offset)
    #     return fh.read(length)
    #
    # def release(self, path, fh=None):
    #     self.log("release(%s)" % path)
    #     fh.close()
    #
    # def fgetattr(self, path, fh=None):
    #     self.log("fgetattr(%s)" % path)
    #     return os.fstat(fh.fileno())

    def main(self, *a, **kw):
        # Thin passthrough to the FUSE main loop
        return Fuse.main(self, *a, **kw)
+
def main():
    '''Build the Repofs server, register its mount options, and run it.'''
    usage = ('Repofs: mount a package repo and export all the files '
             'in the packages.\n\n') + Fuse.fusage
    fs = Repofs(version="%prog " + fuse.__version__, usage=usage,
                dash_s_do='setsingle')
    # -o repo=PATH and -o cachedir=PATH mount options
    fs.parser.add_option(mountopt="repo", metavar="PATH", dest="repopath",
                         help="Package repo to mount")
    fs.parser.add_option(mountopt="cachedir", metavar="PATH",
                         help="Cache dir for expanded packages")
    # Store parsed option values as attributes on the server itself
    fs.parse(values=fs, errex=1)
    fs.main()
+
# Run the FUSE server when invoked as a script
if __name__ == '__main__':
    main()
+
def will_test_setup():
    '''Hand-testing helper: build a Repofs pointed at /tmp/test-repofs
    and initialize it directly, bypassing FUSE option parsing.'''
    fs = Repofs()
    fs.repopath = "/tmp/test-repofs/repo"
    fs.cachedir = "/tmp/test-repofs/cache"
    fs.fsinit()
    return fs