author     Colin Walters <walters@verbum.org>    2010-10-02 15:39:18 -0400
committer  Colin Walters <walters@verbum.org>    2010-10-02 15:39:18 -0400
commit     77ad7371c5906b83db49de309ba39d867b673975 (patch)
tree       be15ba7c0cd193de3542d42cc7e7594c7bbff995
download   rpmci-77ad7371c5906b83db49de309ba39d867b673975.tar.gz
           rpmci-77ad7371c5906b83db49de309ba39d867b673975.tar.xz
           rpmci-77ad7371c5906b83db49de309ba39d867b673975.zip
Initial untested stab at vcs mirroring component, with DESIGN plan
-rw-r--r--  DESIGN                    15
-rw-r--r--  async_subprocess.py       81
-rw-r--r--  lame_vcs_abstraction.py  134
-rw-r--r--  msgqueue.py               87
-rw-r--r--  rpmci-vcs-mirror         165
-rw-r--r--  sample.config             12
-rw-r--r--  spec.py                  264
7 files changed, 758 insertions(+), 0 deletions(-)
diff --git a/DESIGN b/DESIGN
new file mode 100644
index 0000000..a6bebfa
--- /dev/null
+++ b/DESIGN
@@ -0,0 +1,15 @@
+rpmci-generate-config:
+ takes master build configuration, generates config files for individual components
+
+rpmci-vcs-mirror:
+ mirror git repositories into local dirs
+ send update message notifications
+
+rpmci-srpm-builder:
+ consumes updated git repositories
+ makes SRPMs for configured release sets
+
+rpmci-builder:
+ takes updated srpms
+ calls mock-many-srpms for configured releases
+
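+Example update message (a sketch; the only structure msgqueue.py requires
+is a JSON object with 'headers' and 'payload' members, so the payload
+field names here are illustrative):
+
+  {"headers": {"type": "vcs-update"},
+   "payload": {"url": "git://git.gnome.org/gnome-shell",
+               "id": "<new commit id>"}}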
diff --git a/async_subprocess.py b/async_subprocess.py
new file mode 100644
index 0000000..53abaa1
--- /dev/null
+++ b/async_subprocess.py
@@ -0,0 +1,81 @@
+# async_subprocess.py:
+# Run a subprocess asynchronously using GLib
+#
+# Licensed under the new-BSD license (http://www.opensource.org/licenses/bsd-license.php)
+# Copyright (C) 2010 Red Hat, Inc.
+# Written by Colin Walters <walters@verbum.org>
+
+import os
+import logging
+
+import glib
+
+PIPE_STDOUT = 'pipe-stdout'
+
+def spawn_async_log_info(argv, cwd=None, stdout=None):
+ logging.info("Starting subprocess: %r" % (argv, ))
+ return AsyncProcess(argv, cwd=cwd, stdout=stdout)
+
+def spawn_async_output_to_file(argv, output_filepath, on_exited, cwd=None):
+ f = open(output_filepath, 'w')
+ process = AsyncProcess(argv, cwd=cwd, stdout=PIPE_STDOUT)
+ def _write_to_file(process, buf):
+ f.write(buf)
+    def _on_exited(process, condition):
+        f.close()
+        on_exited(process, condition)
+ process.set_callbacks(_write_to_file, _on_exited)
+ return process
+
+class AsyncProcess(object):
+ def __init__(self, argv, cwd=None, stdout=None):
+ self.argv = argv
+        if stdout == PIPE_STDOUT:
+            (pid, stdin, stdout, stderr) = glib.spawn_async(argv,
+                                               working_directory=cwd,
+                                               flags=(glib.SPAWN_SEARCH_PATH
+                                                      | glib.SPAWN_DO_NOT_REAP_CHILD),
+                                               standard_output=True)
+            assert stdout is not None
+            assert stderr is None
+        else:
+            (pid, stdin, stdout, stderr) = glib.spawn_async(argv,
+                                               working_directory=cwd,
+                                               flags=(glib.SPAWN_SEARCH_PATH
+                                                      | glib.SPAWN_DO_NOT_REAP_CHILD
+                                                      | glib.SPAWN_STDOUT_TO_DEV_NULL
+                                                      | glib.SPAWN_STDERR_TO_DEV_NULL))
+            assert stdout is None
+            assert stderr is None
+
+        assert stdin is None
+        self.pid = pid
+        self._child_watch_id = glib.child_watch_add(pid, self._on_child_exited)
+        self._stdout_fd = stdout
+        self._stdout_watch_id = 0
+        self._output_callback = None
+        self._exited_callback = None
+
+    def set_callbacks(self, output_callback, exited_callback):
+        self._output_callback = output_callback
+        self._exited_callback = exited_callback
+        if self._stdout_fd is not None and self._stdout_watch_id == 0:
+            self._stdout_watch_id = glib.io_add_watch(self._stdout_fd,
+                                                      glib.IO_IN | glib.IO_ERR | glib.IO_HUP,
+                                                      self._on_io)
+
+    def _on_child_exited(self, pid, condition):
+        logging.info("Child pid %d exited with code %r" % (pid, condition))
+        if self._exited_callback is not None:
+            self._exited_callback(self, condition)
+
+ def _on_io(self, source, condition):
+        have_read = condition & glib.IO_IN
+ if have_read:
+ buf = os.read(source, 8192)
+ self._output_callback(self, buf)
+ if ((condition & glib.IO_ERR) > 0
+ or (condition & glib.IO_HUP) > 0):
+ os.close(source)
+ self._stdout_watch_id = 0
+ return False
+ return have_read
+
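+# Example usage (a sketch; nothing in this commit runs it): capture the
+# output of "ls /" to a log file, then quit the main loop once the child
+# has exited.
+if __name__ == '__main__':
+    loop = glib.MainLoop()
+    def _on_done(process, condition):
+        loop.quit()
+    spawn_async_output_to_file(['ls', '/'], '/tmp/async-subprocess-demo.log', _on_done)
+    loop.run()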
diff --git a/lame_vcs_abstraction.py b/lame_vcs_abstraction.py
new file mode 100644
index 0000000..67a3c1d
--- /dev/null
+++ b/lame_vcs_abstraction.py
@@ -0,0 +1,134 @@
+#!/usr/bin/python
+# lame_vcs_abstraction.py:
+#
+# Licensed under the new-BSD license (http://www.opensource.org/licenses/bsd-license.php)
+# Copyright (C) 2010 Red Hat, Inc.
+# Written by Colin Walters <walters@verbum.org>
+
+# Feel free to replace the bits here with something better...
+
+import urlparse
+import subprocess
+import logging
+
+import async_subprocess
+
+class Vcs(object):
+ def __init__(self, parsedurl):
+ self._parsed_url = parsedurl
+ # Deliberately drop params/query
+ self._nonfragment_url_string = urlparse.urlunparse((parsedurl.scheme,
+ parsedurl.netloc,
+ parsedurl.path,
+ '', '', ''))
+ self._branch = self._parsed_url.fragment
+
+ self._dir = None
+
+ def get_url(self):
+ return self._parsed_url
+
+    def checkout_async(self, destdir, logfile, on_exited):
+        """Retrieve a new copy of the source tree, saving it as destdir."""
+        raise NotImplementedError()
+
+    def update_async(self, logfile, on_exited):
+        """Update the local mirror from the latest upstream."""
+        raise NotImplementedError()
+
+    def export_archive(self, prefix, target_filename, logfile):
+        """Export a tarball with minimal (or no) version control data."""
+        raise NotImplementedError()
+
+    def get_scheme(self):
+        return self._parsed_url.scheme
+
+    def get_id(self):
+        raise NotImplementedError()
+
+    def get_abbreviated_id(self):
+        raise NotImplementedError()
+
+    def _vcs_exec_sync_log_error(self, args):
+        logging.info("Synchronously executing: %r" % (args, ))
+        return subprocess.check_output(args, stderr=subprocess.STDOUT, close_fds=True, cwd=self._dir)
+
+    def _vcs_exec_async(self, args, logfile_path, on_exited):
+        logging.info("Asynchronously executing: %r" % (args, ))
+        return async_subprocess.spawn_async_output_to_file(args, logfile_path, on_exited, cwd=self._dir)
+
+ @classmethod
+ def new_from_spec(cls, spec):
+ """See http://maven.apache.org/scm/scm-url-format.html ; we use this format,
+ but without the "scm:" prefix."""
+ # Hack for backwards compatibility
+ if spec.startswith('git://'):
+ (vcstype, url) = ('git', spec)
+ else:
+ (vcstype, url) = spec.split(':', 1)
+ orig = urlparse.urlsplit(url)
+ # We want to support fragments, even if the URL type isn't recognized. So change the
+ # scheme to http temporarily.
+ temp = urlparse.urlunsplit(('http', orig.netloc, orig.path, orig.query, orig.fragment))
+ new = urlparse.urlsplit(temp)
+ combined = urlparse.SplitResult(orig.scheme, new.netloc, new.path, new.query, new.fragment)
+        if vcstype == 'git':
+            return GitVcs(combined)
+        raise ValueError("Unhandled VCS type %r" % (vcstype, ))
+
+class GitVcs(Vcs):
+ vcstype = "git"
+
+ def checkout_async(self, destdir, logfile, on_exited):
+ assert self._dir is None
+ args = ['git', 'clone']
+ if self._branch:
+ args.extend(['-b', self._branch])
+ args.extend([self._nonfragment_url_string, destdir])
+ return self._vcs_exec_async(args, logfile, on_exited)
+
+ def update_async(self, logfile, on_exited):
+ assert self._dir is not None
+ return self._vcs_exec_async(['git', 'pull', '-r'], logfile, on_exited)
+
+    def export_archive(self, prefix, target_filename, logfile):
+        if not prefix.endswith('/'):
+            prefix += '/'
+        args = ['git', 'archive', '--format=tar', '--prefix=%s' % (prefix, ), 'HEAD']
+        logging.info("Synchronously executing: %r" % (args, ))
+        log_f = open(logfile, 'w')
+        gitproc = subprocess.Popen(args, cwd=self._dir, stdout=subprocess.PIPE, stderr=log_f)
+        if target_filename.endswith('.bz2'):
+            zipbin = 'bzip2'
+        elif target_filename.endswith('.gz'):
+            zipbin = 'gzip'
+        else:
+            raise ValueError("Unknown compression for filename %r" % (target_filename, ))
+        args = [zipbin, '-c']
+        logging.info("Synchronously executing: %r" % (args, ))
+        f = open(target_filename, 'w')
+        zipproc = subprocess.Popen(args, cwd=self._dir, stdout=f, stdin=gitproc.stdout, stderr=log_f)
+        zipproc.wait()
+        gitproc.wait()
+        f.close()
+        log_f.close()
+
+ def get_commit_as_patch(self, commitid, destfile):
+ output = self._vcs_exec_sync_log_error(['git', 'format-patch', '--stdout', commitid + '^..' + commitid])
+ f = open(destfile, 'w')
+ f.write(output)
+ f.close()
+
+ def get_id(self):
+ output = self._vcs_exec_sync_log_error(['git', 'show', '--format=%H'])
+ return output.split('\n')[0]
+
+ def get_abbreviated_id(self):
+ full_id = self.get_id()
+ return full_id[0:8]
+
+ def get_commit_summary_as_filename(self, commitid):
+ output = self._vcs_exec_sync_log_error(['git', 'show', '--format=%f', commitid])
+ return output.split('\n')[0]
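+
+# Quick manual test (a sketch): parse a Maven-style SCM specification and
+# print the derived checkout URL and branch.
+if __name__ == '__main__':
+    vcs = Vcs.new_from_spec('git:git://git.gnome.org/gnome-shell#master')
+    print vcs._nonfragment_url_string, vcs._branch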
diff --git a/msgqueue.py b/msgqueue.py
new file mode 100644
index 0000000..737740f
--- /dev/null
+++ b/msgqueue.py
@@ -0,0 +1,87 @@
+# msgqueue.py: Persistent message queue based on files
+#
+# Licensed under the new-BSD license (http://www.opensource.org/licenses/bsd-license.php)
+# Copyright (C) 2010 Red Hat, Inc.
+# Written by Colin Walters <walters@verbum.org>
+
+import os
+import hashlib
+import json
+
+import gio
+
+class Message(object):
+ def __init__(self, ident, headers, payload):
+ self.ident = ident
+ self.headers = headers
+ self.payload = payload
+
+ def serialize(self):
+ return json.dumps({'headers': self.headers,
+ 'payload': self.payload})
+
+    def __cmp__(self, other):
+        return cmp(self.ident, other.ident)
+
+    def __hash__(self):
+        return hash(self.ident)
+
+ @classmethod
+ def parse(cls, md5sum, stream):
+ contents = json.load(stream)
+ if 'headers' in contents:
+ headers = contents['headers']
+ else:
+ headers = {}
+ if 'payload' in contents:
+ payload = contents['payload']
+ else:
+            raise ValueError("Missing 'payload' in message %s" % (md5sum, ))
+ return cls(md5sum, headers, payload)
+
+class MessageQueue(object):
+ def __init__(self, dirpath):
+ self._dirpath = dirpath
+ self._dir_gfile = gio.File(path=dirpath)
+ self._monitor = self._dir_gfile.monitor(gio.FILE_MONITOR_NONE)
+ self._monitor.connect('changed', self._on_dir_changed)
+ self._subscribers = []
+ self._consumed = []
+
+ def connect(self, callback):
+ self._subscribers.append(callback)
+
+ def consume(self, message):
+ self._consumed.append(message)
+
+ def append(self, message):
+ serialized = message.serialize()
+ digest = hashlib.md5()
+ digest.update(serialized)
+ hexdigest = digest.hexdigest()
+ filename = os.path.join(self._dirpath, hexdigest)
+ temp_filename = os.path.join(self._dirpath, '_' + hexdigest)
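+        # Write to a "_"-prefixed temporary file, then rename(2) it into place:
+        # consumers skip "_" names, and the rename is atomic, so a reader never
+        # sees a partially written message.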
+ f = open(temp_filename, 'w')
+ f.write(serialized)
+ f.close()
+ os.rename(temp_filename, filename)
+
+ def _on_dir_changed(self, mon, gfile, other, event):
+ dir_contents = os.listdir(self._dirpath)
+ messages = set()
+ for filename in dir_contents:
+ if filename.startswith('_'):
+ continue
+ file_path = os.path.join(self._dirpath, filename)
+ if not os.path.isfile(file_path):
+ continue
+ f = open(file_path)
+ message = Message.parse(filename, f)
+ f.close()
+ messages.add(message)
+
+ self._consumed = []
+ for subscriber in self._subscribers:
+ subscriber(iter(messages))
+ for msg in self._consumed:
+ messages.remove(msg)
+ self._consumed = []
+
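+# Minimal smoke test (a sketch; not wired into anything in this commit):
+# write one message into a temporary queue directory and read it back.
+if __name__ == '__main__':
+    import tempfile
+    qdir = tempfile.mkdtemp(prefix='msgqueue-demo-')
+    queue = MessageQueue(qdir)
+    queue.append(Message(None, {'type': 'demo'}, {'value': 42}))
+    for name in os.listdir(qdir):
+        f = open(os.path.join(qdir, name))
+        print Message.parse(name, f).payload
+        f.close()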
diff --git a/rpmci-vcs-mirror b/rpmci-vcs-mirror
new file mode 100644
index 0000000..af114ac
--- /dev/null
+++ b/rpmci-vcs-mirror
@@ -0,0 +1,165 @@
+#!/usr/bin/python
+
+# rpmci-vcs-mirror:
+# Poll a set of VCS URLs, maintaining local mirror repositories and
+# sending update notifications.
+# Licensed under the new-BSD license (http://www.opensource.org/licenses/bsd-license.php)
+# Copyright (C) 2010 Red Hat, Inc.
+# Written by Colin Walters <walters@verbum.org>
+
+import os
+import sys
+import time
+import urllib
+import optparse
+from ConfigParser import SafeConfigParser
+import logging
+
+import glib
+
+import msgqueue
+import lame_vcs_abstraction
+
+class VCSMirror(object):
+ def __init__(self, config, urls):
+ self.config = config
+ self.urls = urls
+
+ self._dir = config.get('VCS', 'mirrordir')
+ self._process_dir = config.get('DEFAULT', 'process_logdir')
+        self._timeout_seconds = config.getint('VCS', 'poll_seconds')
+        self._max_host_concurrency = config.getint('VCS', 'max_host_concurrency')
+        self._max_concurrency = config.getint('VCS', 'max_concurrency')
+
+ self._vcslist = []
+ for url in urls:
+ self._vcslist.append(lame_vcs_abstraction.Vcs.new_from_spec(url))
+
+ self._vcs_by_host = {}
+ for vcs in self._vcslist:
+ host = self._host_for_vcs(vcs)
+ if not host in self._vcs_by_host:
+ self._vcs_by_host[host] = []
+ self._vcs_by_host[host].append(vcs)
+
+        self._vcs_queue = []
+        self._jobs_by_host = {} # string host -> list of (process, vcs, previous_id)
+        self._poll_timeout_id = 0
+
+ def _host_for_vcs(self, vcs):
+ return vcs.get_url().netloc
+
+    def _escape_vcs_url(self, vcs):
+        return urllib.quote(vcs.get_url().geturl(), '')
+
+ def _cachedir_for_vcs(self, vcs):
+ return os.path.join(self._dir, self._escape_vcs_url(vcs))
+
+ def start(self):
+        logging.info("Starting poll of %d repositories on %d unique hosts"
+                     % (len(self._vcslist), len(self._vcs_by_host)))
+
+ # Schedule jobs for now
+ for vcs in self._vcslist:
+ self._vcs_queue.append((vcs, 0))
+ for host in self._vcs_by_host:
+ self._jobs_by_host[host] = []
+ self._poll()
+
+    def _on_vcs_job_exited(self, vcs, process, condition):
+        text = "VCS %r task %r exited with code %d" % (vcs, process, condition)
+        if condition != 0:
+            logging.warning(text)
+        else:
+            logging.info(text)
+        host = self._host_for_vcs(vcs)
+        jobs = self._jobs_by_host[host]
+        index = -1
+        for i, (job_process, job_vcs, _previous_id) in enumerate(jobs):
+            if job_vcs == vcs:
+                index = i
+                break
+        assert index >= 0
+        del jobs[index]
+
+        # Requeue this repository for its next poll before rescheduling.
+        target_time = int(time.time()) + self._timeout_seconds
+        self._vcs_queue.append((vcs, target_time))
+
+        self._poll()
+
+ def _num_active_jobs(self):
+ active = 0
+ for host in self._jobs_by_host:
+ active += len(self._jobs_by_host[host])
+ return active
+
+    def _poll(self):
+        current_time = int(time.time())
+        timeout = self._timeout_seconds
+
+        logging.info("Doing poll (%d active tasks)" % (self._num_active_jobs(), ))
+        not_ready = []
+        for (vcs, target_time) in self._vcs_queue:
+            time_delta = target_time - current_time
+            if time_delta > 0:
+                # Not due yet; wake up again when the soonest item is due.
+                if time_delta < timeout:
+                    timeout = time_delta
+                not_ready.append((vcs, target_time))
+                continue
+            host = self._host_for_vcs(vcs)
+            if len(self._jobs_by_host[host]) >= self._max_host_concurrency:
+                logging.debug("Skipping job %r; at concurrency limit %d"
+                              % (vcs, self._max_host_concurrency))
+                not_ready.append((vcs, target_time))
+                continue
+
+            vcsdir = self._cachedir_for_vcs(vcs)
+            job_logname = '%s-update-%s-%d.log' % (vcs.vcstype, self._escape_vcs_url(vcs),
+                                                   current_time, )
+            job_logpath = os.path.join(self._process_dir, job_logname)
+            # Bind vcs as a default argument so each closure sees its own value.
+            def _on_job_exited(process, condition, vcs=vcs):
+                self._on_vcs_job_exited(vcs, process, condition)
+            if not os.path.isdir(vcsdir):
+                previous_id = None
+                logging.info("Doing initial checkout for %r" % (vcs.get_url(), ))
+                process = vcs.checkout_async(vcsdir, job_logpath, _on_job_exited)
+            else:
+                vcs._dir = vcsdir
+                previous_id = vcs.get_id()
+                logging.info("Doing update for %r (starting from commit id %s)" % (vcs.get_url(), previous_id))
+                process = vcs.update_async(job_logpath, _on_job_exited)
+            self._jobs_by_host[host].append((process, vcs, previous_id))
+        self._vcs_queue = not_ready
+        # Reschedule, collapsing any previously queued wakeup so direct
+        # calls to _poll() don't pile up timers.
+        if self._poll_timeout_id > 0:
+            glib.source_remove(self._poll_timeout_id)
+        self._poll_timeout_id = glib.timeout_add_seconds(timeout, self._on_poll_timeout)
+
+    def _on_poll_timeout(self):
+        self._poll_timeout_id = 0
+        self._poll()
+        return False
+
+
+def main():
+    opts = optparse.OptionParser("usage: %prog [options]")
+    opts.add_option('-c', '--config', dest='config', help="Path to configuration file")
+    opts.add_option('--debug', action='store_true', dest='debug', help="Print verbose debugging")
+
+    (options, args) = opts.parse_args()
+
+    if options.config is None:
+        print "Must specify --config"
+        sys.exit(1)
+
+    level = logging.DEBUG if options.debug else logging.INFO
+    logging.basicConfig(stream=sys.stderr, level=level)
+
+    # sample.config interpolates %(home)s; supply it through the
+    # ConfigParser defaults.
+    config = SafeConfigParser({'home': os.environ['HOME']})
+    config.read(options.config)
+
+    mirrordir = config.get('VCS', 'mirrordir')
+
+    vcslist = os.path.join(mirrordir, 'vcs.txt')
+
+    if not os.path.isfile(vcslist):
+        print "Missing mirror configuration file %r" % (vcslist, )
+        sys.exit(1)
+
+    f = open(vcslist)
+    urls = [line.strip() for line in f if line.strip()]
+    f.close()
+
+    mirror = VCSMirror(config, urls)
+    mirror.start()
+
+    loop = glib.MainLoop()
+    loop.run()
+
+if __name__ == '__main__':
+    main()
diff --git a/sample.config b/sample.config
new file mode 100644
index 0000000..f7c977f
--- /dev/null
+++ b/sample.config
@@ -0,0 +1,12 @@
+[DEFAULT]
+basedir=%(home)s/rpmci
+process_logdir=%(basedir)s/process-logs
+
+[VCS]
+mirrordir=%(basedir)s/vcs-mirror
+# Minimum seconds between poll requests for a repository
+poll_seconds=60
+# Max connections to a given host like "git.gnome.org"
+max_host_concurrency=4
+# Hard limit on maximum concurrent operations
+max_concurrency=16
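+
+# The mirror reads its repository list from <mirrordir>/vcs.txt, one
+# SCM-style URL per line (parsed by lame_vcs_abstraction.Vcs.new_from_spec),
+# for example:
+#   git:git://git.gnome.org/gnome-shell#master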
diff --git a/spec.py b/spec.py
new file mode 100644
index 0000000..40ef6af
--- /dev/null
+++ b/spec.py
@@ -0,0 +1,264 @@
+#!/usr/bin/python
+# spec.py: Read and write RPM .spec files
+#
+# Licensed under the new-BSD license (http://www.opensource.org/licenses/bsd-license.php)
+# Copyright (C) 2010 Red Hat, Inc.
+# Written by Colin Walters <walters@verbum.org>
+
+import os
+import sys
+import re
+
+class Spec(object):
+ # These two constants were cribbed from rpm-spec-mode.el.
+ SECTIONS = ('%preamble', '%description', '%prep', '%setup',
+ '%build', '%install', '%check', '%clean',
+ '%changelog', '%files')
+ SCRIPTS = ('%pre', '%post', '%preun', '%postun',
+               '%trigger', '%triggerin', '%triggerprein',
+ '%triggerun', '%triggerpostun', '%pretrans',
+ '%posttrans')
+ def __init__(self, filename):
+ self._filename = filename
+ f = open(filename)
+ self._lines = self._read_lines_joining_backslash(f)
+ f.close()
+ self._saved = False
+ self._append_buildrequires = []
+ self._new_release = None
+ self._source_dirname = None
+ self._source_archivename = None
+ self._substitutions = []
+ # Map from section name (e.g. '%build') -> (list of functions)
+ self._section_filters = {}
+ self._added_patches = []
+
+ def _read_lines_joining_backslash(self, f):
+ lines = f.readlines()
+ concat_line = None
+ out_lines = []
+ for line in lines:
+ if line.endswith('\\\n'):
+ if concat_line is None:
+ concat_line = line[:-2]
+ else:
+ concat_line += line[:-2]
+ elif concat_line:
+ out_lines.append(concat_line + line)
+ concat_line = None
+ else:
+ out_lines.append(line)
+ if concat_line:
+ out_lines.append(concat_line + '\n')
+ return out_lines
+
+ def get_name(self):
+ return self._filename[:-5]
+
+ def add_buildrequires(self, new_buildrequires):
+ assert not self._saved
+ current_buildrequires = self.get_key_allvalues('BuildRequires')
+ new_buildrequires = filter(lambda x: x not in current_buildrequires, new_buildrequires)
+ self._append_buildrequires = new_buildrequires
+
+ def increment_release_snapshot(self, identifier):
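+        # Worked examples (with identifier "20101002git1234abcd"):
+        #   Release: 1%{?dist}   ->  1.0.20101002git1234abcd%{?dist}
+        #   Release: 3.2.old-id  ->  3.3.20101002git1234abcd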
+ assert not self._saved
+ cur_release = self.get_key('Release')
+ release_has_dist = cur_release.endswith('%{?dist}')
+ if release_has_dist:
+ cur_release = cur_release[:-8]
+ snapshot_release_re = re.compile(r'^([0-9]+)\.([0-9]+)\.')
+ numeric_re = re.compile(r'^([0-9]+)$')
+ match = snapshot_release_re.match(cur_release)
+ if match:
+ firstint = int(match.group(1))
+ relint = int(match.group(2)) + 1
+ new_release = '%d.%d.%s' % (firstint, relint, identifier)
+ else:
+ match = numeric_re.match(cur_release)
+ if not match:
+ raise ValueError("Can't handle Release value: %r" % (cur_release, ))
+ new_release = '%s.0.%s' % (cur_release, identifier)
+ if release_has_dist:
+ new_release += '%{?dist}'
+
+ self._new_release = new_release
+
+ def set_source(self, dirname, archivename):
+ assert not self._saved
+ self._source_dirname = dirname
+ self._source_archivename = archivename
+
+ def add_section_filter(self, name, function):
+ if not (name in self.SECTIONS or name in self.SCRIPTS):
+ raise KeyError("Invalid section name %r" % (name, ))
+ if name not in self._section_filters:
+ self._section_filters[name] = []
+ self._section_filters[name].append(function)
+
+ def _line_is_section(self, line):
+ for section in self.SECTIONS:
+ if line.startswith(section):
+ return True
+ for section in self.SCRIPTS:
+ if line.startswith(section):
+ return True
+ return False
+
+    def _get_range_for_section(self, name):
+        if not (name in self.SECTIONS or name in self.SCRIPTS):
+            raise KeyError("Invalid section name %r" % (name, ))
+        section_start = -1
+        section_end = -1
+        for i, line in enumerate(self._lines):
+            if section_start == -1 and line.startswith(name):
+                section_start = i
+            elif section_start >= 0:
+                if self._line_is_section(line):
+                    section_end = i
+                    break
+        if section_start >= 0 and section_end == -1:
+            # Section runs to the end of the file.
+            section_end = len(self._lines)
+        return (section_start, section_end)
+
+    def replace_key_line(self, key, new_value, line):
+        """Take a line of the form "Release: 42 # foo" and replace
+        the 42 with new_value, preserving the comment "# foo"."""
+ comment = line.rfind('#')
+ if comment >= 0:
+ return '%s: %s %s\n' % (key, new_value, line[comment:])
+ else:
+ return '%s: %s\n' % (key, new_value)
+
+    def add_patch(self, filename):
+        # Numbering for new patches is computed in save_as(), based on the
+        # highest existing Patch<N> line.
+        self._added_patches.append(filename)
+
+ def save(self):
+ self._saved = True
+ tmpname = self._filename + '.tmp'
+ self.save_as(tmpname)
+ os.rename(tmpname, self._filename)
+
+ def save_as(self, new_filename):
+ wrote_buildrequires = False
+ output = open(new_filename, 'w')
+
+ apply_patchmeta_at_line = -1
+ apply_patch_apply_at_line = -1
+ source_re = re.compile(r'^Source([0-9]*):')
+ patch_re = re.compile(r'^Patch([0-9]+):')
+ apply_re = re.compile(r'^%patch')
+ highest_patchnum = -1
+
+ output_lines = self._lines
+
+ for i,line in enumerate(output_lines):
+ match = patch_re.search(line)
+ if match:
+ apply_patchmeta_at_line = i
+ highest_patchnum = int(match.group(1))
+ continue
+ match = source_re.search(line)
+ if match:
+ apply_patchmeta_at_line = i
+ if highest_patchnum == -1:
+ highest_patchnum = 0
+ continue
+ if line.startswith('%setup'):
+ apply_patch_apply_at_line = i + 1
+ continue
+ match = apply_re.search(line)
+ if match:
+ apply_patch_apply_at_line = i + 1
+ continue
+ if apply_patchmeta_at_line == -1:
+ print "Error: Couldn't determine where to add Patch:"
+ sys.exit(1)
+ if apply_patch_apply_at_line == -1:
+ print "Error: Couldn't determine where to add %patch"
+ sys.exit(1)
+
+        for section, filters in self._section_filters.iteritems():
+            (start, end) = self._get_range_for_section(section)
+            if start < 0:
+                continue
+            for i, line in enumerate(output_lines[start:end]):
+                for f in filters:
+                    result = f(line)
+                    if result is not None:
+                        output_lines[start+i] = line = result
+
+ for i,line in enumerate(output_lines):
+ if i == apply_patchmeta_at_line:
+ for pnum,patch in enumerate(self._added_patches):
+ output.write('Patch%d: %s\n' % (highest_patchnum + pnum + 1, patch))
+ elif i == apply_patch_apply_at_line:
+ for pnum,patch in enumerate(self._added_patches):
+ output.write('%%patch%d -p1\n' % (highest_patchnum + pnum + 1, ))
+
+ if line.startswith('%setup') and self._source_dirname: # This is dumb, need to automate this in RPM
+ output.write('%%setup -q -n %s\n' % self._source_dirname)
+ elif ':' in line:
+ key, value = line.split(':', 1)
+ if key == 'Release' and self._new_release:
+ output.write(self.replace_key_line(key, self._new_release, line))
+ elif (line.startswith('Source0:') or line.startswith('Source:')) and self._source_archivename:
+ output.write(self.replace_key_line(key, self._source_archivename, line))
+ elif key == 'BuildRequires' and not wrote_buildrequires:
+ output.write(line)
+ for req in self._append_buildrequires:
+ output.write('BuildRequires: %s\n' % req)
+ wrote_buildrequires = True
+ else:
+ output.write(line)
+ else:
+ output.write(line)
+
+ output.close()
+
+ def get_patches(self):
+ patchre = re.compile(r'^Patch([0-9]+):')
+ patches = []
+ for line in self._lines:
+ match = patchre.search(line)
+ if not match:
+ continue
+ patches.append((int(match.group(1)), line.split(':', 1)[1].strip()))
+ return patches
+
+ def get_version(self):
+ return self.get_key('Version')
+
+ def get_vcs(self):
+ for line in self._lines:
+ if line.startswith('#VCS:'):
+ return line[5:].strip()
+ raise ValueError("No such key #VCS in file %r" % (self._filename, ))
+
+ def get_key(self, key):
+ key = key + ':'
+ for line in self._lines:
+ if line.startswith(key):
+ return line[len(key):].strip()
+ raise ValueError("No such key %r in file %r" % (key, self._filename))
+
+ def get_key_allvalues(self, key):
+ key = key + ':'
+ result = []
+ for line in self._lines:
+ if line.startswith(key):
+ result.append(line[len(key):].strip())
+ return result
+
+ def __str__(self):
+ return self._filename
+
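+# Quick manual test (a sketch): print a few parsed fields from a .spec file
+# given on the command line.
+if __name__ == '__main__':
+    spec = Spec(sys.argv[1])
+    print 'Name:', spec.get_key('Name')
+    print 'Version:', spec.get_version()
+    print 'Patches:', spec.get_patches()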