#!/usr/bin/python
# metabuild: Generic build system wrapper
# Copyright 2010 Colin Walters
# Licensed under the new-BSD license (http://www.opensource.org/licenses/bsd-license.php)
#
# metabuild currently just wraps autotools (configure+make).
# To use it, you must first use the "inroot" tool to enter an alternative
# buildroot.
#
# $ inroot /path/to/buildroot bash
#
# Next, just type:
#
# $ metabuild
#
# This will:
# 1) Run ./configure if necessary
# 2) Run make
#
# The build output is automatically logged to $TMPDIR/build-$(basename $PWD).log.
# For example, invoking metabuild in a directory named "foo" will log
# to /tmp/build-foo.log
#
# You can pass arguments to metabuild; if they start with '--', they're
# given to configure.  Otherwise, they're passed to make.
#
# $ metabuild --enable-libfoo   # passed to configure
# $ metabuild -j 1              # passed to make

import os, sys, subprocess, tempfile
from multiprocessing import cpu_count

import glib, gio

if 'INROOT_DIR' not in os.environ:
    print "INROOT_DIR not set; run under inroot"
    sys.exit(1)
root = os.environ['INROOT_DIR']

if os.path.isdir('/lib64'):
    libdir = os.path.join(root, 'lib64')
else:
    libdir = os.path.join(root, 'lib')

# "Constants" (well, some are derived from the environment)
subprocess_nice_args = ['nice', 'ionice', '-c', '3', '-t']
default_make_parallel = ['-j', '%d' % (cpu_count() * 6, )]
user_specified_jobs = False

# Arguments starting with '--' go to configure; everything else goes to make.
configargs = ['--prefix=' + root, '--libdir=' + libdir]
makeargs = ['make']
for arg in sys.argv[1:]:
    if arg.startswith('--'):
        configargs.append(arg)
    else:
        if arg == '-j':
            user_specified_jobs = True
        makeargs.append(arg)
if not user_specified_jobs:
    makeargs.extend(default_make_parallel)

class Tail(object):
    """Follow the log file as it grows, copying new data to the given output stream."""
    def __init__(self, filename, output):
        self.filename = filename
        self.output = output
        self._gfile = gio.File(path=filename)
        self._mon = self._gfile.monitor(gio.FILE_MONITOR_NONE)
        self._instream = self._gfile.read()
        self._read_queued = False
        self._quit_data = None
        self._mon.connect('changed', self._on_changed)

    def _do_read(self):
        if self._read_queued:
            return
        self._read_queued = True
        self._instream.read_async(8192, self._on_read)

    def _on_read(self, src, result):
        self._read_queued = False
        buf = src.read_finish(result)
        if buf != '':
            self.output.write(buf)
            self._do_read()
        elif self._quit_data:
            # The file is drained and finish() was requested; stop the main
            # loop and invoke the completion callback.
            self._quit_data[0].quit()
            self._quit_data[1]()

    def _on_changed(self, mon, gfile, other, event):
        self._do_read()

    def start(self):
        self._do_read()

    def finish(self, loop, callback):
        self._quit_data = (loop, callback)
        self._do_read()

tempdir = os.environ.get('TMPDIR', '/tmp')
logfile_path = os.path.join(tempdir, 'build-%s.log' % (os.path.basename(os.getcwd()), ))
try:
    os.unlink(logfile_path)
except OSError:
    pass
logfile_write_fd = os.open(logfile_path, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
logfile_f = os.fdopen(logfile_write_fd, "w")
sys.stdout.write('metabuild: logging to %r\n' % (logfile_path, ))
sys.stdout.flush()

loop = glib.MainLoop()
tail = Tail(logfile_path, sys.stdout)
tail.start()

def log(msg):
    fullmsg = 'metabuild: ' + msg + '\n'
    logfile_f.write(fullmsg)
    logfile_f.flush()

def global_failure_handler():
    # Flush any remaining log output, then exit with failure.
    tail.finish(loop, lambda: sys.exit(1))

class BuildProcess(object):
    """Run a child process asynchronously, sending its output to the log file."""
    def __init__(self, args, cwd=None, nice=True):
        if nice:
            self.args = list(subprocess_nice_args)
            self.args.extend(args)
        else:
            self.args = args
        self.pid = None
        self.next_callback = None

    def _child_setup(self, *args):
        # Redirect the child's stdin to /dev/null and its stdout/stderr
        # into the log file.
        nullfd = os.open('/dev/null', os.O_RDONLY)
        os.dup2(nullfd, 0)
        os.close(nullfd)
        os.dup2(logfile_write_fd, 1)
        os.dup2(logfile_write_fd, 2)
    def _exit_callback(self, pid, condition):
        log("pid %d exited with condition %d" % (pid, condition))
        if condition != 0:
            global_failure_handler()
        else:
            glib.idle_add(self.next_callback)

    def run_async(self, exit_callback):
        log("Running: %r" % (self.args, ))
        (pid, stdin_fd, stdout_fd, stderr_fd) = \
            glib.spawn_async(self.args,
                             flags=(glib.SPAWN_DO_NOT_REAP_CHILD | glib.SPAWN_SEARCH_PATH),
                             child_setup=self._child_setup)
        self.pid = pid
        self.next_callback = exit_callback
        glib.child_watch_add(pid, self._exit_callback)

have_configure = (os.path.exists('configure.ac') or os.path.exists('configure.in'))

def phase_bootstrap():
    # Generate the configure script if the tree ships autotools input but
    # no generated configure yet.
    if have_configure and not os.path.exists('configure'):
        if os.path.exists('autogen.sh'):
            log("Detected GNOME-style autogen.sh, using it")
            args = ['./autogen.sh']
            args.extend(configargs)
            autogen = BuildProcess(args)
            autogen.run_async(phase_configure)
        else:
            log("No autogen.sh, trying autoreconf")
            autogen = BuildProcess(['autoreconf', '-f', '-i'])
            autogen.run_async(phase_configure)
    else:
        phase_configure()

def phase_configure():
    # Rerun configure if there's no Makefile yet, or if the previously
    # configured prefix doesn't match the current buildroot.
    prefix_matches = True
    if have_configure and os.path.exists('config.log'):
        previous_prefix = None
        f = open('config.log')
        for line in f:
            if line.startswith('prefix=\''):
                previous_prefix = line[8:-2]
                break
        f.close()
        if previous_prefix != root:
            log("Rerunning configure due to prefix change (%r -> %r)" % (previous_prefix, root))
            prefix_matches = False
    if have_configure and (not os.path.exists('Makefile') or not prefix_matches):
        log("Detected configure script, using it")
        args = ['./configure']
        args.extend(configargs)
        configure = BuildProcess(args)
        configure.run_async(phase_build)
    else:
        phase_build()

build_status = False

def phase_build():
    if os.path.exists('Makefile'):
        log("Detected Makefile, using it")
        make = BuildProcess(makeargs)
        make.run_async(phase_complete)
    else:
        log("Couldn't find supported build system")
        log("Known systems:")
        log(" Makefile: make")
        # Nothing to build; report failure instead of leaving the main
        # loop running forever.
        global_failure_handler()

def phase_complete():
    log("Complete!")
    tail.finish(loop, lambda: sys.exit(0))

# Start off the process
phase_bootstrap()
loop.run()