-rw-r--r--   Makefile.am                            2
-rw-r--r--   abrt.spec.in                          46
-rw-r--r--   configure.ac                           3
-rw-r--r--   doc/Makefile.am                        1
-rw-r--r--   doc/abrt-retrace-server.texi         799
-rw-r--r--   src/Makefile.am                        2
-rw-r--r--   src/retrace/Makefile.am               31
-rwxr-xr-x   src/retrace/abrt-retrace-cleanup      60
-rwxr-xr-x   src/retrace/abrt-retrace-reposync    118
-rw-r--r--   src/retrace/backtrace.wsgi            46
-rwxr-xr-x   src/retrace/coredump2packages        293
-rw-r--r--   src/retrace/create.wsgi              111
-rwxr-xr-x   src/retrace/install.sh               212
-rw-r--r--   src/retrace/log.wsgi                  46
-rw-r--r--   src/retrace/plugins/Makefile.am        2
-rw-r--r--   src/retrace/plugins/__init__.py       24
-rw-r--r--   src/retrace/plugins/fedora.py         29
-rw-r--r--   src/retrace/retrace.conf              33
-rw-r--r--   src/retrace/retrace.py               412
-rw-r--r--   src/retrace/retrace.repo              23
-rw-r--r--   src/retrace/retrace_httpd.conf         5
-rw-r--r--   src/retrace/settings.wsgi             18
-rw-r--r--   src/retrace/status.wsgi               49
-rw-r--r--   src/retrace/worker.c                  77
-rwxr-xr-x   src/retrace/worker.py                305
25 files changed, 2 insertions, 2745 deletions
diff --git a/Makefile.am b/Makefile.am
index 2232979f..95c748ed 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -1,5 +1,5 @@
ACLOCAL_AMFLAGS = -I m4
-SUBDIRS = src po icons tests doc
+SUBDIRS = src po icons tests
DISTCHECK_CONFIGURE_FLAGS = \
--with-systemdsystemunitdir=$$dc_install_base/$(systemdsystemunitdir)
diff --git a/abrt.spec.in b/abrt.spec.in
index ac71a12f..143e8b65 100644
--- a/abrt.spec.in
+++ b/abrt.spec.in
@@ -33,7 +33,6 @@ BuildRequires: libtar-devel
BuildRequires: intltool
BuildRequires: libtool
BuildRequires: nss-devel
-BuildRequires: texinfo
BuildRequires: asciidoc
BuildRequires: xmlto
BuildRequires: libreport-devel
@@ -206,21 +205,6 @@ Provides: bug-buddy
%description desktop
Virtual package to make easy default installation on desktop environments.
-%package retrace-server
-Summary: %{name}'s retrace server using HTTP protocol
-Group: System Environment/Daemons
-Requires: abrt-addon-ccpp
-Requires: gdb >= 7.0-3
-Requires: httpd, mod_wsgi, mod_ssl, python-webob
-Requires: mock, xz, elfutils, createrepo, rsync
-%{?el6:Requires: python-argparse}
-Requires(preun): /sbin/install-info
-Requires(post): /sbin/install-info
-
-%description retrace-server
-The retrace server provides a coredump analysis and backtrace
-generation service over a network using HTTP protocol.
-
%prep
%setup -q
@@ -246,9 +230,6 @@ install -m 755 %SOURCE3 ${RPM_BUILD_ROOT}/%{_initrddir}/abrt-oops
mkdir -p $RPM_BUILD_ROOT/var/cache/abrt-di
mkdir -p $RPM_BUILD_ROOT/var/run/abrt
mkdir -p $RPM_BUILD_ROOT/var/spool/abrt
-mkdir -p $RPM_BUILD_ROOT/var/spool/abrt-retrace
-mkdir -p $RPM_BUILD_ROOT/var/cache/abrt-retrace
-mkdir -p $RPM_BUILD_ROOT/var/log/abrt-retrace
mkdir -p $RPM_BUILD_ROOT/var/spool/abrt-upload
desktop-file-install \
@@ -302,10 +283,6 @@ if [ $1 -eq 1 ]; then
fi
#systemd: TODO
-%post retrace-server
-/sbin/install-info %{_infodir}/abrt-retrace-server %{_infodir}/dir 2> /dev/null || :
-/usr/sbin/usermod -G mock apache 2> /dev/null || :
-
%preun
if [ "$1" -eq "0" ] ; then
service abrtd stop >/dev/null 2>&1
@@ -345,11 +322,6 @@ if [ "$1" -eq "0" ] ; then
fi
%endif
-%preun retrace-server
-if [ "$1" = 0 ]; then
- /sbin/install-info --delete %{_infodir}/abrt-retrace-server %{_infodir}/dir 2> /dev/null || :
-fi
-
%postun
#systemd
%if %{?with_systemd}
@@ -535,24 +507,6 @@ gtk-update-icon-cache %{_datadir}/icons/hicolor &>/dev/null || :
%files desktop
%defattr(-,root,root,-)
-%files retrace-server
-%defattr(-,root,root,-)
-%config(noreplace) %{_sysconfdir}/%{name}/retrace.conf
-%config(noreplace) %{_sysconfdir}/httpd/conf.d/retrace_httpd.conf
-%config(noreplace) %{_sysconfdir}/yum.repos.d/retrace.repo
-%dir %attr(0775, apache, abrt) %{_localstatedir}/spool/abrt-retrace
-%dir %attr(0755, abrt, abrt) %{_localstatedir}/cache/abrt-retrace
-%dir %attr(0755, abrt, abrt) %{_localstatedir}/log/abrt-retrace
-%caps(cap_setuid=ep) %{_bindir}/abrt-retrace-worker
-%{_bindir}/abrt-retrace-cleanup
-%{_bindir}/abrt-retrace-reposync
-%{_bindir}/coredump2packages
-%{python_site}/retrace.py*
-%{_datadir}/abrt-retrace/*.py*
-%{_datadir}/abrt-retrace/plugins/*.py*
-%{_datadir}/abrt-retrace/*.wsgi
-%{_infodir}/abrt-retrace-server*
-
%changelog
* Wed Mar 16 2011 Jiri Moskovcak <jmoskovc@redhat.com> 2.0.0-1
- update to the latest upstream version
diff --git a/configure.ac b/configure.ac
index b30f017c..0c4e42dd 100644
--- a/configure.ac
+++ b/configure.ac
@@ -133,7 +133,6 @@ AC_CONFIG_HEADERS([config.h])
AC_CONFIG_FILES([
Makefile
abrt.pc
- doc/Makefile
src/include/Makefile
src/lib/Makefile
src/plugins/Makefile
@@ -145,8 +144,6 @@ AC_CONFIG_FILES([
src/applet/Makefile
src/gui-gtk/Makefile
src/cli/Makefile
- src/retrace/Makefile
- src/retrace/plugins/Makefile
po/Makefile.in
icons/Makefile
tests/btparser/Makefile
diff --git a/doc/Makefile.am b/doc/Makefile.am
deleted file mode 100644
index 4896aa1a..00000000
--- a/doc/Makefile.am
+++ /dev/null
@@ -1 +0,0 @@
-info_TEXINFOS = abrt-retrace-server.texi
diff --git a/doc/abrt-retrace-server.texi b/doc/abrt-retrace-server.texi
deleted file mode 100644
index 9eb02ca8..00000000
--- a/doc/abrt-retrace-server.texi
+++ /dev/null
@@ -1,799 +0,0 @@
-\input texinfo
-@c abrt-retrace-server.texi - Retrace Server Documentation
-@c
-@c .texi extension is recommended in GNU Automake manual
-@setfilename abrt-retrace-server.info
-@include version.texi
-
-@settitle Retrace server for ABRT @value{VERSION} Manual
-
-@dircategory Retrace server
-@direntry
-* Retrace server: (retrace-server). Remote coredump analysis via HTTP.
-@end direntry
-
-@titlepage
-@title Retrace server
-@subtitle for ABRT version @value{VERSION}, @value{UPDATED}
-@author Karel Klic (@email{kklic@@redhat.com})
-@page
-@vskip 0pt plus 1filll
-@end titlepage
-
-@contents
-
-@ifnottex
-@node Top
-@top Retrace server
-
-This manual is for retrace server for ABRT version @value{VERSION},
-@value{UPDATED}. The retrace server provides coredump analysis and
-backtrace generation service over a network using HTTP protocol.
-@end ifnottex
-
-@menu
-* Overview::
-* HTTP interface::
-* Retrace worker::
-* Task cleanup::
-* Package repository::
-* Traffic and load estimation::
-* Security::
-* Future work::
-@end menu
-
-@node Overview
-@chapter Overview
-
-Analyzing a program crash from a coredump is a difficult task. The GNU
-Debugger (GDB), which is commonly used to analyze coredumps on free
-operating systems, expects that the system analyzing the coredump is
-identical to the system where the program crashed. Software updates
-often break this assumption even on the system where the crash occurred,
-making the coredump analyzable only with significant
-effort. Furthermore, older versions of software packages, including the
-packages with debugging symbols, are often removed from software
-repositories, so the package with debugging symbols is often not
-available when a user needs it for coredump analysis. Packages with
-debugging symbols are large, requiring a lot of free space and causing
-problems when downloaded over an unreliable internet connection.
-
-Retrace server solves these problems for Fedora 14+ and RHEL 6+
-operating systems, and allows developers to analyze coredumps without
-having access to the machine where the crash occurred.
-
-The retrace server usually runs as a service on a local network or on
-the Internet. A user sends a coredump together with some additional
-information to a retrace server. The server reads the coredump and,
-depending on its contents, installs the necessary software dependencies
-to create a software environment which is, from GDB's point of view,
-identical to the environment where the crash happened. Then the server
-runs GDB to generate a backtrace from the coredump and provides it back
-to the user.
-
-Core dumps generated on i386 and x86_64 architectures are supported
-within a single x86_64 retrace server instance.
-
-The retrace server consists of the following major parts:
-@enumerate
-@item
-an HTTP interface, consisting of a set of scripts handling communication
-with clients
-@item
-a retrace worker, doing the coredump processing, environment
-preparation, and running the debugger to generate a backtrace
-@item
-a cleanup script, handling stalled retracing tasks and removing old data
-@item
-a package repository, providing the application binaries, libraries, and
-debuginfo necessary for generating backtraces from coredumps
-@end enumerate
-
-@node HTTP interface
-@chapter HTTP interface
-
-@menu
-* Creating a new task::
-* Task status::
-* Requesting a backtrace::
-* Requesting a log::
-* Limiting traffic::
-@end menu
-
-The client-server communication proceeds as follows:
-@enumerate
-@item
-Client uploads a coredump to a retrace server. Retrace server creates a
-task for processing the coredump, and sends the task ID and task
-password in response to the client.
-@item
-Client asks server for the task status using the task ID and password.
-Server responds with the status information (task finished successfully,
-task failed, task is still running).
-@item
-Client asks server for the backtrace from a successfully finished task
-using the task ID and password. Server sends the backtrace in response.
-@item
-Client asks server for a log from the finished task using the task ID
-and password, and server sends the log in response.
-@end enumerate
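-
-The exchange can be sketched from the client's side as follows; the
-server name is hypothetical and error handling is omitted:
-
-@verbatim
-#!/usr/bin/python
-import httplib
-import time
-
-conn = httplib.HTTPSConnection("retrace.example.com")
-
-# 1. upload the packed crash data
-data = open("crash.tar.xz", "rb").read()
-conn.request("POST", "/create", data,
-             {"Content-Type": "application/x-xz"})
-reply = conn.getresponse()
-task_id = reply.getheader("X-Task-Id")
-password = reply.getheader("X-Task-Password")
-reply.read()
-
-# 2. poll the task status
-status = "PENDING"
-while status == "PENDING":
-    time.sleep(10)
-    conn.request("GET", "/" + task_id,
-                 headers={"X-Task-Password": password})
-    reply = conn.getresponse()
-    status = reply.getheader("X-Task-Status")
-    reply.read()
-
-# 3. fetch the backtrace on success, the log otherwise
-part = "/backtrace" if status == "FINISHED_SUCCESS" else "/log"
-conn.request("GET", "/" + task_id + part,
-             headers={"X-Task-Password": password})
-print conn.getresponse().read()
-@end verbatim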
-
-The HTTP interface application is a set of scripts written in Python,
-using the @uref{http://www.python.org/dev/peps/pep-0333/, Python Web
-Server Gateway Interface} (WSGI) to interact with a web server. The only
-supported and tested configuration is the Apache HTTPD Server with
-@uref{http://code.google.com/p/modwsgi/, mod_wsgi}.
-
-Only secure (HTTPS) communication is allowed when communicating with a
-public instance of the retrace server, because coredumps and backtraces
-are private data. Users may decide to publish their backtraces in a bug
-tracker after reviewing them, but the retrace server doesn't do
-that. The server is supposed to use HTTP persistent connections to
-avoid frequent SSL renegotiations.
-
-@node Creating a new task
-@section Creating a new task
-
-A client might create a new task by sending an HTTP request to the
-@indicateurl{https://server/create} URL, and providing an archive as the
-request content. The archive contains crash data files. The crash data
-files are a subset of some local @file{/var/spool/abrt/ccpp-time-pid}
-directory contents, so the client must only pack and upload them.
-
-The server supports uncompressed tar archives, and tar archives
-compressed with gzip and xz. Uncompressed archives are the most
-efficient way for local network delivery, and gzip can be used there as
-well because of its good compression speed.
-
-The xz compression file format is well suited for public server setup
-(slow network), as it provides good compression ratio, which is
-important for compressing large coredumps, and it provides reasonable
-compress/decompress speed and memory consumption. See @ref{Traffic and
-load estimation} for the measurements. The @uref{http://tukaani.org/xz/,
-XZ Utils} implementation with the compression level 2 is used to
-compress the data.
-
-The HTTP request for a new task must use the POST method. It must
-contain proper @var{Content-Length} and @var{Content-Type} fields. If
-the method is not POST, the server returns the @code{405 Method Not
-Allowed} HTTP error code. If the @var{Content-Length} field is missing,
-the server returns the @code{411 Length Required} HTTP error code. If a
-@var{Content-Type} other than @samp{application/x-tar},
-@samp{application/x-gzip}, or @samp{application/x-xz} is used, the server
-returns the @code{415 Unsupported Media Type} HTTP error code. If the
-@var{Content-Length} value is greater than the limit set by the
-@var{MaxPackedSize} option in the server configuration file (50 MB by
-default), or the real HTTP request size gets larger than the limit + 10
-KB for headers, then the server returns the @code{413 Request Entity Too
-Large} HTTP error code, and provides an explanation, including the
-limit, in the response body.
-
-If unpacking the archive would result in having the free disk space
-under certain limit in the @file{/var/spool/abrt-retrace} directory, the
-server returns the @code{507 Insufficient Storage} HTTP error code. The
-limit is specified by the @var{MinStorageLeft} option in the server
-configuration file, and it is set to 1024 MB by default.
-
-If the data from the received archive would take more than 1024 MB of
-disk space when uncompressed, the server returns the @code{413 Request
-Entity Too Large} HTTP error code, and provides an explanation,
-including the limit, in the response body. The size limit is changeable
-by the @var{MaxUnpackedSize} option in the server configuration file. It
-can be set quite high because coredumps, which take most of the disk
-space, are stored on the server only temporarily until the backtrace is
-generated. Once the backtrace is generated, the coredump is deleted by
-@command{abrt-retrace-worker}, so most of the disk space is released.
-
-The uncompressed data size for xz archives is obtained by calling
-@code{`xz --list file.tar.xz`}. The @option{--list} option has been
-implemented only recently, so updating @command{xz} on your server might
-be necessary. Likewise, the uncompressed data size for gzip archives is
-obtained by calling @code{`gzip --list file.tar.gz`}.
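-
-A sketch of this check in Python, assuming @code{xz --robot --list}
-reports the uncompressed size in the fifth tab-separated column of its
-@code{totals} line, and @code{gzip --list} reports it in the second
-column of its last output line:
-
-@verbatim
-import subprocess
-
-def unpacked_size(path, mime):
-    # returns the uncompressed size in bytes, or None on failure
-    if mime == "application/x-xz":
-        out = subprocess.Popen(["xz", "--robot", "--list", path],
-                               stdout=subprocess.PIPE).communicate()[0]
-        for line in out.splitlines():
-            fields = line.split("\t")
-            if fields[0] == "totals":
-                return int(fields[4])
-    elif mime == "application/x-gzip":
-        # note: gzip stores the uncompressed size modulo 2^32
-        out = subprocess.Popen(["gzip", "--list", path],
-                               stdout=subprocess.PIPE).communicate()[0]
-        return int(out.splitlines()[-1].split()[1])
-    return None
-@end verbatim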
-
-If an upload from a client succeeds, the server creates a new directory
-@file{/var/spool/abrt-retrace/@var{id}} and extracts the
-received archive into it. Then it checks that the directory contains all
-the required files, checks their sizes, and then sends an HTTP
-response. After that it spawns a subprocess with
-@command{abrt-retrace-worker} on that directory.
-
-The following files from the local crash directory are required to be
-present in the archive: @file{coredump}, @file{executable},
-@file{package}. If any of these files is missing from the archive, or
-the archive contains any other file, the server returns the
-@code{403 Forbidden} HTTP error code.
-
-If the file check succeeds, the server HTTP response has the @code{201
-Created} HTTP code. The response includes the following HTTP header
-fields:
-@itemize
-@item
-@var{X-Task-Id} containing a new server-unique numerical
-task id
-@item
-@var{X-Task-Password} containing a newly generated
-password, required to access the result
-@end itemize
-
-The @var{X-Task-Password} is a random alphanumeric (@samp{[a-zA-Z0-9]})
-sequence 22 characters long. The password is stored in the
-@file{/var/spool/abrt-retrace/@var{id}/password} file, and passwords
-sent by a client in subsequent requests are verified by comparing with
-this file.
-
-The task id is intentionally not used as a password, because it is
-desirable to keep the id readable and memorable for
-humans. Password-like ids would be a loss once a user authentication
-mechanism is added and server-generated passwords are no longer
-necessary.
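-
-Generating such a password is straightforward; a minimal sketch using
-Python's @code{random.SystemRandom}, which reads from
-@file{/dev/urandom}:
-
-@verbatim
-import random
-import string
-
-def generate_password(length=22):
-    # 62 alphanumeric characters; 62^22 > 2^128 possibilities
-    chars = string.ascii_letters + string.digits
-    urandom = random.SystemRandom()
-    return "".join(urandom.choice(chars) for _ in xrange(length))
-@end verbatim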
-
-@node Task status
-@section Task status
-
-A client might request a task status by sending an HTTP GET request to
-the @indicateurl{https://someserver/@var{id}} URL, where @var{id} is the
-numerical task id returned in the @var{X-Task-Id} field by
-@indicateurl{https://someserver/create}. If the @var{id} is not in the
-valid format, or the task @var{id} does not exist, the server returns
-the @code{404 Not Found} HTTP error code.
-
-The client request must contain the @var{X-Task-Password} field, and its
-content must match the password stored in the
-@file{/var/spool/abrt-retrace/@var{id}/password} file. If the password is
-not valid, the server returns the @code{403 Forbidden} HTTP error code.
-
-If the checks pass, the server returns the @code{200 OK} HTTP code, and
-includes a field @var{X-Task-Status} containing one of the following
-values: @samp{FINISHED_SUCCESS}, @samp{FINISHED_FAILURE},
-@samp{PENDING}.
-
-The field contains @samp{FINISHED_SUCCESS} if the file
-@file{/var/spool/abrt-retrace/@var{id}/backtrace} exists. The client might
-get the backtrace on the @indicateurl{https://someserver/@var{id}/backtrace}
-URL. The log might be obtained on the
-@indicateurl{https://someserver/@var{id}/log} URL, and it might contain
-warnings about some missing debuginfos etc.
-
-The field contains @samp{FINISHED_FAILURE} if the file
-@file{/var/spool/abrt-retrace/@var{id}/backtrace} does not exist, and file
-@file{/var/spool/abrt-retrace/@var{id}/retrace-log} exists. The retrace-log
-file containing error messages can be downloaded by the client from the
-@indicateurl{https://someserver/@var{id}/log} URL.
-
-The field contains @samp{PENDING} if neither file exists. The client
-should ask again after 10 seconds or later.
-
-@node Requesting a backtrace
-@section Requesting a backtrace
-
-A client might request a backtrace by sending an HTTP GET request to the
-@indicateurl{https://someserver/@var{id}/backtrace} URL, where @var{id}
-is the numerical task id returned in the @var{X-Task-Id} field by
-@indicateurl{https://someserver/create}. If the @var{id} is not in the
-valid format, or the task @var{id} does not exist, the server returns
-the @code{404 Not Found} HTTP error code.
-
-The client request must contain the @var{X-Task-Password} field, and its
-content must match the password stored in the
-@file{/var/spool/abrt-retrace/@var{id}/password} file. If the password
-is not valid, the server returns the @code{403 Forbidden} HTTP error
-code.
-
-If the file @file{/var/spool/abrt-retrace/@var{id}/backtrace} does not
-exist, the server returns the @code{404 Not Found} HTTP error code.
-Otherwise it returns the file contents, and the @var{Content-Type}
-header is set to @samp{text/plain}.
-
-@node Requesting a log
-@section Requesting a log
-
-A client might request a task log by sending an HTTP GET request to the
-@indicateurl{https://someserver/@var{id}/log} URL, where @var{id} is the
-numerical task id returned in the @var{X-Task-Id} field by
-@indicateurl{https://someserver/create}. If the @var{id} is not in the
-valid format, or the task @var{id} does not exist, the server returns
-the @code{404 Not Found} HTTP error code.
-
-The client request must contain the @var{X-Task-Password} field, and its
-content must match the password stored in the
-@file{/var/spool/abrt-retrace/@var{id}/password} file. If the password
-is not valid, the server returns the @code{403 Forbidden} HTTP error
-code.
-
-If the file @file{/var/spool/abrt-retrace/@var{id}/retrace-log} does not
-exist, the server returns the @code{404 Not Found} HTTP error code.
-Otherwise it returns the file contents, and the @var{Content-Type}
-header is set to @samp{text/plain}.
-
-@node Limiting traffic
-@section Limiting traffic
-
-The maximum number of simultaneously running tasks is limited to 5 by
-the server. The limit is changeable by the @var{MaxParallelTasks} option
-in the server configuration file. If a new request comes when the server
-is fully occupied, the server returns the @code{503 Service Unavailable}
-HTTP error code.
-
-The archive extraction, chroot preparation, and GDB analysis are
-mostly limited by hard drive size and speed.
-
-@node Retrace worker
-@chapter Retrace worker
-
-The retrace worker is a program (usually residing in
-@command{/usr/bin/abrt-retrace-worker}) which:
-@enumerate
-@item
-takes a task id as a parameter and locates the corresponding task
-directory containing a coredump
-@item
-determines which packages need to be installed from the coredump
-@item
-installs the packages in a newly created chroot environment together
-with @command{gdb}
-@item
-copies the coredump to the chroot environment
-@item
-runs @command{gdb} from inside the environment to generate a backtrace
-from the coredump
-@item
-copies the resulting backtrace from the environment to the directory
-@end enumerate
-
-The tasks reside in @file{/var/spool/abrt-retrace/@var{taskid}}
-directories.
-
-To determine which packages need to be installed,
-@command{abrt-retrace-worker} runs the @command{coredump2packages} tool.
-The tool reads build-ids from the coredump, and tries to find the best
-set of packages (epoch, name, version, release) matching the
-build-ids. Local yum repositories are used as the source of
-packages. GDB's requirements are strict, and this is the reason why
-proper backtraces cannot be directly and reliably generated on systems
-whose software has been updated:
-@itemize
-@item
-The exact binary which crashed needs to be available to GDB.
-@item
-All libraries which are linked to the binary need to be available in the
-same exact versions from the time of the crash.
-@item
-The binary plugins loaded by the binary or libraries via @code{dlopen}
-need to be present in proper versions.
-@item
-The files containing the debugging symbols for the binary and libraries
-(build-ids are used to find the pairs) need to be available to GDB.
-@end itemize
-
-The chroot environments are created and managed by @command{mock}, and
-they reside in @file{/var/lib/mock/@var{taskid}}. The retrace worker
-generates a mock configuration file and then invokes @command{mock} to
-create the chroot, and to run programs from inside the chroot.
-
-The chroot environment is populated by installing packages using
-@command{yum}. Package installation cannot be avoided, as GDB expects to
-operate on an installed system, and on crashes from that system. GDB
-uses plugins written in Python, which are shipped with packages (for
-example see @command{rpm -ql libstdc++}).
-
-Coredumps might be affected by @command{prelink}, which is used on
-Fedora to speed up dynamic linking by caching its results directly in
-binaries. The system installed by @command{mock} for the purpose of
-retracing doesn't use @command{prelink}, so the binaries differ between
-the system of origin and the mock environment. Testing has shown that
-this is not an issue, but in case some issue
-@uref{http://sourceware.org/ml/gdb/2009-05/msg00175.html, occurs}
-(GDB fails to work with a binary even if it's the right one), a bug
-should be filed on @code{prelink}, as its operation should not affect
-the area GDB operates on.
-
-No special care is taken to avoid the possibility that GDB will not run
-with the set of packages (fixed versions) provided by the coredump. It
-is expected that any combination of packages a user might run in a
-released system satisfies the needs of some version of GDB. Yum selects
-the newest possible version which has its requirements satisfied.
-
-@node Task cleanup
-@chapter Task cleanup
-
-It is necessary to watch and limit the resource usage of tasks for a
-retrace server to remain operational. This is performed by the
-@command{abrt-retrace-cleanup} tool. The server administrator is
-expected to set up @command{cron} to run the tool every hour.
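-
-A minimal @file{/etc/crontab} entry for this, assuming the tool is run
-as root from its default install location:
-
-@verbatim
-# run the retrace server cleanup at the start of every hour
-0 * * * * root /usr/bin/abrt-retrace-cleanup
-@end verbatim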
-
-Tasks that were created more than 120 hours (5 days) ago are
-deleted. The limit can be changed by the @var{DeleteTaskAfter} option in
-the server configuration file. Coredumps are deleted when the retrace
-process is finished, and only backtraces, logs, and configuration remain
-available for every task until the cleanup. The
-@command{abrt-retrace-cleanup} checks the creation time and deletes the
-directories in @file{/var/spool/abrt-retrace/}.
-
-Tasks running for more than 1 hour are terminated and removed from the
-system. Tasks for which the @command{abrt-retrace-worker} crashed for
-some reason without marking the task as finished are also removed.
-
-@node Package repository
-@chapter Package repository
-
-The retrace server is able to support every Fedora release with all
-packages that ever made it to the updates and updates-testing
-repositories. In order to provide all those packages, a local repository
-needs to be maintained for every supported operating system.
-
-A repository with Fedora packages must be maintained locally on the
-server to provide good performance and to provide data from older
-packages already removed from the official repositories. Retrace server
-contains a tool @command{abrt-retrace-reposync}, which is a package
-downloader scanning Fedora servers for new packages, and downloading
-them so they are immediately available.
-
-Older versions of packages are regularly deleted from the updates and
-updates-testing repositories. The retrace server supports older versions
-of packages, as this is one of the major pain points it is supposed to
-solve.
-
-The @command{abrt-retrace-reposync} downloads packages from Fedora
-repositories, and it does not delete older versions of the packages. The
-retrace server administrator is supposed to call this script using cron
-approximately every 6 hours. The script uses @command{rsync} to get the
-packages and @command{createrepo} to generate repository metadata.
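-
-A possible @file{/etc/crontab} entry; the distribution, version, and
-architecture arguments shown are examples matching the installed
-plugins:
-
-@verbatim
-# sync the local package repository every 6 hours
-0 */6 * * * root /usr/bin/abrt-retrace-reposync fedora 14 x86_64
-@end verbatim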
-
-The packages are downloaded to a local repository in
-@file{/var/cache/abrt-retrace/}. The location can be changed via the
-@var{RepoDir} option in the server configuration file.
-
-@node Traffic and load estimation
-@chapter Traffic and load estimation
-
-2500 bugs are reported from ABRT every month. Approximately 7.3%
-of those are Python exceptions, which don't need a retrace
-server. That means that 2315 bugs need a retrace server. That is 77
-bugs per day, or 3.3 bugs every hour on average. Occasional spikes
-might be much higher (imagine a user who decided to report all his 8
-crashes from the last month).
-
-We should probably not try to predict if the monthly bug count goes up
-or down. New, untested versions of software are added to Fedora, but
-on the other hand most software matures and becomes less crashy. So
-let's assume that the bug count stays approximately the same.
-
-Test crashes (see why we use @code{`xz -2`} to compress coredumps):
-@itemize
-@item
-firefox with 7 tabs (random pages opened), coredump size 172 MB
-@itemize
-@item
-xz compression
-@itemize
-@item
-compression level 6 (default): compression took 32.5 sec, compressed
-size 5.4 MB, decompression took 2.7 sec
-@item
-compression level 3: compression took 23.4 sec, compressed size 5.6 MB,
-decompression took 1.6 sec
-@item
-compression level 2: compression took 6.8 sec, compressed size 6.1 MB,
-decompression took 3.7 sec
-@item
-compression level 1: compression took 5.1 sec, compressed size 6.4 MB,
-decompression took 2.4 sec
-@end itemize
-@item
-gzip compression
-@itemize
-@item
-compression level 9 (highest): compression took 7.6 sec, compressed size
-7.9 MB, decompression took 1.5 sec
-@item
-compression level 6 (default): compression took 2.6 sec, compressed size
-8 MB, decompression took 2.3 sec
-@item
-compression level 3: compression took 1.7 sec, compressed size 8.9 MB,
-decompression took 1.7 sec
-@end itemize
-@end itemize
-@item
-thunderbird with thousands of emails opened, coredump size 218 MB
-@itemize
-@item
-xz compression
-@itemize
-@item
-compression level 6 (default): compression took 60 sec, compressed size
-12 MB, decompression took 3.6 sec
-@item
-compression level 3: compression took 42 sec, compressed size 13 MB,
-decompression took 3.0 sec
-@item
-compression level 2: compression took 10 sec, compressed size 14 MB,
-decompression took 3.0 sec
-@item
-compression level 1: compression took 8.3 sec, compressed size 15 MB,
-decompression took 3.2 sec
-@end itemize
-@item
-gzip compression
-@itemize
-@item
-compression level 9 (highest): compression took 14.9 sec, compressed
-size 18 MB, decompression took 2.4 sec
-@item
-compression level 6 (default): compression took 4.4 sec, compressed size
-18 MB, decompression took 2.2 sec
-@item
-compression level 3: compression took 2.7 sec, compressed size 20 MB,
-decompression took 3 sec
-@end itemize
-@end itemize
-@item
-evince with 2 pdfs (1 and 42 pages) opened, coredump size 73 MB
-@itemize
-@item
-xz compression
-@itemize
-@item
-compression level 2: compression took 2.9 sec, compressed size 3.6 MB,
-decompression took 0.7 sec
-@item
-compression level 1: compression took 2.5 sec, compressed size 3.9 MB,
-decompression took 0.7 sec
-@end itemize
-@end itemize
-@item
-OpenOffice.org Impress with 25 pages presentation, coredump size 116 MB
-@itemize
-@item
-xz compression
-@itemize
-@item
-compression level 2: compression took 7.1 sec, compressed size 12 MB,
-decompression took 2.3 sec
-@end itemize
-@end itemize
-@end itemize
-
-So let's imagine there are some users who want to report their
-crashes at approximately the same time. Here is what the retrace
-server must handle:
-@enumerate
-@item
-2 OpenOffice crashes
-@item
-2 evince crashes
-@item
-2 thunderbird crashes
-@item
-2 firefox crashes
-@end enumerate
-
-We will use the xz archiver with compression level 2 on ABRT's
-side to compress the coredumps. So the users spend 53.6 seconds in
-total packaging the coredumps.
-
-The packaged coredumps total 71.4 MB, and the retrace server must
-receive that data.
-
-The server unpacks the coredumps (perhaps at the same time), so they
-need 1158 MB of disk space on the server. The decompression will take
-19.4 seconds.
-
-Several hundred megabytes will be needed to install all the
-required packages and debuginfos for every chroot (8 chroots, 1 GB each
-= 8 GB, but this seems like an extreme, maximal case). Some space will
-be saved by using a debuginfofs.
-
-Note that most applications are not as heavyweight as OpenOffice and
-Firefox.
-
-@node Security
-@chapter Security
-
-The retrace server communicates with two other entities: it accepts
-coredumps from users, and it downloads debuginfos and packages from
-distribution repositories.
-
-@menu
-* Clients::
-* Packages and debuginfo::
-@end menu
-
-General security from GDB flaws and malicious data is provided by
-chroot. GDB accesses the debuginfos, packages, and the coredump from
-within the chroot under a non-root user, unable to access the retrace
-server's environment.
-
-@c We should consider setting a disk quota to every chroot directory,
-@c and limit the GDB access to resources using cgroups.
-
-SELinux policy exists for both the retrace server's HTTP interface, and
-for the retrace worker.
-
-@node Clients
-@section Clients
-
-It is expected that the clients using the retrace server and
-sending coredumps to it trust the retrace server administrator. The
-server administrator must not try to extract sensitive data from client
-coredumps. This is a major limitation of the retrace server. However,
-users of an operating system already trust the operating system provider
-in various important matters. So when the retrace server is operated by
-the OS provider, that might be acceptable for users.
-
-Sending clients' coredumps to the retrace server cannot be avoided if we
-want to generate good backtraces containing the values of
-variables. Minidumps lower the quality of the resulting backtraces,
-while not improving user security.
-
-A malicious client can craft a nonstandard coredump, which will be
-processed by the server's GDB. GDB handles malformed coredumps well.
-
-Users can never be allowed to provide custom packages/debuginfo together
-with a coredump. Packages need to be installed to the environment, and
-installing untrusted programs is insecure.
-
-As for an attacker trying to steal users' backtraces from the retrace
-server, the passwords protecting the backtraces in the
-@var{X-Task-Password} header are random alphanumeric
-(@samp{[a-zA-Z0-9]}) sequences 22 characters long. 22 alphanumeric
-characters correspond to a 128-bit password, because @samp{[a-zA-Z0-9]}
-is 62 characters, and @math{2^{128}} < @math{62^{22}}. The source of
-randomness is @file{/dev/urandom}.
-
-@node Packages and debuginfo
-@section Packages and debuginfo
-
-Packages and debuginfo are safely downloaded from the distribution
-repositories, as the packages are signed by the distribution, and the
-package origin is verified.
-
-When the debuginfo filesystem server is done, the retrace server can
-safely use it, as the data will also be signed.
-
-@node Future work
-@chapter Future work
-
-@section Coredump stripping
-Jan Kratochvil: In a test with an OpenOffice.org presentation, the
-kernel core file has 181 MB; xz -2 of it has 65 MB. According to `set
-target debug 1', GDB reads only 131406 bytes of it (incl. the NOTE
-segment).
-
-@section Supporting other architectures
-Three approaches:
-@itemize
-@item
-Use GDB builds with various target architectures: gdb-i386, gdb-ppc64,
-gdb-s390.
-@item
-Run
-@uref{http://wiki.qemu.org/download/qemu-doc.html#QEMU-User-space-emulator,
-QEMU user space emulation} on the server.
-@item
-Run @code{abrt-retrace-worker} on a machine with the right
-architecture. Introduce worker machines and tasks, similarly to Koji.
-@end itemize
-
-@section Use gdbserver instead of uploading whole coredump
-GDB's gdbserver cannot process coredumps, but Jan Kratochvil's can:
-@verbatim
-git://git.fedorahosted.org/git/elfutils.git
-branch: jankratochvil/gdbserver
- src/gdbserver.c
- * Currently threading is not supported.
- * Currently only x86_64 is supported (the NOTE registers layout).
-@end verbatim
-
-@section User management for the HTTP interface
-Multiple authentication sources (x509 for RHEL).
-
-@section Make all files except coredump optional on the input
-Make @file{architecture}, @file{release}, @file{packages} files, which
-must be included in the archive when creating a task, optional. Allow
-uploading a coredump without involving tar: just coredump, coredump.gz,
-or coredump.xz.
-
-@section Handle non-standard packages (provided by user)
-This would make the retrace server very vulnerable to attacks; it can
-never be enabled on a public instance.
-
-@section Support vmcores
-See @uref{https://fedorahosted.org/cas/, Core analysis system}, its
-features etc.
-
-@section Do not refuse new tasks on a fully loaded server
-Consider using @uref{http://git.fedorahosted.org/git/?p=kobo.git, kobo}
-for task management and worker handling (master/slaves arch).
-
-@section Support synchronous operation
-Client sends a coredump, and keeps receiving the server response
-message. The server response HTTP body is generated and sent gradually
-as the task is performed. Client can choose to stop receiving the
-response body after getting all headers and ask the server for status
-and backtrace asynchronously.
-
-The server re-sends the output of abrt-retrace-worker (its stdout and
-stderr) to the response body. In addition, a line with the task
-status is added in the form @code{X-Task-Status: PENDING} to the body
-every 5 seconds. When the worker process ends, either a
-@samp{FINISHED_SUCCESS} or @samp{FINISHED_FAILURE} status line is
-sent. If it's @samp{FINISHED_SUCCESS}, the backtrace is attached after
-this line. Then the response body is closed.
-
-@section Provide task estimation time
-The response to the @code{/create} action should contain a header
-@var{X-Task-Est-Time}, which contains the number of seconds the server
-estimates it will take to generate the backtrace.
-
-The algorithm for the @var{X-Task-Est-Time} time estimation
-should take the previous analyses of coredumps with the same
-corresponding package name into account. The server should store
-simple history in a SQLite database to know how long it takes to
-generate a backtrace for certain package. It could be as simple as
-this:
-@itemize
-@item
- initialization step one: @code{CREATE TABLE package_time (id INTEGER
- PRIMARY KEY AUTOINCREMENT, package, release, time)}; we need the
- @var{id} for the database cleanup - to know the insertion order of
- rows, so the @code{AUTOINCREMENT} is important here; the @var{package}
- is the package name without the version and release numbers, the
- @var{release} column stores the operating system, and the @var{time}
- is the number of seconds it took to generate the backtrace
-@item
- initialization step two: @code{CREATE INDEX package_release ON
- package_time (package, release)}; we compute the time only for single
- package on single supported OS release per query, so it makes sense to
- create an index to speed it up
-@item
- when a task is finished: @code{INSERT INTO package_time (package,
- release, time) VALUES ('??', '??', '??')}
-@item
- to get the average time: @code{SELECT AVG(time) FROM package_time
- WHERE package == '??' AND release == '??'}; the arithmetic mean seems
- to be sufficient here
-@end itemize
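-
-A sketch of these steps using Python's @code{sqlite3} module; the
-database path is hypothetical, and @code{release} is quoted because it
-is an SQL keyword:
-
-@verbatim
-import sqlite3
-
-conn = sqlite3.connect("/var/spool/abrt-retrace/stats.db")
-conn.execute('CREATE TABLE IF NOT EXISTS package_time '
-             '(id INTEGER PRIMARY KEY AUTOINCREMENT, '
-             'package, "release", time)')
-conn.execute('CREATE INDEX IF NOT EXISTS package_release '
-             'ON package_time (package, "release")')
-
-def record_time(package, release, seconds):
-    # called when a task is finished
-    conn.execute('INSERT INTO package_time (package, "release", time) '
-                 'VALUES (?, ?, ?)', (package, release, seconds))
-    conn.commit()
-
-def estimate_time(package, release):
-    # arithmetic mean of the recorded times, or None if unknown
-    row = conn.execute('SELECT AVG(time) FROM package_time '
-                       'WHERE package == ? AND "release" == ?',
-                       (package, release)).fetchone()
-    return None if row[0] is None else int(row[0])
-@end verbatim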
-
-So the server knows that crashes from an OpenOffice.org package
-take 5 minutes to process on average, and it can return the value 300
-(seconds) in the field. The client does not waste time asking about
-that task every 20 seconds, but the first status request comes after
-300 seconds. And even when the package changes (rebases etc.), the
-database provides good estimations after some time anyway
-(@ref{Task cleanup} chapter describes how the
-data are pruned).
-
-@section Keep the database with statistics small
-The database containing packages and processing times should also be
-regularly pruned to remain small and provide data quickly. The cleanup
-script should delete some rows for packages with too many entries:
-@enumerate
-@item
-get a list of packages from the database: @code{SELECT DISTINCT package,
-release FROM package_time}
-@item
-for every package, get the row count: @code{SELECT COUNT(*) FROM
-package_time WHERE package == '??' AND release == '??'}
-@item
-for every package with the row count larger than 100, some rows must be
-removed so that only the newest 100 rows remain in the database:
-@itemize
-@item
-to get the highest row id which should be deleted, execute @code{SELECT id
-FROM package_time WHERE package == '??' AND release == '??' ORDER BY id
-LIMIT 1 OFFSET ??}, where the @code{OFFSET} is the total number of rows
-for that single package minus 100
-@item
-then all the old rows can be deleted by executing @code{DELETE FROM
-package_time WHERE package == '??' AND release == '??' AND id <= ??}
-@end itemize
-@end enumerate
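-
-A corresponding pruning sketch, reusing the connection from the
-estimation example above; note the strict @code{<} in the final
-@code{DELETE}, so that exactly the newest 100 rows survive:
-
-@verbatim
-def prune_history(conn, keep=100):
-    pairs = conn.execute('SELECT DISTINCT package, "release" '
-                         'FROM package_time').fetchall()
-    for package, release in pairs:
-        count = conn.execute('SELECT COUNT(*) FROM package_time '
-                             'WHERE package == ? AND "release" == ?',
-                             (package, release)).fetchone()[0]
-        if count <= keep:
-            continue
-        # the oldest row that should survive the cleanup
-        cutoff = conn.execute('SELECT id FROM package_time '
-                              'WHERE package == ? AND "release" == ? '
-                              'ORDER BY id LIMIT 1 OFFSET ?',
-                              (package, release, count - keep)).fetchone()[0]
-        conn.execute('DELETE FROM package_time '
-                     'WHERE package == ? AND "release" == ? AND id < ?',
-                     (package, release, cutoff))
-    conn.commit()
-@end verbatim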
-
-@section Support Fedora Rawhide
-When the @command{abrt-retrace-reposync} is used to sync with the
-Rawhide repository, unneeded packages (where a newer version exists)
-must be removed after coexisting with the newer package in the same
-repository for one week.
-
-@bye
diff --git a/src/Makefile.am b/src/Makefile.am
index 84499069..3154d5c4 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -1 +1 @@
-SUBDIRS = include lib hooks btparser daemon applet gui-gtk cli plugins retrace
+SUBDIRS = include lib hooks btparser daemon applet gui-gtk cli plugins
diff --git a/src/retrace/Makefile.am b/src/retrace/Makefile.am
deleted file mode 100644
index 684d0f5a..00000000
--- a/src/retrace/Makefile.am
+++ /dev/null
@@ -1,31 +0,0 @@
-SUBDIRS = plugins
-
-bin_PROGRAMS = abrt-retrace-worker
-abrt_retrace_worker_SOURCES = worker.c
-
-dist_bin_SCRIPTS = abrt-retrace-cleanup abrt-retrace-reposync coredump2packages
-
-python_PYTHON = retrace.py
-
-worker_PYTHON = worker.py
-workerdir = $(datadir)/abrt-retrace
-
-interface_PYTHON = backtrace.wsgi create.wsgi log.wsgi settings.wsgi status.wsgi
-# interfacedir should probably be $$(pkgdatadir)/retrace
-interfacedir = $(datadir)/abrt-retrace
-
-repo_DATA = retrace.repo
-repodir = ${sysconfdir}/yum.repos.d
-
-retraceconf_DATA = retrace.conf
-retraceconfdir = ${sysconfdir}/abrt
-
-httpdconf_DATA = retrace_httpd.conf
-httpdconfdir = ${sysconfdir}/httpd/conf.d
-
-EXTRA_DIST = retrace.conf retrace_httpd.conf retrace.repo
-
-# Apache config files can be owned by root, httpd just needs read
-# access.
-#install-data-hook:
-# chown apache:apache $(DESTDIR)$(httpdconfdir)/retrace_httpd.conf
diff --git a/src/retrace/abrt-retrace-cleanup b/src/retrace/abrt-retrace-cleanup
deleted file mode 100755
index cbe1b818..00000000
--- a/src/retrace/abrt-retrace-cleanup
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/python
-
-import os
-import sys
-import time
-from retrace import *
-
-if __name__ == "__main__":
- now = int(time.time())
-
- logfile = "%s/cleanup.log" % CONFIG["LogDir"]
-
- try:
- log = open(logfile, "a")
- except IOError, ex:
- print "Error opening log file: %s" % ex
- sys.exit(1)
-
- log.write(time.strftime("[%Y-%m-%d %H:%M:%S] Running cleanup\n"))
-
- # kill tasks running > 1 hour
- ps_output = run_ps()
- running_tasks = get_running_tasks(ps_output)
- for pid, taskid, runtime in running_tasks:
- # ToDo: 5 = mm:ss, >5 = hh:mm:ss
- if len(runtime) > 5:
- log.write("Killing task %d running for %s\n" % (taskid, runtime))
- kill_process_and_childs(pid, ps_output)
-
- # kill orphaned tasks
- running_tasks = get_running_tasks()
- running_ids = []
- for pid, taskid, runtime in running_tasks:
- running_ids.append(taskid)
-
- for task in get_active_tasks():
- if not task in running_ids:
- log.write("Cleaning up orphaned task %d\n" % task)
- cleanup_task(task)
-
- # clean up old tasks
- try:
- files = os.listdir(CONFIG["SaveDir"])
- except OSError, ex:
- files = []
- log.write("Error listing task directory: %s\n" % ex)
-
- for filename in files:
- try:
- taskid = int(filename)
- except:
- continue
-
- dirpath = "%s/%s" % (CONFIG["SaveDir"], filename)
- if os.path.isdir(dirpath) and \
- (now - os.path.getatime(dirpath)) / 3600 >= CONFIG["DeleteTaskAfter"]:
- log.write("Deleting old task %s\n" % filename)
- call(["rm", "-rf", dirpath])
-
- log.close()
diff --git a/src/retrace/abrt-retrace-reposync b/src/retrace/abrt-retrace-reposync
deleted file mode 100755
index 30b437f2..00000000
--- a/src/retrace/abrt-retrace-reposync
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/usr/bin/python
-
-import argparse
-import os
-import pwd
-import sys
-from retrace import *
-
-sys.path = ["/usr/share/abrt-retrace"] + sys.path
-from plugins import *
-
-TARGET_USER = "abrt"
-
-if __name__ == "__main__":
- # parse arguments
- argparser = argparse.ArgumentParser(description="Retrace Server repository downloader")
- argparser.add_argument("distribution", type=str, help="Distribution name")
- argparser.add_argument("version", type=str, help="Release version")
- argparser.add_argument("architecture", type=str, help="CPU architecture")
- args = argparser.parse_args()
-
- distribution = args.distribution
- version = args.version
- arch = args.architecture
-
- if arch == "i686":
- arch = "i386"
-
-    # drop privileges if possible
- try:
- pw = pwd.getpwnam(TARGET_USER)
- os.setgid(pw.pw_gid)
- os.setuid(pw.pw_uid)
-        print "Privileges set to '%s'." % TARGET_USER
- except KeyError:
-        print "User '%s' does not exist. Running with default privileges." % TARGET_USER
- except OSError:
-        print "Unable to switch UID or GID. Running with default privileges."
-
- # load plugin
- plugin = None
- for iplugin in PLUGINS:
- if iplugin.distribution == distribution:
- plugin = iplugin
- break
-
- if not plugin:
- print "Unknown distribution: '%s'" % distribution
- sys.exit(1)
-
- lockfile = "/tmp/abrt-retrace-lock-%s-%s-%s" % (distribution, version, arch)
-
- if os.path.isfile(lockfile):
- print "Another process with repository download is running."
- sys.exit(2)
-
- # set lock
- if not lock(lockfile):
- print "Unable to set lock."
- sys.exit(3)
-
- null = open("/dev/null", "w")
-
- targetdir = "%s/%s-%s-%s" % (CONFIG["RepoDir"], distribution, version, arch)
-
- # run rsync
- for repo in plugin.repos:
- retcode = -1
- for mirror in repo:
- repourl = mirror.replace("$ARCH", arch).replace("$VER", version)
-
- print "Running rsync on '%s'..." % repourl,
- sys.stdout.flush()
-
- if repourl.startswith("rsync://"):
- files = [repourl]
- else:
- files = []
- try:
- for package in os.listdir(repourl):
- files.append("%s/%s" % (repourl, package))
- except Exception as ex:
- print "Error: %s. Trying another mirror..." % ex
- continue
-
- pipe = Popen(["rsync", "-t"] + files + [targetdir], stdout=null, stderr=null)
- pipe.wait()
- retcode = pipe.returncode
-
- if retcode == 0:
- print "OK"
- break
-
- print "Error. Trying another mirror..."
-
- if retcode != 0:
- print "No more mirrors to try."
-
- # run createrepo
- print "Running createrepo on '%s'..." % targetdir,
- sys.stdout.flush()
-
- pipe = Popen(["createrepo", targetdir], stdout=null, stderr=null)
- pipe.wait()
-
- null.close()
-
- if pipe.returncode != 0:
- print "Failed"
- unlock(lockfile)
- sys.exit(4)
-
- print "OK"
-
- # remove lock
- if not unlock(lockfile):
- print "Unable to remove lock."
- sys.exit(5)
diff --git a/src/retrace/backtrace.wsgi b/src/retrace/backtrace.wsgi
deleted file mode 100644
index 92c4d040..00000000
--- a/src/retrace/backtrace.wsgi
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/python
-
-from retrace import *
-
-def application(environ, start_response):
- request = Request(environ)
-
- match = URL_PARSER.match(request.script_name)
- if not match:
- return response(start_response, "404 Not Found",
- "Invalid URL")
-
- taskdir = "%s/%s" % (CONFIG["SaveDir"], match.group(1))
-
- if not os.path.isdir(taskdir):
- return response(start_response, "404 Not Found",
- "There is no such task")
-
- pwdpath = "%s/password" % taskdir
- try:
- pwdfile = open(pwdpath, "r")
- pwd = pwdfile.read()
- pwdfile.close()
- except:
- return response(start_response, "500 Internal Server Error",
- "Unable to verify password")
-
- if not "X-Task-Password" in request.headers or \
- request.headers["X-Task-Password"] != pwd:
- return response(start_response, "403 Forbidden",
- "Invalid password")
-
- btpath = "%s/retrace_backtrace" % taskdir
- if not os.path.isfile(btpath):
- return response(start_response, "404 Not Found",
- "There is no backtrace for the specified task")
-
- try:
- btfile = open(btpath, "r")
- output = btfile.read()
- btfile.close()
- except:
- return response(start_response, "500 Internal Server Error",
- "Unable to read backtrace file")
-
- return response(start_response, "200 OK", output)
diff --git a/src/retrace/coredump2packages b/src/retrace/coredump2packages
deleted file mode 100755
index ac2db9f2..00000000
--- a/src/retrace/coredump2packages
+++ /dev/null
@@ -1,293 +0,0 @@
-#! /usr/bin/python
-# -*- coding:utf-8;mode:python -*-
-# Gets list of packages necessary for processing of a coredump.
-# Uses eu-unstrip and yum.
-
-import subprocess
-import yum
-import sys
-import argparse
-
-parser = argparse.ArgumentParser(description='Get packages for coredump processing.')
-parser.add_argument('--repos', default='*', metavar='WILDCARD',
- help='Yum repository wildcard to be enabled')
-parser.add_argument('coredump', help='Coredump')
-parser.add_argument('--log', metavar='FILENAME',
- help='Store debug output to a file')
-args = parser.parse_args()
-
-if args.log:
- log = open(args.log, "w")
-else:
- log = open("/dev/null", "w")
-
-#
-# Initialize yum, enable only repositories specified via command line
-# --repos option.
-#
-stdout = sys.stdout
-sys.stdout = log
-yumbase = yum.YumBase()
-yumbase.doConfigSetup()
-if not yumbase.setCacheDir():
- exit(2)
-log.write("Closing all enabled repositories...\n")
-for repo in yumbase.repos.listEnabled():
- log.write(" - {0}\n".format(repo.name))
- repo.close()
- yumbase.repos.disableRepo(repo.id)
-log.write("Enabling repositories matching \'{0}\'...\n".format(args.repos))
-for repo in yumbase.repos.findRepos(args.repos):
- log.write(" - {0}\n".format(repo.name))
- repo.enable()
- repo.skip_if_unavailable = True
-yumbase.repos.doSetup()
-yumbase.repos.populateSack(mdtype='metadata', cacheonly=1)
-yumbase.repos.populateSack(mdtype='filelists', cacheonly=1)
-sys.stdout = stdout
-
-#
-# Get eu-unstrip output, which contains build-ids and binary object
-# paths
-#
-log.write("Running eu-unstrip...\n")
-unstrip_args = ['eu-unstrip', '--core={0}'.format(args.coredump), '-n']
-unstrip_proc = subprocess.Popen(unstrip_args, stdout=subprocess.PIPE)
-unstrip = unstrip_proc.communicate()[0]
-log.write("{0}\n".format(unstrip))
-if not unstrip:
- exit(1)
-
-def binary_packages_from_debuginfo_package(debuginfo_package, binobj_path):
- """
- Returns a list of packages corresponding to the provided debuginfo
- package. One of the packages in the list contains the binary
-    specified in binobj_path; this is a list because if binobj_path
- is not specified (and sometimes it is not, binobj_path might
- contain just '-'), we do not know which package contains the
- binary, we know only packages from the same SRPM as the debuginfo
- package.
- """
- package_list = []
- if binobj_path == '-': # [exe] without binary name
- log.write(" Yum search for [exe] without binary name, "
- "packages with NVR {0}:{1}-{2}.{3}...\n".format(debuginfo_package.epoch,
- debuginfo_package.ver,
- debuginfo_package.rel,
- debuginfo_package.arch))
- # Append all packages with the same base package name.
- # Other possibility is to download the debuginfo RPM,
- # unpack it, and get the name of the binary from the
- # /usr/lib/debug/.build-id/xx/yyyyyy symlink.
- evra_list = yumbase.pkgSack.searchNevra(epoch=debuginfo_package.epoch,
- ver=debuginfo_package.ver,
- rel=debuginfo_package.rel,
- arch=debuginfo_package.arch)
- for package in evra_list:
- log.write(" - {0}: base name \"{1}\"\n".format(str(package), package.base_package_name))
- if package.base_package_name != debuginfo_package.base_package_name:
- continue
- package_list.append(package)
- else:
- log.write(" Yum search for {0}...\n".format(binobj_path))
- binobj_package_list = yumbase.pkgSack.searchFiles(binobj_path)
- for binobj_package in binobj_package_list:
- log.write(" - {0}".format(str(binobj_package)))
- if 0 != binobj_package.returnEVR().compare(debuginfo_package.returnEVR()):
- log.write(": NVR doesn't match\n")
- continue
- log.write(": NVR matches\n")
- package_list.append(binobj_package)
- return package_list
-
-def process_unstrip_entry(build_id, binobj_path):
- """
- Returns a tuple of two items.
-
- First item is a list of packages which we found to be associated
- with the unstrip entry defined by build_id and binobj_path.
-
- Second item is a list of package versions (same package name,
- different epoch-version-release), which contain the binary object
- (an executable or shared library) corresponding to this unstrip
-    entry. If this method failed to find a unique package name (with
- only different versions), this list contains the list of base
- package names. This item can be used to associate a coredump with
- some crashing package.
- """
- package_list = []
- coredump_package_list = []
- coredump_base_package_list = []
- # Ask for a known path from debuginfo package.
- debuginfo_path = "/usr/lib/debug/.build-id/{0}/{1}.debug".format(build_id[:2], build_id[2:])
- log.write("Yum search for {0}...\n".format(debuginfo_path))
- debuginfo_package_list = yumbase.pkgSack.searchFiles(debuginfo_path)
-
- # A problem here is that some libraries lack debuginfo. Either
- # they were stripped during build, or they were not stripped by
- # /usr/lib/rpm/find-debuginfo.sh because of wrong permissions or
- # something. The proper solution is to detect such libraries and
- # fix the packages.
- for debuginfo_package in debuginfo_package_list:
- log.write(" - {0}\n".format(str(debuginfo_package)))
- package_list.append(debuginfo_package)
- binary_packages = binary_packages_from_debuginfo_package(debuginfo_package, binobj_path)
- coredump_base_package_list.append(debuginfo_package.base_package_name)
- if len(binary_packages) == 1:
- coredump_package_list.append(str(binary_packages[0]))
- package_list.extend(binary_packages)
- if len(coredump_package_list) == len(coredump_base_package_list):
- return package_list, coredump_package_list
- else:
- return package_list, coredump_base_package_list
-
-
-def process_unstrip_output():
- """
- Parse the eu-unstrip output, and search for packages via yum.
-
- Returns a tuple containing three items:
- - a list of package objects
- - a list of missing buildid entries
- - a list of coredump package adepts
- """
- # List of packages found in yum repositories and matching the
- # coredump.
- package_list = []
- # List of pairs (library/executable path, build id) which were not
- # found via yum.
- missing_buildid_list = []
- # coredump package adepts
- coredump_package_list = []
- first_entry = True
- for line in unstrip.split('\n'):
- parts = line.split()
- if not parts or len(parts) < 3:
- continue
- build_id = parts[1].split('@')[0]
- binobj_path = parts[2]
- if binobj_path[0] != '/' and parts[4] != '[exe]':
- continue
- entry_package_list, entry_coredump_package_list = process_unstrip_entry(build_id, binobj_path)
- if first_entry:
- coredump_package_list = entry_coredump_package_list
- first_entry = False
- if len(entry_package_list) == 0:
- missing_buildid_list.append([binobj_path, build_id])
- else:
- for entry_package in entry_package_list:
- found = False
- for package in package_list:
- if str(entry_package) == str(package):
- found = True
- break
- if not found:
- package_list.append(entry_package)
- return package_list, missing_buildid_list, coredump_package_list
-
-package_list, missing_buildid_list, coredump_package_list = process_unstrip_output()
-
-#
-# The package list might contain multiple packages with the same name,
-# but different version. This happens because some binary had the same
-# build id over multiple package releases.
-#
-def find_duplicates(package_list):
- for p1 in range(0, len(package_list) - 1):
- package1 = package_list[p1]
- for p2 in range(p1 + 1, len(package_list)):
- package2 = package_list[p2]
- if package1.name == package2.name:
- return package1, package2
- return None, None
-
-def count_removals(package_list, base_package_name, epoch, ver, rel, arch):
- count = 0
- for package in package_list:
- if package.base_package_name != base_package_name:
- continue
- if package.epoch != epoch or package.ver != ver or package.rel != rel or package.arch != arch:
- continue
- count += 1
- return count
-
-log.write("Checking for duplicates...\n")
-while True:
- package1, package2 = find_duplicates(package_list)
- if package1 is None:
- break
- p1removals = count_removals(package_list,
- package1.base_package_name,
- package1.epoch,
- package1.ver,
- package1.rel,
- package1.arch)
- p2removals = count_removals(package_list,
- package2.base_package_name,
- package2.epoch,
- package2.ver,
- package2.rel,
- package2.arch)
-
- log.write(" - {0}".format(package1.base_package_name))
- if package1.base_package_name != package2.base_package_name:
- log.write(" {0}\n".format(package2.base_package_name))
- else:
- log.write("\n")
- log.write(" - {0}:{1}-{2}.{3} ({4} dependent packages)\n".format(package1.epoch,
- package1.ver,
- package1.rel,
- package1.arch,
- p1removals))
- log.write(" - {0}:{1}-{2}.{3} ({4} dependent packages)\n".format(package2.epoch,
- package2.ver,
- package2.rel,
- package2.arch,
- p2removals))
-
- removal_candidate = package1
- if p1removals == p2removals:
- # Remove older if we can choose
- if package1.returnEVR().compare(package2.returnEVR()) > 0:
- removal_candidate = package2
- log.write(" - decided to remove {0}:{1}-{2}.{3} because it's older\n".format(removal_candidate.epoch,
- removal_candidate.ver,
- removal_candidate.rel,
- removal_candidate.arch))
- else:
- if p1removals > p2removals:
- removal_candidate = package2
-        log.write("  - decided to remove {0}:{1}-{2}.{3} because it has fewer dependencies\n".format(removal_candidate.epoch,
- removal_candidate.ver,
- removal_candidate.rel,
- removal_candidate.arch))
- # Remove the removal_candidate packages from the package list
- for package in package_list[:]:
- if package.base_package_name == removal_candidate.base_package_name and \
- 0 == package.returnEVR().compare(removal_candidate.returnEVR()):
- package_list.remove(package)
-
-# Clean coredump_package_list:
-for coredump_package in coredump_package_list[:]:
- found = False
- for package in package_list:
- if str(package) == coredump_package or package.base_package_name == coredump_package:
- found = True
- break
- if not found:
- coredump_package_list.remove(coredump_package)
-
-#
-# Print names of found packages first, then a newline separator, and
-# then objects for which the packages were not found.
-#
-if len(coredump_package_list) == 1:
- print coredump_package_list[0]
-else:
- print "-"
-print
-for package in sorted(package_list):
- print str(package)
-print
-for path, build_id in missing_buildid_list:
- print "{0} {1}".format(path, build_id)
diff --git a/src/retrace/create.wsgi b/src/retrace/create.wsgi
deleted file mode 100644
index 7c5f81b4..00000000
--- a/src/retrace/create.wsgi
+++ /dev/null
@@ -1,111 +0,0 @@
-#!/usr/bin/python
-
-from retrace import *
-from tempfile import *
-
-def application(environ, start_response):
- request = Request(environ)
-
- if request.scheme != "https":
- return response(start_response, "403 Forbidden",
- "You must use HTTPS")
-
- if len(get_active_tasks()) >= CONFIG["MaxParallelTasks"]:
- return response(start_response, "503 Service Unavailable",
- "Retrace server is fully loaded at the moment")
-
- if request.method != "POST":
- return response(start_response, "405 Method Not Allowed",
- "You must use POST method")
-
-    if request.content_type not in HANDLE_ARCHIVE:
- return response(start_response, "415 Unsupported Media Type",
- "Specified archive format is not supported")
-
- if not request.content_length:
- return response(start_response, "411 Length Required",
- "You need to set Content-Length header properly")
-
- if request.content_length > CONFIG["MaxPackedSize"] * 1048576:
- return response(start_response, "413 Request Entity Too Large",
- "Specified archive is too large")
-
- if CONFIG["UseWorkDir"]:
- workdir = CONFIG["WorkDir"]
- else:
- workdir = CONFIG["SaveDir"]
-
- if not os.path.isdir(workdir):
- try:
- os.makedirs(workdir)
- except:
- return response(start_response, "500 Internal Server Error",
- "Unable to create working directory")
-
- space = free_space(workdir)
-
- if not space:
- return response(start_response, "500 Internal Server Error",
- "Unable to obtain disk free space")
-
- if space - request.content_length < CONFIG["MinStorageLeft"] * 1048576:
- return response(start_response, "507 Insufficient Storage",
- "There is not enough storage space on the server")
-
- try:
- archive = NamedTemporaryFile(mode="wb", delete=False, suffix=".tar.xz")
- archive.write(request.body)
- archive.close()
- except:
- return response(start_response, "500 Internal Server Error",
- "Unable to save archive")
-
- size = unpacked_size(archive.name, request.content_type)
- if not size:
- os.unlink(archive.name)
- return response(start_response, "500 Internal Server Error",
- "Unable to obtain unpacked size")
-
- if size > CONFIG["MaxUnpackedSize"] * 1048576:
- os.unlink(archive.name)
- return response(start_response, "413 Request Entity Too Large",
- "Specified archive's content is too large")
-
- if space - size < CONFIG["MinStorageLeft"] * 1048576:
- os.unlink(archive.name)
- return response(start_response, "507 Insufficient Storage",
- "There is not enough storage space on the server")
-
- taskid, taskpass, taskdir = new_task()
- if not taskid or not taskpass or not taskdir:
- return response(start_response, "500 Internal Server Error",
- "Unable to create new task")
-
- try:
- os.mkdir("%s/crash/" % taskdir)
- os.chdir("%s/crash/" % taskdir)
- unpack_retcode = unpack(archive.name, request.content_type)
- os.unlink(archive.name)
-
- if unpack_retcode != 0:
- raise Exception
- except:
- os.chdir("/")
- Popen(["rm", "-rf", taskdir])
- return response(start_response, "500 Internal Server Error",
- "Unable to unpack archive")
-
- files = os.listdir(".")
-
- for required_file in REQUIRED_FILES:
-        if required_file not in files:
- os.chdir("/")
- Popen(["rm", "-rf", taskdir])
- return response(start_response, "403 Forbidden",
- "Required file \"%s\" is missing" % required_file)
-
- call(["/usr/bin/abrt-retrace-worker", "%d" % taskid])
-
- return response(start_response, "201 Created", "",
- [("X-Task-Id", "%d" % taskid),
- ("X-Task-Password", taskpass)])
diff --git a/src/retrace/install.sh b/src/retrace/install.sh
deleted file mode 100755
index d73a0b58..00000000
--- a/src/retrace/install.sh
+++ /dev/null
@@ -1,212 +0,0 @@
-#!/bin/bash
-
-ABRTDIR="/etc/abrt"
-LOGDIR="/var/log/abrt-retrace"
-REPODIR="/var/cache/abrt-retrace"
-SCRIPTDIR="/usr/share/abrt-retrace"
-SRCDIR="."
-WORKDIR="/var/spool/abrt-retrace"
-
-FILES="$SRCDIR/create.wsgi $SRCDIR/status.wsgi \
- $SRCDIR/log.wsgi $SRCDIR/backtrace.wsgi \
- $SRCDIR/retrace.py $SRCDIR/abrt-retrace-reposync \
-       $SRCDIR/worker.py $SRCDIR/coredump2packages \
-       $SRCDIR/abrt-retrace-cleanup"
-
-if [ "$EUID" != "0" ]
-then
- echo "You must run '$0' with root permissions."
- exit 1
-fi
-
-if ! rpm -q httpd > /dev/null 2>&1
-then
- echo "httpd package is required to install Retrace Server."
- exit 2
-fi
-
-if ! rpm -q mod_wsgi > /dev/null 2>&1
-then
-    echo "mod_wsgi package is required to install Retrace Server."
-    exit 3
-fi
-
-if ! rpm -q mod_ssl > /dev/null 2>&1
-then
-    echo "mod_ssl package is required to install Retrace Server."
-    exit 4
-fi
-
-if ! rpm -q python-webob > /dev/null 2>&1
-then
-    echo "python-webob package is required to install Retrace Server."
-    exit 5
-fi
-
-if ! rpm -q elfutils > /dev/null 2>&1
-then
-    echo "elfutils package is required to install Retrace Server."
-    exit 6
-fi
-
-if ! rpm -q createrepo > /dev/null 2>&1
-then
-    echo "createrepo package is required to install Retrace Server."
-    exit 7
-fi
-
-if ! rpm -q mock > /dev/null 2>&1
-then
-    echo "mock package is required to install Retrace Server."
-    exit 8
-fi
-
-if ! rpm -q xz > /dev/null 2>&1
-then
-    echo "xz package is required to install Retrace Server."
-    exit 9
-fi
-
-if ! rpm -q gcc > /dev/null 2>&1
-then
-    echo "gcc package is required to install Retrace Server."
-    exit 10
-fi
-
-if usermod -a -G mock root
-then
- echo "User 'root' added to 'mock' group"
-else
- echo "Unable to add user 'root' to group 'mock'"
- exit 11
-fi
-
-if [ ! -d "$ABRTDIR" ]
-then
- if mkdir "$ABRTDIR"
- then
- echo "Created directory '$ABRTDIR'"
- else
- echo "Error creating directory '$ABRTDIR'"
- exit 12
- fi
-fi
-
-if [ ! -d "$SCRIPTDIR" ]
-then
- if mkdir "$SCRIPTDIR"
- then
- echo "Created directory '$SCRIPTDIR'"
- else
- echo "Error creating directory '$SCRIPTDIR'"
- exit 13
- fi
-fi
-
-if [ ! -d "$WORKDIR" ]
-then
- if mkdir "$WORKDIR"
- then
- echo "Created directory '$WORKDIR'"
- if chown apache "$WORKDIR" && chgrp apache "$WORKDIR"
- then
- echo "$WORKDIR owner and group changed to 'apache'"
- else
- echo "$WORKDIR unable to change owner or group"
- exit 14
- fi
- else
- echo "Error creating directory '$WORKDIR'"
- exit 15
- fi
-fi
-
-if [ ! -d "$REPODIR" ]
-then
- if mkdir "$REPODIR"
- then
- echo "Created directory '$REPODIR'"
- else
- echo "Error creating directory '$REPODIR'"
- exit 16
- fi
-fi
-
-if [ ! -d "$LOGDIR" ]
-then
- if mkdir "$LOGDIR"
- then
- echo "Created directory '$LOGDIR'"
- else
- echo "Error creating directory '$LOGDIR'"
- exit 17
- fi
-fi
-
-if ! gcc -pedantic -Wall -Wextra -Werror -o "/usr/sbin/abrt-retrace-worker" "$SRCDIR/worker.c" \
- || ! chmod u+s "/usr/sbin/abrt-retrace-worker"
-then
- echo "Error compiling abrt-retrace-worker"
- exit 18
-fi
-
-echo "abrt-retrace-worker compiled"
-
-for FILE in $FILES
-do
- if cp "$FILE" "$SCRIPTDIR"
- then
- echo "Installed '$FILE'"
- else
- echo "Error installing '$FILE'"
- exit 19
- fi
-done
-
-if cp "$SRCDIR/retrace.conf" "/etc/abrt/retrace.conf"
-then
- echo "Copied '$SRCDIR/retrace.conf' to '/etc/abrt/retrace.conf'"
-else
- echo "Error copying '$SRCDIR/retrace.conf'"
- exit 23
-fi
-
-if cp "$SRCDIR/retrace.repo" "/etc/yum.repos.d/retrace.repo" \
- && cp "$SRCDIR/retrace-local.repo" "/etc/yum.repos.d/retrace-local.repo"
-then
- echo "Copied '$SRCDIR/retrace.repo' to '/etc/yum.repos.d/retrace.repo'"
- echo "Copied '$SRCDIR/retrace-local.repo' to '/etc/yum.repos.d/retrace-local.repo'"
- echo "Running initial repository download. This will take some time."
- "$SCRIPTDIR/abrt-retrace-reposync" fedora 14 i686
- createrepo "$REPODIR/fedora-14-i686" > /dev/null
- createrepo "$REPODIR/fedora-14-i686-debuginfo" > /dev/null
- "$SCRIPTDIR/abrt-retrace-reposync" fedora 14 x86_64
- createrepo "$REPODIR/fedora-14-x86_64" > /dev/null
- createrepo "$REPODIR/fedora-14-x86_64-debuginfo" > /dev/null
- "$SCRIPTDIR/abrt-retrace-reposync" fedora 15 i686
-# createrepo "$REPODIR/fedora-15-i686"
-# createrepo "$REPODIR/fedora-15-i686-debuginfo"
- "$SCRIPTDIR/abrt-retrace-reposync" fedora 15 x86_64
-# createrepo "$REPODIR/fedora-15-x86_64"
-# createrepo "$REPODIR/fedora-15-x86_64-debuginfo"
-else
- echo "Error copying '$SRCDIR/retrace.repo' or '$SRCDIR/retrace-local.repo'"
- exit 24
-fi
-
-if cp "$SRCDIR/retrace_httpd.conf" "/etc/httpd/conf.d/retrace.conf"
-then
- echo "Copied '$SRCDIR/retrace_httpd.conf' to '/etc/httpd/conf.d/retrace.conf'"
- service httpd restart
-else
- echo "Error copying '$SRCDIR/retrace_httpd.conf'"
- exit 25
-fi
-
-echo
-echo "Retrace Server setup OK."
-echo "You should set up cron to periodically synchronize local repositories. The recommended configuration is:"
-echo "0 0,8,16 * * * $SCRIPTDIR/abrt-retrace-reposync fedora 14 i686"
-echo "0 2,10,18 * * * $SCRIPTDIR/abrt-retrace-reposync fedora 14 x86_64"
-echo "0 4,12,20 * * * $SCRIPTDIR/abrt-retrace-reposync fedora 15 i686"
-echo "0 6,14,22 * * * $SCRIPTDIR/abrt-retrace-reposync fedora 15 x86_64"
diff --git a/src/retrace/log.wsgi b/src/retrace/log.wsgi
deleted file mode 100644
index 5bdc4ffb..00000000
--- a/src/retrace/log.wsgi
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/python
-
-from retrace import *
-
-def application(environ, start_response):
- request = Request(environ)
-
- match = URL_PARSER.match(request.script_name)
- if not match:
- return response(start_response, "404 Not Found",
- "Invalid URL")
-
- taskdir = "%s/%s" % (CONFIG["SaveDir"], match.group(1))
-
- if not os.path.isdir(taskdir):
- return response(start_response, "404 Not Found",
- "There is no such task")
-
- pwdpath = "%s/password" % taskdir
- try:
- pwdfile = open(pwdpath, "r")
- pwd = pwdfile.read()
- pwdfile.close()
- except:
- return response(start_response, "500 Internal Server Error",
- "Unable to verify password")
-
- if not "X-Task-Password" in request.headers or \
- request.headers["X-Task-Password"] != pwd:
- return response(start_response, "403 Forbidden",
- "Invalid password")
-
- logpath = "%s/retrace_log" % taskdir
- if not os.path.isfile(logpath):
- return response(start_response, "404 Not Found",
- "There is no log for the specified task")
-
- try:
- logfile = open(logpath, "r")
- output = logfile.read()
- logfile.close()
- except:
- return response(start_response, "500 Internal Server Error",
- "Unable to read log file")
-
- return response(start_response, "200 OK", output)
diff --git a/src/retrace/plugins/Makefile.am b/src/retrace/plugins/Makefile.am
deleted file mode 100644
index 251efbff..00000000
--- a/src/retrace/plugins/Makefile.am
+++ /dev/null
@@ -1,2 +0,0 @@
-plugins_PYTHON = __init__.py fedora.py
-pluginsdir = $(datadir)/abrt-retrace/plugins
diff --git a/src/retrace/plugins/__init__.py b/src/retrace/plugins/__init__.py
deleted file mode 100644
index 5c041b0c..00000000
--- a/src/retrace/plugins/__init__.py
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/python
-
-import os
-
-PLUGIN_DIR = "/usr/share/abrt-retrace/plugins"
-PLUGINS = []
-
-try:
- files = os.listdir(PLUGIN_DIR)
-except Exception as ex:
- print "Unable to list directory '%s': %s" % (PLUGIN_DIR, ex)
- raise ImportError, ex
-
-for filename in files:
- if not filename.startswith("_") and filename.endswith(".py"):
- pluginname = filename.replace(".py", "")
- try:
- this = __import__("%s.%s" % (__name__, pluginname))
- except:
- continue
-
- plugin = this.__getattribute__(pluginname)
- if plugin.__dict__.has_key("distribution") and plugin.__dict__.has_key("repos"):
- PLUGINS.append(plugin)
diff --git a/src/retrace/plugins/fedora.py b/src/retrace/plugins/fedora.py
deleted file mode 100644
index 6bf2f7b9..00000000
--- a/src/retrace/plugins/fedora.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/python
-
-import re
-
-distribution = "fedora"
-abrtparser = re.compile("^Fedora release ([0-9]+) \(([^\)]+)\)$")
-guessparser = re.compile("\.fc([0-9]+)")
-repos = [
- [
- "rsync://ftp.sh.cvut.cz/fedora/linux/releases/$VER/Everything/$ARCH/os/Packages/*",
- "rsync://ftp.sh.cvut.cz/fedora/linux/development/$VER/$ARCH/os/Packages/*",
- ],
- [
- "rsync://ftp.sh.cvut.cz/fedora/linux/releases/$VER/Everything/$ARCH/debug/*",
- "rsync://ftp.sh.cvut.cz/fedora/linux/development/$VER/$ARCH/debug/*",
- ],
- [
- "rsync://ftp.sh.cvut.cz/fedora/linux/updates/$VER/$ARCH/*",
- ],
- [
- "rsync://ftp.sh.cvut.cz/fedora/linux/updates/$VER/$ARCH/debug/*",
- ],
- [
- "rsync://ftp.sh.cvut.cz/fedora/linux/updates/testing/$VER/$ARCH/*",
- ],
- [
- "rsync://ftp.sh.cvut.cz/fedora/linux/updates/testing/$VER/$ARCH/debug/*",
- ],
-]
diff --git a/src/retrace/retrace.conf b/src/retrace/retrace.conf
deleted file mode 100644
index e97e7cfc..00000000
--- a/src/retrace/retrace.conf
+++ /dev/null
@@ -1,33 +0,0 @@
-[retrace]
-# Maximum tasks running at one moment
-MaxParallelTasks = 5
-
-# Maximum size of archive uploaded by user (MB)
-MaxPackedSize = 50
-
-# Maximum size of archive contents (MB)
-MaxUnpackedSize = 1024
-
-# Minimal storage left on WorkDir FS after unpacking archive (MB)
-MinStorageLeft = 1024
-
-# Delete old tasks after (hours)
-DeleteTaskAfter = 120
-
-# SQLite statistics DB filename
-DBFile = stats.db
-
-# Log directory
-LogDir = /var/log/abrt-retrace
-
-# Local repos directory
-RepoDir = /var/cache/abrt-retrace
-
-# Directory where the crashes and results are saved
-SaveDir = /var/spool/abrt-retrace
-
-# Whether to use explicit working directory, otherwise SaveDir is used
-UseWorkDir = 0
-
-# Working directory
-WorkDir = /tmp/abrt-retrace
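
Note that all size options are in megabytes: with MaxPackedSize = 50 as above, create.wsgi rejects any upload whose Content-Length exceeds 50 * 1048576 = 52428800 bytes with "413 Request Entity Too Large"; MaxUnpackedSize and MinStorageLeft are multiplied by the same factor.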
diff --git a/src/retrace/retrace.py b/src/retrace/retrace.py
deleted file mode 100644
index 56bfd46e..00000000
--- a/src/retrace/retrace.py
+++ /dev/null
@@ -1,412 +0,0 @@
-#!/usr/bin/python
-
-import os
-import re
-import ConfigParser
-import random
-import sqlite3
-from webob import Request
-from subprocess import *
-
-REQUIRED_FILES = ["coredump", "executable", "package"]
-
-DF_BIN = "/bin/df"
-DU_BIN = "/usr/bin/du"
-GZIP_BIN = "/usr/bin/gzip"
-TAR_BIN = "/bin/tar"
-XZ_BIN = "/usr/bin/xz"
-
-TASKID_PARSER = re.compile("^.*/([0-9]+)/*$")
-PACKAGE_PARSER = re.compile("^(.+)-([0-9]+(\.[0-9]+)*-[0-9]+)\.([^-]+)$")
-DF_OUTPUT_PARSER = re.compile("^([^ \t]*)[ \t]+([0-9]+)[ \t]+([0-9]+)[ \t]+([0-9]+)[ \t]+([0-9]+%)[ \t]+(.*)$")
-DU_OUTPUT_PARSER = re.compile("^([0-9]+)")
-URL_PARSER = re.compile("^/([0-9]+)/?")
-WORKER_RUNNING_PARSER = re.compile("^[ \t]*([0-9]+)[ \t]+[0-9]+[ \t]+([^ \t]+)[ \t]+.*abrt-retrace-worker ([0-9]+)$")
-
-HANDLE_ARCHIVE = {
- "application/x-xz-compressed-tar": {
- "unpack": [TAR_BIN, "xJf"],
- "size": ([XZ_BIN, "--list", "--robot"], re.compile("^totals[ \t]+[0-9]+[ \t]+[0-9]+[ \t]+[0-9]+[ \t]+([0-9]+).*")),
- },
-
- "application/x-gzip": {
- "unpack": [TAR_BIN, "xzf"],
- "size": ([GZIP_BIN, "--list"], re.compile("^[^0-9]*[0-9]+[^0-9]+([0-9]+).*$")),
- },
-
- "application/x-tar": {
- "unpack": [TAR_BIN, "xf"],
- "size": (["ls", "-l"], re.compile("^[ \t]*[^ ^\t]+[ \t]+[^ ^\t]+[ \t]+[^ ^\t]+[ \t]+[^ ^\t]+[ \t]+([0-9]+).*$")),
- },
-}
-
-TASKPASS_ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
-
-CONFIG_FILE = "/etc/abrt/retrace.conf"
-CONFIG = {
- "TaskIdLength": 9,
- "TaskPassLength": 32,
- "MaxParallelTasks": 10,
- "MaxPackedSize": 30,
- "MaxUnpackedSize": 600,
- "MinStorageLeft": 10240,
- "DeleteTaskAfter": 120,
- "LogDir": "/var/log/abrt-retrace",
- "RepoDir": "/var/cache/abrt-retrace",
- "SaveDir": "/var/spool/abrt-retrace",
- "WorkDir": "/tmp/abrt-retrace",
- "UseWorkDir": False,
- "DBFile": "stats.db",
-}
-
-STATUS_ANALYZE, STATUS_INIT, STATUS_BACKTRACE, STATUS_CLEANUP, \
-STATUS_STATS, STATUS_FINISHING, STATUS_SUCCESS, STATUS_FAIL = xrange(8)
-
-STATUS = [
- "Analyzing crash data",
- "Initializing virtual root",
- "Generating backtrace",
- "Cleaning up virtual root",
- "Saving crash statistics",
- "Finishing task",
- "Retrace job finished successfully",
- "Retrace job failed",
-]
-
-
-def lock(lockfile):
- try:
- if not os.path.isfile(lockfile):
- open(lockfile, "w").close()
- except:
- return False
-
- return True
-
-def unlock(lockfile):
- try:
- if os.path.getsize(lockfile) == 0:
- os.unlink(lockfile)
- except:
- return False
-
- return True
-
-def read_config():
- parser = ConfigParser.ConfigParser()
- parser.read(CONFIG_FILE)
- for key in CONFIG.keys():
- vartype = type(CONFIG[key])
- if vartype is int:
- get = parser.getint
- elif vartype is bool:
- get = parser.getboolean
- elif vartype is float:
- get = parser.getfloat
- else:
- get = parser.get
-
- try:
- CONFIG[key] = get("retrace", key)
- except:
- pass
-
-def free_space(path):
- pipe = Popen([DF_BIN, path], stdout=PIPE).stdout
- for line in pipe.readlines():
- match = DF_OUTPUT_PARSER.match(line)
- if match:
- pipe.close()
- return 1024 * int(match.group(4))
-
- pipe.close()
- return None
-
-def dir_size(path):
- pipe = Popen([DU_BIN, "-s", path], stdout=PIPE).stdout
- for line in pipe.readlines():
- match = DU_OUTPUT_PARSER.match(line)
- if match:
- pipe.close()
- return 1024 * int(match.group(1))
-
- pipe.close()
- return 0
-
-def unpacked_size(archive, mime):
- command, parser = HANDLE_ARCHIVE[mime]["size"]
- pipe = Popen(command + [archive], stdout=PIPE).stdout
- for line in pipe.readlines():
- match = parser.match(line)
- if match:
- pipe.close()
- return int(match.group(1))
-
- pipe.close()
- return None
-
-def guess_arch(coredump_path):
- pipe = Popen(["file", coredump_path], stdout=PIPE).stdout
- output = pipe.read()
- pipe.close()
-
- if "x86-64" in output:
- return "x86_64"
-
- if "80386" in output:
- return "i386"
-
- return None
-
-def guess_release(package):
- for plugin in PLUGINS:
- match = plugin.guessparser.search(package)
- if match:
- return plugin.distribution, match.group(1)
-
- return None, None
-
-def run_gdb(savedir):
- try:
- exec_file = open("%s/crash/executable" % savedir, "r")
- executable = exec_file.read().replace("'", "").replace("\"", "")
- exec_file.close()
- except:
- return ""
-
- mockr = "../../%s/mock" % savedir
-
- chmod = Popen(["mock", "shell", "-r", mockr, "--",
- "/bin/chmod", "777", executable])
- if chmod.wait() != 0:
- return ""
-
- pipe = Popen(["mock", "shell", "-r", mockr, "--",
- "su", "mockbuild", "-c",
- "\" gdb -batch"
- " -ex 'file %s'"
- " -ex 'core-file /var/spool/abrt/crash/coredump'"
- " -ex 'thread apply all backtrace 2048 full'"
- " -ex 'info sharedlib'"
- " -ex 'print (char*)__abort_msg'"
- " -ex 'print (char*)__glib_assert_msg'"
- " -ex 'info registers'"
- " -ex 'disassemble' \"" % executable,
- # redirect GDB's stderr, ignore mock's stderr
- "2>&1"], stdout=PIPE).stdout
-
- backtrace = pipe.read()
- pipe.close()
-
- return backtrace
-
-def gen_task_password(taskdir):
- generator = random.SystemRandom()
- taskpass = ""
- for j in xrange(CONFIG["TaskPassLength"]):
- taskpass += generator.choice(TASKPASS_ALPHABET)
-
- try:
- passfile = open("%s/password" % taskdir, "w")
- passfile.write(taskpass)
- passfile.close()
- except:
- return None
-
- return taskpass
-
-def get_task_est_time(taskdir):
- return 180
-
-def new_task():
- i = 0
- newdir = CONFIG["SaveDir"]
- while os.path.exists(newdir) and i < 50:
- i += 1
- taskid = random.randint(pow(10, CONFIG["TaskIdLength"] - 1), pow(10, CONFIG["TaskIdLength"]) - 1)
- newdir = "%s/%d" % (CONFIG["SaveDir"], taskid)
-
- try:
- os.mkdir(newdir)
- taskpass = gen_task_password(newdir)
- if not taskpass:
- Popen(["rm", "-rf", newdir])
- raise Exception
-
- return taskid, taskpass, newdir
- except:
- return None, None, None
-
-def unpack(archive, mime):
- pipe = Popen(HANDLE_ARCHIVE[mime]["unpack"] + [archive])
- pipe.wait()
- return pipe.returncode
-
-def response(start_response, status, body="", extra_headers=[]):
- start_response(status, [("Content-Type", "text/plain"), ("Content-Length", "%d" % len(body))] + extra_headers)
- return [body]
-
-def get_active_tasks():
- tasks = []
- if CONFIG["UseWorkDir"]:
- tasksdir = CONFIG["WorkDir"]
- else:
- tasksdir = CONFIG["SaveDir"]
-
- for filename in os.listdir(tasksdir):
- if len(filename) != CONFIG["TaskIdLength"]:
- continue
-
- try:
- taskid = int(filename)
- except:
- continue
-
- path = "%s/%s" % (tasksdir, filename)
- if os.path.isdir(path) and not os.path.isfile("%s/retrace_log" % path):
- tasks.append(taskid)
-
- return tasks
-
-def run_ps():
- pipe = Popen(["ps", "-eo", "pid,ppid,etime,cmd"], stdout=PIPE).stdout
- lines = pipe.readlines()
- pipe.close()
-
- return lines
-
-def get_running_tasks(ps_output=None):
- if not ps_output:
- ps_output = run_ps()
-
- result = []
-
- for line in ps_output:
- match = WORKER_RUNNING_PARSER.match(line)
- if match:
- result.append((int(match.group(1)), int(match.group(3)), match.group(2)))
-
- return result
-
-def get_process_tree(pid, ps_output):
- result = [pid]
-
- parser = re.compile("^([0-9]+)[ \t]+(%d).*$" % pid)
-
- for line in ps_output:
- match = parser.match(line)
- if match:
- pid = int(match.group(1))
- result.extend(get_process_tree(pid, ps_output))
-
- return result
-
-def kill_process_and_childs(process_id, ps_output=None):
- result = True
-
- if not ps_output:
- ps_output = run_ps()
-
- for pid in get_process_tree(process_id, ps_output):
- try:
- os.kill(pid, 9)
- except OSError, ex:
- result = False
-
- return result
-
-def cleanup_task(taskid, gc=True):
- null = open("/dev/null", "w")
-
- savedir = "%s/%d" % (CONFIG["SaveDir"], taskid)
- if os.path.isfile("%s/mock.cfg" % savedir):
- call(["mock", "-r", "../../%s/mock" % savedir, "--scrub=all"],
- stdout=null, stderr=null)
-
- call(["rm", "-rf", "%s/crash" % savedir, "%s/mock.cfg" % savedir],
- stdout=null, stderr=null)
-
- rawlog = "%s/log" % savedir
- newlog = "%s/retrace_log" % savedir
- if os.path.isfile(rawlog):
- try:
- os.rename(rawlog, newlog)
- except:
- pass
-
- if gc:
- try:
- log = open(newlog, "a")
- log.write("Killed by garbage collector\n")
- log.close()
- except:
- pass
-
- null.close()
-
-def init_crashstats_db():
- try:
- con = sqlite3.connect("%s/%s" % (CONFIG["SaveDir"], CONFIG["DBFile"]))
- query = con.cursor()
- query.execute("""
- CREATE TABLE IF NOT EXISTS
- retracestats(
- taskid INT NOT NULL,
- package VARCHAR(255) NOT NULL,
- version VARCHAR(16) NOT NULL,
- release VARCHAR(16) NOT NULL,
- arch VARCHAR(8) NOT NULL,
- starttime INT NOT NULL,
- duration INT NOT NULL,
- prerunning TINYINT NOT NULL,
- postrunning TINYINT NOT NULL,
- chrootsize BIGINT NOT NULL
- )
- """)
- con.commit()
- con.close()
-
- return True
- except:
- return False
-
-def save_crashstats(crashstats):
- try:
- con = sqlite3.connect("%s/%s" % (CONFIG["SaveDir"], CONFIG["DBFile"]))
- query = con.cursor()
- query.execute("""
- INSERT INTO retracestats(taskid, package, version, release, arch,
- starttime, duration, prerunning, postrunning, chrootsize)
- VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
- """,
- (crashstats["taskid"], crashstats["package"], crashstats["version"],
- crashstats["release"], crashstats["arch"], crashstats["starttime"],
- crashstats["duration"], crashstats["prerunning"],
- crashstats["postrunning"], crashstats["chrootsize"])
- )
- con.commit()
- con.close()
-
- return True
- except:
- return False
-
-class logger():
- def __init__(self, taskid):
- "Starts logging into savedir."
- self._logfile = open("%s/%s/log" % (CONFIG["SaveDir"], taskid), "w")
-
- def write(self, msg):
- "Writes msg into log file."
- if not self._logfile.closed:
- self._logfile.write(msg)
- self._logfile.flush()
-
- def close(self):
- "Finishes logging and renames file to retrace_log."
- if not self._logfile.closed:
- self._logfile.close()
- os.rename(self._logfile.name, self._logfile.name.replace("/log", "/retrace_log"))
-
-### read config on import ###
-read_config()
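
Archive handling is table-driven through HANDLE_ARCHIVE, so the WSGI scripts never hard-code a format. A minimal sketch of the probe-then-unpack sequence that create.wsgi performs (the archive path is illustrative):

    #!/usr/bin/python
    from retrace import HANDLE_ARCHIVE, unpacked_size, unpack

    archive = "/tmp/crash.tar.xz"            # illustrative path
    mime = "application/x-xz-compressed-tar"

    if mime in HANDLE_ARCHIVE:
        size = unpacked_size(archive, mime)  # parses `xz --list --robot` output
        if size is None:
            print "unable to determine unpacked size"
        elif unpack(archive, mime) == 0:     # runs `tar xJf` in the current directory
            print "unpacked %d bytes" % size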
diff --git a/src/retrace/retrace.repo b/src/retrace/retrace.repo
deleted file mode 100644
index ccc224be..00000000
--- a/src/retrace/retrace.repo
+++ /dev/null
@@ -1,23 +0,0 @@
-[retrace-fedora-14-i386]
-name=Fedora 14 - i386
-failovermethod=priority
-baseurl=file:///var/cache/abrt-retrace/fedora-14-i386/
-enabled=0
-
-[retrace-fedora-14-x86_64]
-name=Fedora 14 - x86_64
-failovermethod=priority
-baseurl=file:///var/cache/abrt-retrace/fedora-14-x86_64/
-enabled=0
-
-[retrace-fedora-15-i386]
-name=Fedora 15 - i386
-failovermethod=priority
-baseurl=file:///var/cache/abrt-retrace/fedora-15-i386/
-enabled=0
-
-[retrace-fedora-15-x86_64]
-name=Fedora 15 - x86_64
-failovermethod=priority
-baseurl=file:///var/cache/abrt-retrace/fedora-15-x86_64/
-enabled=0
diff --git a/src/retrace/retrace_httpd.conf b/src/retrace/retrace_httpd.conf
deleted file mode 100644
index bd282471..00000000
--- a/src/retrace/retrace_httpd.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-WSGIScriptAliasMatch ^/settings$ /usr/share/abrt-retrace/settings.wsgi
-WSGIScriptAliasMatch ^/create$ /usr/share/abrt-retrace/create.wsgi
-WSGIScriptAliasMatch ^/[0-9]+/?$ /usr/share/abrt-retrace/status.wsgi
-WSGIScriptAliasMatch ^/[0-9]+/log$ /usr/share/abrt-retrace/log.wsgi
-WSGIScriptAliasMatch ^/[0-9]+/backtrace$ /usr/share/abrt-retrace/backtrace.wsgi
diff --git a/src/retrace/settings.wsgi b/src/retrace/settings.wsgi
deleted file mode 100644
index 9c906c49..00000000
--- a/src/retrace/settings.wsgi
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/python
-
-from retrace import *
-
-def application(environ, start_response):
- formats = ""
- for format in HANDLE_ARCHIVE.keys():
- formats += " %s" % format
-
- output = [
- "running_tasks %d" % len(get_active_tasks()),
- "max_running_tasks %d" % CONFIG["MaxParallelTasks"],
- "max_packed_size %d" % CONFIG["MaxPackedSize"],
- "max_unpacked_size %d" % CONFIG["MaxUnpackedSize"],
- "supported_formats%s" % formats,
- ]
-
- return response(start_response, "200 OK", "\n".join(output))
diff --git a/src/retrace/status.wsgi b/src/retrace/status.wsgi
deleted file mode 100644
index 50334772..00000000
--- a/src/retrace/status.wsgi
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/python
-
-from retrace import *
-
-def application(environ, start_response):
- request = Request(environ)
-
- match = URL_PARSER.match(request.script_name)
- if not match:
- return response(start_response, "404 Not Found",
- "Invalid URL")
-
- taskdir = "%s/%s" % (CONFIG["SaveDir"], match.group(1))
-
- if not os.path.isdir(taskdir):
- return response(start_response, "404 Not Found",
- "There is no such task")
-
- pwdpath = "%s/password" % taskdir
- try:
- pwdfile = open(pwdpath, "r")
- pwd = pwdfile.read()
- pwdfile.close()
- except:
- return response(start_response, "500 Internal Server Error",
- "Unable to verify password")
-
- if not "X-Task-Password" in request.headers or \
- request.headers["X-Task-Password"] != pwd:
- return response(start_response, "403 Forbidden",
- "Invalid password")
-
- status = "PENDING"
- if os.path.isfile("%s/retrace_log" % taskdir):
- if os.path.isfile("%s/retrace_backtrace" % taskdir):
- status = "FINISHED_SUCCESS"
- else:
- status = "FINISHED_FAILURE"
-
- statusmsg = status
- try:
- statusfile = open("%s/status" % taskdir, "r")
- statusmsg = statusfile.read()
- statusfile.close()
- except:
- pass
-
- return response(start_response, "200 OK",
- statusmsg, [("X-Task-Status", status)])
diff --git a/src/retrace/worker.c b/src/retrace/worker.c
deleted file mode 100644
index a49f74c3..00000000
--- a/src/retrace/worker.c
+++ /dev/null
@@ -1,77 +0,0 @@
-#include <stdio.h>
-#include <ctype.h>
-#include <pwd.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-/*
-  Launches the Retrace Server worker (worker.py) with root permissions.
-  The binary must be owned by root and have the SUID bit set.
-*/
-
-int main(int argc, char **argv)
-{
- char command[256];
- FILE *pipe;
- int i;
- struct passwd *apache_user;
- const char *apache_username = "apache";
- pid_t pid;
-
- if (argc != 2)
- {
- fprintf(stderr, "Usage: %s task_id\n", argv[0]);
- return 1;
- }
-
- if (setuid(0) != 0)
- {
- fprintf(stderr, "You must run %s with root permissions.\n", argv[0]);
- return 2;
- }
-
- for (i = 0; argv[1][i]; ++i)
- if (!isdigit(argv[1][i]))
- {
- fputs("Task ID may only contain digits.", stderr);
- return 3;
- }
-
- apache_user = getpwnam(apache_username);
- if (!apache_user)
- {
- fprintf(stderr, "User \"%s\" not found.\n", apache_username);
- return 4;
- }
-
- sprintf(command, "%d", apache_user->pw_uid);
-
- setenv("SUDO_USER", apache_username, 1);
- setenv("SUDO_UID", command, 1);
- /* required by mock to be able to write into result directory */
- setenv("SUDO_GID", "0", 1);
-
- /* fork and launch worker.py */
- pid = fork();
-
- if (pid < 0)
- {
- fputs("Unable to fork.", stderr);
- return 6;
- }
-
- /* parent - exit */
- if (pid > 0)
- return 0;
-
- /* child */
- sprintf(command, "/usr/bin/python /usr/share/abrt-retrace/worker.py \"%s\"", argv[1]);
- pipe = popen(command, "r");
- if (pipe == NULL)
- {
- fputs("Unable to run 'worker.py'.", stderr);
- return 5;
- }
-
- return pclose(pipe) >> 8;
-}
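
The suid wrapper is what lets create.wsgi, running as the unprivileged apache user, start a retrace: it validates the task id, exports the SUDO_* variables mock needs to write its result directory, then forks and exits in the parent, so the HTTP request returns immediately while worker.py keeps running as root in the background.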
diff --git a/src/retrace/worker.py b/src/retrace/worker.py
deleted file mode 100755
index 24defa66..00000000
--- a/src/retrace/worker.py
+++ /dev/null
@@ -1,305 +0,0 @@
-#!/usr/bin/python
-
-import sys
-import time
-from retrace import *
-
-sys.path = ["/usr/share/abrt-retrace/"] + sys.path
-from plugins import *
-
-LOG = None
-taskid = None
-
-def set_status(statusid):
- "Sets status for the task"
- if not LOG or not taskid:
- return
-
- filepath = "%s/%s/status" % (CONFIG["SaveDir"], taskid)
- try:
- statusfile = open(filepath, "w")
- statusfile.write(STATUS[statusid])
- statusfile.close()
- except:
- pass
-
- LOG.write("%s " % STATUS[statusid])
-
-def fail(exitcode):
- "Kills script with given exitcode"
- set_status(STATUS_FAIL)
- LOG.close()
- cleanup_task(int(taskid), False)
- sys.exit(exitcode)
-
-def retrace_run(errorcode, cmd):
- "Runs cmd using subprocess.Popen and kills script with errorcode on failure"
- try:
- process = Popen(cmd, stdout=PIPE, stderr=STDOUT)
- process.wait()
- output = process.stdout.read()
- process.stdout.close()
- except Exception as ex:
- process = None
- output = "An unhandled exception occured: %s" % ex
-
- if not process or process.returncode != 0:
- LOG.write("Error %d:\n=== OUTPUT ===\n%s\n" % (errorcode, output))
- fail(errorcode)
-
- return output
-
-if __name__ == "__main__":
- starttime = time.time()
-
- if len(sys.argv) != 2:
- sys.stderr.write("Usage: %s task_id\n" % sys.argv[0])
- sys.exit(11)
-
- taskid = sys.argv[1]
- try:
- taskid_int = int(sys.argv[1])
- except:
- sys.stderr.write("Task ID may only contain digits.\n")
- sys.exit(12)
-
- savedir = workdir = "%s/%s" % (CONFIG["SaveDir"], taskid)
-
- if CONFIG["UseWorkDir"]:
- workdir = "%s/%s" % (CONFIG["WorkDir"], taskid)
-
- if not os.path.isdir(savedir):
- sys.stderr.write("Task '%s' does not exist.\n" % taskid)
- sys.exit(13)
-
- try:
- LOG = logger(taskid)
- except Exception as ex:
- sys.stderr.write("Unable to start logging for task '%s': %s.\n" % (taskid, ex))
- sys.exit(14)
-
- set_status(STATUS_ANALYZE)
-
- # check the crash directory for required files
- for required_file in REQUIRED_FILES:
- if not os.path.isfile("%s/crash/%s" % (savedir, required_file)):
- LOG.write("Crash directory does not contain required file '%s'.\n" % required_file)
- fail(15)
-
- # read architecture from coredump
- arch = guess_arch("%s/crash/coredump" % savedir)
-
- if not arch:
- LOG.write("Unable to read architecture from 'coredump' file.\n")
- fail(16)
-
- # read package file
- try:
- package_file = open("%s/crash/package" % savedir, "r")
- crash_package = package_file.read()
- package_file.close()
- except Exception as ex:
- LOG.write("Unable to read crash package from 'package' file: %s.\n" % ex)
- fail(17)
-
- # read release, distribution and version from release file
- release_path = "%s/crash/os_release" % savedir
- if not os.path.isfile(release_path):
- release_path = "%s/crash/release" % savedir
-
- try:
- release_file = open(release_path, "r")
- release = release_file.read()
- release_file.close()
-
- version = distribution = None
- for plugin in PLUGINS:
- match = plugin.abrtparser.match(release)
- if match:
- version = match.group(1)
- distribution = plugin.distribution
- break
-
- if not version or not distribution:
- raise Exception, "Release '%s' is not supported.\n" % release
-
- except Exception as ex:
- LOG.write("Unable to read distribution and version from 'release' file: %s.\n" % ex)
- LOG.write("Trying to guess distribution and version... ")
- distribution, version = guess_release(crash_package)
- if distribution and version:
- LOG.write("%s-%s\n" % (distribution, version))
- else:
- LOG.write("Failure\n")
- fail(18)
-
-    # crash_package was already read above; start the package list with it
- packages = crash_package
-
- # read required packages from coredump
- try:
- # ToDo: deal with not found build-ids
- pipe = Popen(["coredump2packages", "%s/crash/coredump" % savedir,
- "--repos=retrace-%s-%s-%s*" % (distribution, version, arch)],
- stdout=PIPE).stdout
- section = 0
- crash_package_or_component = None
- for line in pipe.readlines():
- if line == "\n":
- section += 1
- continue
- elif 0 == section:
- crash_package_or_component = line.strip()
- elif 1 == section:
- packages += " %s" % line.rstrip("\n")
- elif 2 == section:
- # Missing build ids
- pass
- pipe.close()
- except Exception as ex:
- LOG.write("Unable to obtain packages from 'coredump' file: %s.\n" % ex)
- fail(20)
-
- # create mock config file
- try:
- mockcfg = open("%s/mock.cfg" % savedir, "w")
- mockcfg.write("config_opts['root'] = '%s'\n" % taskid)
- mockcfg.write("config_opts['target_arch'] = '%s'\n" % arch)
- mockcfg.write("config_opts['chroot_setup_cmd'] = '--skip-broken install %s shadow-utils gdb rpm'\n" % packages)
- mockcfg.write("config_opts['plugin_conf']['ccache_enable'] = False\n")
- mockcfg.write("config_opts['plugin_conf']['yum_cache_enable'] = False\n")
- mockcfg.write("config_opts['plugin_conf']['root_cache_enable'] = False\n")
- mockcfg.write("\n")
- mockcfg.write("config_opts['yum.conf'] = \"\"\"\n")
- mockcfg.write("[main]\n")
- mockcfg.write("cachedir=/var/cache/yum\n")
- mockcfg.write("debuglevel=1\n")
- mockcfg.write("reposdir=/dev/null\n")
- mockcfg.write("logfile=/var/log/yum.log\n")
- mockcfg.write("retries=20\n")
- mockcfg.write("obsoletes=1\n")
- mockcfg.write("gpgcheck=0\n")
- mockcfg.write("assumeyes=1\n")
- mockcfg.write("syslog_ident=mock\n")
- mockcfg.write("syslog_device=\n")
- mockcfg.write("\n")
- mockcfg.write("#repos\n")
- mockcfg.write("\n")
- mockcfg.write("[%s]\n" % distribution)
- mockcfg.write("name=%s\n" % distribution)
- mockcfg.write("baseurl=file://%s/%s-%s-%s/\n" % (CONFIG["RepoDir"], distribution, version, arch))
- mockcfg.write("failovermethod=priority\n")
- mockcfg.write("\"\"\"\n")
- mockcfg.close()
- except Exception as ex:
- LOG.write("Unable to create mock config file: %s.\n" % ex)
- fail(21)
-
- LOG.write("OK\n")
-
- # get count of tasks running before starting
- prerunning = len(get_active_tasks()) - 1
-
- # run retrace
- mockr = "../../%s/mock" % savedir
-
- set_status(STATUS_INIT)
-
- retrace_run(25, ["mock", "init", "-r", mockr])
- retrace_run(26, ["mock", "-r", mockr, "--copyin", "%s/crash" % savedir, "/var/spool/abrt/crash"])
- retrace_run(27, ["mock", "-r", mockr, "shell", "--", "chgrp", "-R", "mockbuild", "/var/spool/abrt/crash"])
-
- LOG.write("OK\n")
-
- # generate backtrace
- set_status(STATUS_BACKTRACE)
-
- backtrace = run_gdb(savedir)
-
- if not backtrace:
- LOG.write("Error\n")
- fail(29)
-
- try:
- bt_file = open("%s/backtrace" % savedir, "w")
- bt_file.write(backtrace)
- bt_file.close()
- except Exception as ex:
- LOG.write("Error: %s.\n" % ex)
- fail(30)
-
- LOG.write("OK\n")
-
- chroot_size = dir_size("%s/chroot/root" % workdir)
-
- # clean up temporary data
- set_status(STATUS_CLEANUP)
-
- retrace_run(31, ["mock", "-r", mockr, "--scrub=all"])
- retrace_run(32, ["rm", "-rf", "%s/mock.cfg" % savedir, "%s/crash" % savedir])
-
-    # ignore errors: when workdir == savedir, the directory is not empty
- if CONFIG["UseWorkDir"]:
- try:
- os.rmdir(workdir)
- except:
- pass
-
- LOG.write("OK\n")
-
- # save crash statistics
- set_status(STATUS_STATS)
-
- duration = int(time.time() - starttime)
-
- package_match = PACKAGE_PARSER.match(crash_package)
- if not package_match:
- package = crash_package
- version = "unknown"
- release = "unknown"
- else:
- package = package_match.group(1)
- version = package_match.group(2)
- release = package_match.group(4)
-
- crashstats = {
- "taskid": taskid_int,
- "package": package,
- "version": version,
- "release": release,
- "arch": arch,
- "starttime": int(starttime),
- "duration": duration,
- "prerunning": prerunning,
- "postrunning": len(get_active_tasks()) - 1,
- "chrootsize": chroot_size
- }
-
- if not init_crashstats_db() or not save_crashstats(crashstats):
- LOG.write("Error: %s\n" % crashstats)
- else:
- LOG.write("OK\n")
-
- # publish backtrace and log
- set_status(STATUS_FINISHING)
-
- try:
- os.rename("%s/backtrace" % savedir, "%s/retrace_backtrace" % savedir)
- except Exception as ex:
- LOG.write("Error: %s\n" % ex)
- fail(35)
-
- LOG.write("OK\n")
- LOG.write("Retrace took %d seconds.\n" % duration)
-
- set_status(STATUS_SUCCESS)
- LOG.write("\n")
- LOG.close()