summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJiri Moskovcak <jmoskovc@redhat.com>2011-05-13 09:52:51 +0200
committerJiri Moskovcak <jmoskovc@redhat.com>2011-05-13 09:52:51 +0200
commit3566c737ecc35b17a09430eca8b6cb5fcc187245 (patch)
tree30cf569ba80d12afb158baba9c6257639eeead7a
parent7cfbac7966d99568565abea25a57522288d9a279 (diff)
parente123c5f3b4bdd10f3b495a4a948f6c452ed6205f (diff)
downloadabrt-3566c737ecc35b17a09430eca8b6cb5fcc187245.tar.gz
abrt-3566c737ecc35b17a09430eca8b6cb5fcc187245.tar.xz
abrt-3566c737ecc35b17a09430eca8b6cb5fcc187245.zip
Merge branch 'master' into report_api
-rw-r--r--abrt.spec.in1
-rw-r--r--configure.ac5
-rw-r--r--doc/abrt-retrace-server.texi648
-rw-r--r--po/POTFILES.in3
-rw-r--r--src/cli/report.c4
-rw-r--r--src/daemon/Makefile.am5
-rw-r--r--src/daemon/smart_event.conf38
-rw-r--r--src/gui-gtk/main.c2
-rw-r--r--src/gui-wizard-gtk/wizard.c41
-rw-r--r--src/gui-wizard-gtk/wizard.glade40
-rw-r--r--src/include/abrtlib.h2
-rw-r--r--src/lib/Makefile.am2
-rw-r--r--src/lib/abrt_curl.c50
-rw-r--r--src/lib/abrt_xmlrpc.c137
-rw-r--r--src/lib/abrt_xmlrpc.cpp102
-rw-r--r--src/lib/abrt_xmlrpc.h39
-rw-r--r--src/lib/hooklib.c2
-rw-r--r--src/lib/hooklib.h2
-rw-r--r--src/lib/read_write.c2
-rw-r--r--src/plugins/Makefile.am4
-rw-r--r--src/plugins/abrt-action-bugzilla.c340
-rw-r--r--src/plugins/abrt-action-bugzilla.cpp958
-rwxr-xr-xsrc/plugins/abrt-action-install-debuginfo.py4
-rw-r--r--src/plugins/abrt-action-mailx.txt2
-rw-r--r--src/plugins/abrt-retrace-client.c4
-rw-r--r--src/plugins/abrt_rh_support.c2
-rw-r--r--src/plugins/ccpp_events.conf4
-rw-r--r--src/plugins/rhbz.c482
-rw-r--r--src/plugins/rhbz.h100
-rw-r--r--src/report-python/reportmodule.c2
30 files changed, 1580 insertions, 1447 deletions
diff --git a/abrt.spec.in b/abrt.spec.in
index 99c19684..c2519021 100644
--- a/abrt.spec.in
+++ b/abrt.spec.in
@@ -458,6 +458,7 @@ gtk-update-icon-cache %{_datadir}/icons/hicolor &>/dev/null || :
%config(noreplace) %{_sysconfdir}/%{name}/abrt_event.conf
%config(noreplace) %{_sysconfdir}/%{name}/gpg_keys
%config(noreplace) %{_sysconfdir}/dbus-1/system.d/dbus-abrt.conf
+%{_sysconfdir}/%{name}/events.d/smart_event.conf
%{_initrddir}/abrtd
%dir %attr(0755, abrt, abrt) %{_localstatedir}/spool/%{name}
%dir %attr(0700, abrt, abrt) %{_localstatedir}/spool/%{name}-upload
diff --git a/configure.ac b/configure.ac
index 740ccc90..c568c43d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -9,19 +9,16 @@ m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES])
AC_DISABLE_STATIC
AC_PROG_LIBTOOL
AC_PROG_CC
-AC_PROG_CXX
AC_PROG_LN_S
AC_SYS_LARGEFILE
-CXXFLAGS="$CXXFLAGS -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE \
- -D_FILE_OFFSET_BITS=64 -fno-strict-aliasing "
CFLAGS="$CFLAGS -D_LARGEFILE_SOURCE -D_LARGEFILE64_SOURCE \
-D_FILE_OFFSET_BITS=64 -fno-strict-aliasing -std=gnu99 -Wall"
AC_ARG_ENABLE(debug,
[AC_HELP_STRING([--enable-debug],
[Enable debug information])],
- [CXXFLAGS="$CXXFLAGS -DDEBUG -ggdb -g" CFLAGS="$CFLAGS -DDEBUG -ggdb -g"])
+ [CFLAGS="$CFLAGS -DDEBUG -ggdb -g"])
dnl ****** INTERNATIONALIZATION **********************
GETTEXT_PACKAGE=abrt
diff --git a/doc/abrt-retrace-server.texi b/doc/abrt-retrace-server.texi
index 0e2a9a2b..814e9f04 100644
--- a/doc/abrt-retrace-server.texi
+++ b/doc/abrt-retrace-server.texi
@@ -27,7 +27,7 @@
@top Retrace server
This manual is for retrace server for ABRT version @value{VERSION},
-@value{UPDATED}. The retrace server provides a coredump analysis and
+@value{UPDATED}. The retrace server provides coredump analysis and
backtrace generation service over a network using HTTP protocol.
@end ifnottex
@@ -35,6 +35,7 @@ backtrace generation service over a network using HTTP protocol.
* Overview::
* HTTP interface::
* Retrace worker::
+* Task cleanup::
* Package repository::
* Traffic and load estimation::
* Security::
@@ -44,34 +45,42 @@ backtrace generation service over a network using HTTP protocol.
@node Overview
@chapter Overview
-A client sends a coredump (created by Linux kernel) together with
-some additional information to the server, and gets a backtrace
-generation task ID in response. Then the client, after some time, asks
-the server for the task status, and when the task is done (backtrace
-has been generated from the coredump), the client downloads the
-backtrace. If the backtrace generation fails, the client gets an error
-code and downloads a log indicating what happened. Alternatively, the
-client sends a coredump, and keeps receiving the server response
-message. Server then, via the response's body, periodically sends
-status of the task, and delivers the resulting backtrace as soon as
-it's ready.
-
-The retrace server must be able to support multiple operating
-systems and their releases (Fedora N-1, N, Rawhide, Branched Rawhide,
-RHEL), and multiple architectures within a single installation.
-
-The retrace server consists of the following parts:
+Analyzing a program crash from a coredump is a difficult task. The GNU
+Debugger (GDB), that is commonly used to analyze coredumps on free
+operating systems, expects that the system analyzing the coredump is
+identical to the system where the program crashed. Software updates
+often break this assumption even on the system where the crash occurred,
+making the coredump analyzable only with significant effort.
+
+Retrace server solves this problem for Fedora 14+ and RHEL 6+ operating
+systems, and allows developers to analyze coredumps without having
+access to the machine where the crash occurred.
+
+Retrace server is usually run as a service on a local network, or on
+Internet. A user sends a coredump together with some additional
+information to a retrace server. The server reads the coredump and
+depending on its contents it installs necessary software dependencies to
+create a software environment which is, from the GDB point of view,
+identical to the environment where the crash happened. Then the server
+runs GDB to generate a backtrace from the coredump and provides it back
+to the user.
+
+Core dumps generated on i386 and x86_64 architectures are supported
+within a single x86_64 retrace server instance.
+
+The retrace server consists of the following major parts:
@enumerate
@item
-abrt-retrace-server: a HTTP interface script handling the
-communication with clients, task creation and management
+a HTTP interface, consisting of a set of scripts handling communication
+with clients
+@item
+a retrace worker, doing the coredump processing, environment
+preparation, and running the debugger to generate a backtrace
@item
-abrt-retrace-worker: a program doing the environment preparation
-and coredump processing
+a cleanup script, handling stalled retracing tasks and removing old data
@item
-package repository: a repository placed on the server containing
-all the application binaries, libraries, and debuginfo necessary for
-backtrace generation
+a package repository, providing the application binaries, libraries, and
+debuginfo necessary for generating backtraces from coredumps
@end enumerate
@node HTTP interface
@@ -82,94 +91,99 @@ backtrace generation
* Task status::
* Requesting a backtrace::
* Requesting a log::
-* Task cleanup::
* Limiting traffic::
@end menu
-The HTTP interface application is a script written in Python. The
-script is named @file{abrt-retrace-server}, and it uses the
-@uref{http://www.python.org/dev/peps/pep-0333/, Python Web Server
-Gateway Interface} (WSGI) to interact with the web server.
-Administrators may use
-@uref{http://code.google.com/p/modwsgi/, mod_wsgi} to run
-@command{abrt-retrace-server} on Apache. The mod_wsgi is a part of
-both Fedora 12 and RHEL 6. The Python language is a good choice for
-this application, because it supports HTTP handling well, and it is
-already used in ABRT.
-
-Only secure (HTTPS) communication must be allowed for the communication
-with @command{abrt-retrace-server}, because coredumps and backtraces are
+The client-server communication proceeds as follows:
+@enumerate
+@item
+Client uploads a coredump to a retrace server. Retrace server creates a
+task for processing the coredump, and sends the task ID and task
+password in response to the client.
+@item
+Client asks server for the task status using the task ID and password.
+Server responds with the status information (task finished successfully,
+task failed, task is still running).
+@item
+Client asks server for the backtrace from a successfully finished task
+using the task ID and password. Server sends the backtrace in response.
+@item
+Client asks server for a log from the finished task using the task ID
+and password, and server sends the log in response.
+@end enumerate
+
+The HTTP interface application is a set of scripts written in Python,
+using the @uref{http://www.python.org/dev/peps/pep-0333/, Python Web
+Server Gateway Interface} (WSGI) to interact with a web server. The only
+supported and tested configuration is the Apache HTTPD Server with
+@uref{http://code.google.com/p/modwsgi/, mod_wsgi}.
+
+Only secure (HTTPS) communication is allowed for communicating with a
+public instance of retrace server, because coredumps and backtraces are
private data. Users may decide to publish their backtraces in a bug
tracker after reviewing them, but the retrace server doesn't do
-that. The HTTPS requirement must be specified in the server's man
-page. The server must support HTTP persistent connections to to avoid
-frequent SSL renegotiations. The server's manual page should include a
-recommendation for administrator to check that the persistent
-connections are enabled.
+that. The server is supposed to use HTTP persistent connections to
+avoid frequent SSL renegotiations.
@node Creating a new task
@section Creating a new task
A client might create a new task by sending a HTTP request to the
@indicateurl{https://server/create} URL, and providing an archive as the
-request content. The archive must contain crash data files. The crash
-data files are a subset of some local
-@file{/var/spool/abrt/ccpp-time-pid} directory contents, so the client
-must only pack and upload them.
+request content. The archive contains crash data files. The crash data
+files are a subset of some local @file{/var/spool/abrt/ccpp-time-pid}
+directory contents, so the client must only pack and upload them.
-The server must support uncompressed tar archives, and tar archives
+The server supports uncompressed tar archives, and tar archives
compressed with gzip and xz. Uncompressed archives are the most
-efficient way for local network delivery, and gzip can be used there
-as well because of its good compression speed.
+efficient way for local network delivery, and gzip can be used there as
+well because of its good compression speed.
The xz compression file format is well suited for public server setup
(slow network), as it provides good compression ratio, which is
important for compressing large coredumps, and it provides reasonable
compress/decompress speed and memory consumption. See @ref{Traffic and
-load estimation} for the measurements. The @uref{http://tukaani.org/xz/, XZ Utils}
-implementation with the compression level 2 should be used to compress
-the data.
+load estimation} for the measurements. The @uref{http://tukaani.org/xz/,
+XZ Utils} implementation with the compression level 2 is used to
+compress the data.
The HTTP request for a new task must use the POST method. It must
contain a proper @var{Content-Length} and @var{Content-Type} fields. If
-the method is not POST, the server must return the @code{405 Method Not
+the method is not POST, the server returns the @code{405 Method Not
Allowed} HTTP error code. If the @var{Content-Length} field is missing,
-the server must return the @code{411 Length Required} HTTP error
-code. If an @var{Content-Type} other than @samp{application/x-tar},
+the server returns the @code{411 Length Required} HTTP error code. If an
+@var{Content-Type} other than @samp{application/x-tar},
@samp{application/x-gzip}, @samp{application/x-xz} is used, the server
-must return the @code{415 unsupported Media Type} HTTP error code. If
-the @var{Content-Length} value is greater than a limit set in the server
+returns the @code{415 Unsupported Media Type} HTTP error code. If the
+@var{Content-Length} value is greater than a limit set in the server
configuration file (50 MB by default), or the real HTTP request size
-gets larger than the limit + 10 KB for headers, then the server must
-return the @code{413 Request Entity Too Large} HTTP error code, and
-provide an explanation, including the limit, in the response body. The
-limit must be changeable from the server configuration file.
+gets larger than the limit + 10 KB for headers, then the server returns
+the @code{413 Request Entity Too Large} HTTP error code, and provides an
+explanation, including the limit, in the response body. The limit is
+changeable from the server configuration file.
If there is less than 20 GB of free disk space in the
-@file{/var/spool/abrt-retrace} directory, the server must return the
-@code{507 Insufficient Storage} HTTP error code. The server must return
-the same HTTP error code if decompressing the received archive would
-cause the free disk space to become less than 20 GB. The 20 GB limit
-must be changeable from the server configuration file.
+@file{/var/spool/abrt-retrace} directory, the server returns the
+@code{507 Insufficient Storage} HTTP error code. The server returns the
+same HTTP error code if decompressing the received archive would cause
+the free disk space to become less than 20 GB. The 20 GB limit is
+changeable from the server configuration file.
If the data from the received archive would take more than 500 MB of
-disk space when uncompressed, the server must return the @code{413
-Request Entity Too Large} HTTP error code, and provide an explanation,
-including the limit, in the response body. The size limit must be
-changeable from the server configuration file. It can be set pretty high
-because coredumps, that take most disk space, are stored on the server
-only temporarily until the backtrace is generated. When the backtrace is
+disk space when uncompressed, the server returns the @code{413 Request
+Entity Too Large} HTTP error code, and provides an explanation,
+including the limit, in the response body. The size limit is changeable
+from the server configuration file. It can be set pretty high because
+coredumps, that take most disk space, are stored on the server only
+temporarily until the backtrace is generated. When the backtrace is
generated the coredump is deleted by the @command{abrt-retrace-worker},
so most disk space is released.
-The uncompressed data size for xz archives can be obtained by calling
+The uncompressed data size for xz archives is obtained by calling
@code{`xz --list file.tar.xz`}. The @option{--list} option has been
-implemented only recently, so it might be necessary to implement a
-method to get the uncompressed data size by extracting the archive to
-the stdout, and counting the extracted bytes, and call this method if
-the @option{--list} doesn't work on the server. Likewise, the
-uncompressed data size for gzip archives can be obtained by calling
-@code{`gzip --list file.tar.gz`}.
+implemented only recently, so updating @command{xz} on your server might
+be necessary. Likewise, the uncompressed data size for gzip archives is
+obtained by calling @code{`gzip --list file.tar.gz`}.
If an upload from a client succeeds, the server creates a new directory
@file{/var/spool/abrt-retrace/@var{id}} and extracts the
@@ -178,30 +192,19 @@ the required files, checks their sizes, and then sends a HTTP
response. After that it spawns a subprocess with
@command{abrt-retrace-worker} on that directory.
-To support multiple architectures, the retrace server needs a GDB
-package compiled separately for every supported target architecture
-(see the avr-gdb package in Fedora for an example). This is
-technically and economically better solution than using a standalone
-machine for every supported architecture and resending coredumps
-depending on client's architecture. However, GDB's support for using a
-target architecture different from the host architecture seems to be
-fragile. If it doesn't work, the QEMU user mode emulation should be
-tried as an alternative approach.
-
The following files from the local crash directory are required to be
present in the archive: @file{coredump}, @file{architecture},
@file{release}, @file{packages} (this one does not exist yet). If one or
more files are not present in the archive, or some other file is present
-in the archive, the server must return the @code{403 Forbidden} HTTP
-error code. If the size of any file except the coredump exceeds 100 KB,
-the server must return the @code{413 Request Entity Too Large} HTTP
-error code, and provide an explanation, including the limit, in the
-response body. The 100 KB limit must be changeable from the server
-configuration file.
-
-If the file check succeeds, the server HTTP response must have the
-@code{201 Created} HTTP code. The response must include the following
-HTTP header fields:
+in the archive, the server returns the @code{403 Forbidden} HTTP error
+code. If the size of any file except the coredump exceeds 100 KB, the
+server returns the @code{413 Request Entity Too Large} HTTP error code,
+and provides an explanation, including the limit, in the response
+body. The 100 KB limit is changeable from the server configuration file.
+
+If the file check succeeds, the server HTTP response has the @code{201
+Created} HTTP code. The response includes the following HTTP header
+fields:
@itemize
@item
@var{X-Task-Id} containing a new server-unique numerical
@@ -209,20 +212,13 @@ task id
@item
@var{X-Task-Password} containing a newly generated
password, required to access the result
-@item
-@var{X-Task-Est-Time} containing a number of seconds the
-server estimates it will take to generate the backtrace
@end itemize
The @var{X-Task-Password} is a random alphanumeric (@samp{[a-zA-Z0-9]})
-sequence 22 characters long. 22 alphanumeric characters corresponds to
-128 bit password, because @samp{[a-zA-Z0-9]} = 62 characters, and
-@math{2^128} < @math{62^22}. The source of randomness must be,
-directly or indirectly, @file{/dev/urandom}. The @code{rand()} function
-from glibc and similar functions from other libraries cannot be used
-because of their poor characteristics (in several aspects). The password
-must be stored to the @file{/var/spool/abrt-retrace/@var{id}/password} file,
-so passwords sent by a client in subsequent requests can be verified.
+sequence 22 characters long. The password is stored in the
+@file{/var/spool/abrt-retrace/@var{id}/password} file, and passwords
+sent by a client in subsequent requests are verified by comparing with
+this file.
The task id is intentionally not used as a password, because it is
desirable to keep the id readable and memorable for
@@ -230,57 +226,6 @@ humans. Password-like ids would be a loss when an user authentication
mechanism is added, and server-generated password will no longer be
necessary.
-The algorithm for the @var{X-Task-Est-Time} time estimation
-should take the previous analyses of coredumps with the same
-corresponding package name into account. The server should store
-simple history in a SQLite database to know how long it takes to
-generate a backtrace for certain package. It could be as simple as
-this:
-@itemize
-@item
- initialization step one: @code{CREATE TABLE package_time (id INTEGER
- PRIMARY KEY AUTOINCREMENT, package, release, time)}; we need the
- @var{id} for the database cleanup - to know the insertion order of
- rows, so the @code{AUTOINCREMENT} is important here; the @var{package}
- is the package name without the version and release numbers, the
- @var{release} column stores the operating system, and the @var{time}
- is the number of seconds it took to generate the backtrace
-@item
- initialization step two: @code{CREATE INDEX package_release ON
- package_time (package, release)}; we compute the time only for single
- package on single supported OS release per query, so it makes sense to
- create an index to speed it up
-@item
- when a task is finished: @code{INSERT INTO package_time (package,
- release, time) VALUES ('??', '??', '??')}
-@item
- to get the average time: @code{SELECT AVG(time) FROM package_time
- WHERE package == '??' AND release == '??'}; the arithmetic mean seems
- to be sufficient here
-@end itemize
-
-So the server knows that crashes from an OpenOffice.org package
-take 5 minutes to process in average, and it can return the value 300
-(seconds) in the field. The client does not waste time asking about
-that task every 20 seconds, but the first status request comes after
-300 seconds. And even when the package changes (rebases etc.), the
-database provides good estimations after some time anyway
-(@ref{Task cleanup} chapter describes how the
-data are pruned).
-
-The server response HTTP body is generated and sent
-gradually as the task is performed. Client chooses either to receive
-the body, or terminate after getting all headers and ask the server
-for status and backtrace asynchronously.
-
-The server re-sends the output of abrt-retrace-worker (its stdout and
-stderr) to the response the body. In addition, a line with the task
-status is added in the form @code{X-Task-Status: PENDING} to the body
-every 5 seconds. When the worker process ends, either
-@samp{FINISHED_SUCCESS} or @samp{FINISHED_FAILURE} status line is
-sent. If it's @samp{FINISHED_SUCCESS}, the backtrace is attached after
-this line. Then the response body is closed.
-
@node Task status
@section Task status
@@ -324,19 +269,19 @@ A client might request a backtrace by sending a HTTP GET request to the
@indicateurl{https://someserver/@var{id}/backtrace} URL, where @var{id}
is the numerical task id returned in the @var{X-Task-Id} field by
@indicateurl{https://someserver/create}. If the @var{id} is not in the
-valid format, or the task @var{id} does not exist, the server must
-return the @code{404 Not Found} HTTP error code.
+valid format, or the task @var{id} does not exist, the server returns
+the @code{404 Not Found} HTTP error code.
The client request must contain the @var{X-Task-Password} field, and its
content must match the password stored in the
@file{/var/spool/abrt-retrace/@var{id}/password} file. If the password
-is not valid, the server must return the @code{403 Forbidden} HTTP error
+is not valid, the server returns the @code{403 Forbidden} HTTP error
code.
If the file @file{/var/spool/abrt-retrace/@var{id}/backtrace} does not
-exist, the server must return the @code{404 Not Found} HTTP error code.
+exist, the server returns the @code{404 Not Found} HTTP error code.
Otherwise it returns the file contents, and the @var{Content-Type} field
-must contain @samp{text/plain}.
+contains @samp{text/plain}.
@node Requesting a log
@section Requesting a log
@@ -345,27 +290,115 @@ A client might request a task log by sending a HTTP GET request to the
@indicateurl{https://someserver/@var{id}/log} URL, where @var{id} is the
numerical task id returned in the @var{X-Task-Id} field by
@indicateurl{https://someserver/create}. If the @var{id} is not in the
-valid format, or the task @var{id} does not exist, the server must
-return the @code{404 Not Found} HTTP error code.
+valid format, or the task @var{id} does not exist, the server returns
+the @code{404 Not Found} HTTP error code.
The client request must contain the @var{X-Task-Password} field, and its
content must match the password stored in the
-@file{/var/spool/abrt-retrace/@var{id}/password} file. If the password is
-not valid, the server must return the @code{403 Forbidden} HTTP error code.
+@file{/var/spool/abrt-retrace/@var{id}/password} file. If the password
+is not valid, the server returns the @code{403 Forbidden} HTTP error
+code.
If the file @file{/var/spool/abrt-retrace/@var{id}/retrace-log} does not
-exist, the server must return the @code{404 Not Found} HTTP error code.
-Otherwise it returns the file contents, and the "Content-Type" must
-contain "text/plain".
+exist, the server returns the @code{404 Not Found} HTTP error code.
+Otherwise it returns the file contents, and the @var{Content-Type}
+contains @samp{text/plain}.
+
+@node Limiting traffic
+@section Limiting traffic
+
+The maximum number of simultaneously running tasks is limited to 20 by
+the server. The limit is changeable from the server configuration
+file. If a new request comes when the server is fully occupied, the
+server returns the @code{503 Service Unavailable} HTTP error code.
+
+The archive extraction, chroot preparation, and gdb analysis is
+mostly limited by the hard drive size and speed.
+
+@node Retrace worker
+@chapter Retrace worker
+
+Retrace worker is a program (usually residing in
+@command{/usr/bin/abrt-retrace-worker}), which:
+@enumerate
+@item
+takes a task id as a parameter, and turns it into a directory containing
+a coredump
+@item
+determines which packages need to be installed from the coredump
+@item
+installs the packages in a newly created chroot environment together
+with @command{gdb}
+@item
+copies the coredump to the chroot environment
+@item
+runs @command{gdb} from inside the environment to generate a backtrace
+from the coredump
+@item
+copies the resulting backtrace from the environment to the directory
+@end enumerate
+
+The tasks reside in @file{/var/spool/abrt-retrace/@var{taskid}}
+directories.
+
+To determine which packages need to be installed,
+@command{abrt-retrace-worker} runs the @command{coredump2packages} tool.
+The tool reads build-ids from the coredump, and tries to find the best
+set of packages (epoch, name, version, release) matching the
+build-ids. Local yum repositories are used as the source of
+packages. GDB requirements are strict, and this is the reason why proper
+backtraces cannot be directly and reliably generated on systems whose
+software is updated:
+@itemize
+@item
+The exact binary which crashed needs to be available to GDB.
+@item
+All libraries which are linked to the binary need to be available in the
+same exact versions from the time of the crash.
+@item
+The binary plugins loaded by the binary or libraries via @code{dlopen}
+need to be present in proper versions.
+@item
+The files containing the debugging symbols for the binary and libraries
+(build-ids are used to find the pairs) need to be available to GDB.
+@end itemize
+
+The chroot environments are created and managed by @command{mock}, and
+they reside in @file{/var/lib/mock/@var{taskid}}. The retrace worker
+generates a mock configuration file and then invokes @command{mock} to
+create the chroot, and to run programs from inside the chroot.
+
+The chroot environment is populated by installing packages using
+@command{yum}. Package installation cannot be avoided, as GDB expects to
+operate on an installed system, and on crashes from that system. GDB
+uses plugins written in Python, that are shipped with packages (for
+example see @command{rpm -ql libstdc++}).
+
+Coredumps might be affected by @command{prelink}, which is used on
+Fedora to speed up dynamic linking by caching its results directly in
+binaries. The system installed by @command{mock} for the purpose of
+retracing doesn't use @command{prelink}, so the binaries differ between
+the system of origin and the mock environment. It has been tested that
+this is not an issue, but in case some issue
+@uref{http://sourceware.org/ml/gdb/2009-05/msg00175.html, occurs}
+(GDB fails to work with a binary even if it's the right one), a bug
+should be filed on @code{prelink}, as its operation should not affect
+the area GDB operates on.
+
+No special care is taken to avoid the possibility that GDB will not run
+with the set of packages (fixed versions) as provided by coredump. It is
+expected that any combination of packages user might use in a released
+system should satisfy the needs of some version of GDB. Yum selects the
+newest possible version which has its requirements satisfied.
@node Task cleanup
-@section Task cleanup
+@chapter Task cleanup
-Tasks that were created more than 5 days ago must be deleted, because
-tasks occupy disk space (not so much space, as the coredumps are deleted
-after the retrace, and only backtraces and configuration remain). A
-shell script @command{abrt-retrace-clean} must check the creation time
-and delete the directories in @file{/var/spool/abrt-retrace/}. It is
+Tasks that were created more than 5 days ago are deleted, because tasks
+occupy disk space (not so much space, as the coredumps are deleted after
+the retrace, and only backtraces and configuration remain). A shell
+script @command{abrt-retrace-clean} must check the creation time and
+delete the directories in @file{/var/spool/abrt-retrace/}. It is
supposed that the server administrator sets @command{cron} to call the
script once a day. This assumption must be mentioned in the
@command{abrt-retrace-clean} manual page.
@@ -399,125 +432,6 @@ database:
@end itemize
@end enumerate
-@node Limiting traffic
-@section Limiting traffic
-
-The maximum number of simultaneously running tasks must be limited to 20
-by the server. The limit must be changeable from the server
-configuration file. If a new request comes when the server is fully
-occupied, the server must return the @code{503 Service Unavailable} HTTP
-error code.
-
-The archive extraction, chroot preparation, and gdb analysis is
-mostly limited by the hard drive size and speed.
-
-@node Retrace worker
-@chapter Retrace worker
-
-The worker (@command{abrt-retrace-worker} binary) gets a
-@file{/var/spool/abrt-retrace/@var{id}} directory as an input. The worker
-reads the operating system name and version, the coredump, and the list
-of packages needed for retracing (a package containing the binary which
-crashed, and packages with the libraries that are used by the binary).
-
-The worker prepares a new @file{chroot} subdirectory with the packages,
-their debuginfo, and gdb installed. In other words, a new directory
-@file{/var/spool/abrt-retrace/@var{id}/chroot} is created and
-the packages are unpacked or installed into this directory, so for
-example the gdb ends up as
-@file{/var/.../@var{id}/chroot/usr/bin/gdb}.
-
-After the @file{chroot} subdirectory is prepared, the worker moves the
-coredump there and changes root (using the chroot system function) of a
-child script there. The child script runs the gdb on the coredump, and
-the gdb sees the corresponding crashy binary, all the debuginfo and all
-the proper versions of libraries on right places.
-
-When the gdb run is finished, the worker copies the resulting backtrace
-to the @file{/var/spool/abrt-retrace/@var{id}/backtrace} file and stores a
-log from the whole chroot process to the @file{retrace-log} file in the
-same directory. Then it removes the @file{chroot} directory.
-
-The GDB installed into the chroot must:
-@itemize
-@item
-run on the server (same architecture, or we can use
-@uref{http://wiki.qemu.org/download/qemu-doc.html#QEMU-User-space-emulator, QEMU
-user space emulation})
-@item
-process the coredump (possibly from another architecture): that
-means we need a special GDB for every supported architecture
-@item
-be able to handle coredumps created in an environment with prelink
-enabled
-(@uref{http://sourceware.org/ml/gdb/2009-05/msg00175.html, should
-not} be a problem)
-@item
-use libc, zlib, readline, ncurses, expat and Python packages,
-while the version numbers required by the coredump might be different
-from what is required by the GDB
-@end itemize
-
-The gdb might fail to run with certain combinations of package
-dependencies. Nevertheless, we need to provide the libc/Python/*
-package versions which are required by the coredump. If we would not
-do that, the backtraces generated from such an environment would be of
-lower quality. Consider a coredump which was caused by a crash of
-Python application on a client, and which we analyze on the retrace
-server with completely different version of Python because the
-client's Python version is not compatible with our GDB.
-
-We can solve the issue by installing the GDB package dependencies first,
-move their binaries to some safe place (@file{/lib/gdb} in the chroot),
-and create the @file{/etc/ld.so.preload} file pointing to that place, or
-set @env{LD_LIBRARY_PATH}. Then we can unpack libc binaries and
-other packages and their versions as required by the coredump to the
-common paths, and the GDB would run happily, using the libraries from
-@file{/lib/gdb} and not those from @file{/lib} and @file{/usr/lib}. This
-approach can use standard GDB builds with various target architectures:
-gdb, gdb-i386, gdb-ppc64, gdb-s390 (nonexistent in Fedora/EPEL at the
-time of writing this).
-
-The GDB and its dependencies are stored separately from the packages
-used as data for coredump processing. A single combination of GDB and
-its dependencies can be used across all supported OS to generate
-backtraces.
-
-The retrace worker must be able to prepare a chroot-ready environment
-for certain supported operating system, which is different from the
-retrace server's operating system. It needs to fake the @file{/dev}
-directory and create some basic files in @file{/etc} like @file{passwd}
-and @file{hosts}. We can use the @uref{https://fedorahosted.org/mock/,
-mock} library to do that, as it does almost what we need (but not
-exactly as it has a strong focus on preparing the environment for
-rpmbuild and running it), or we can come up with our own solution, while
-stealing some code from the mock library. The @file{/usr/bin/mock}
-executable is entirely unuseful for the retrace server, but the
-underlying Python library can be used. So if would like to use mock, an
-ABRT-specific interface to the mock library must be written or the
-retrace worker must be written in Python and use the mock Python library
-directly.
-
-We should save some time and disk space by extracting only binaries
-and dynamic libraries from the packages for the coredump analysis, and
-omit all other files. We can save even more time and disk space by
-extracting only the libraries and binaries really referenced by the
-coredump (eu-unstrip tells us). Packages should not be
-@emph{installed} to the chroot, they should be @emph{extracted}
-only, because we use them as a data source, and we never run them.
-
-Another idea to be considered is that we can avoid the package
-extraction if we can teach GDB to read the dynamic libraries, the
-binary, and the debuginfo directly from the RPM packages. We would
-provide a backend to GDB which can do that, and provide tiny front-end
-program which tells the backend which RPMs it should use and then run
-the GDB command loop. The result would be a GDB wrapper/extension we
-need to maintain, but it should end up pretty small. We would use
-Python to write our extension, as we do not want to (inelegantly)
-maintain a patch against GDB core. We need to ask GDB people if the
-Python interface is capable of handling this idea, and how much work
-it would be to implement it.
-
@node Package repository
@chapter Package repository
@@ -739,11 +653,10 @@ provider in various important matters. So when the retrace server is
operated by the operating system provider, that might be acceptable by
users.
-We cannot avoid sending clients' coredumps to the retrace server, if
-we want to generate quality backtraces containing the values of
-variables. Minidumps are not acceptable solution, as they lower the
-quality of the resulting backtraces, while not improving user
-security.
+We cannot avoid sending clients' coredumps to the retrace server, if we
+want to generate quality backtraces containing the values of
+variables. Minidumps lower the quality of the resulting backtraces,
+while not improving user security.
Can the retrace server trust clients? We must know what can a
malicious client achieve by crafting a nonstandard coredump, which
@@ -760,6 +673,14 @@ generate the backtrace. Is it safe? We must know what can a malicious
client achieve by crafting a special binary and debuginfo, which will
be processed by server's GDB.
+As for attacker trying to steal users' backtraces from the retrace
+server, the passwords protecting the backtraces in the
+@var{X-Task-Password} header are random alphanumeric
+(@samp{[a-zA-Z0-9]}) sequences 22 characters long. 22 alphanumeric
+characters corresponds to 128 bit password, because @samp{[a-zA-Z0-9]}
+is 62 characters, and @math{2^{128}} < @math{62^{22}}. The source of
+randomness is @file{/dev/urandom}.
+
@node Packages and debuginfo
@section Packages and debuginfo
@@ -773,34 +694,113 @@ it, as the data will also be signed.
@node Future work
@chapter Future work
-1. Coredump stripping. Jan Kratochvil: With my test of OpenOffice.org
-presentation kernel core file has 181MB, xz -2 of it has 65MB.
-According to `set target debug 1' GDB reads only 131406 bytes of it
-(incl. the NOTE segment).
+@section Coredump stripping
+Jan Kratochvil: With my test of OpenOffice.org presentation kernel core
+file has 181MB, xz -2 of it has 65MB. According to `set target debug 1'
+GDB reads only 131406 bytes of it (incl. the NOTE segment).
-2. Use gdbserver instead of uploading whole coredump. GDB's
-gdbserver cannot process coredumps, but Jan Kratochvil's can:
-<pre> git://git.fedorahosted.org/git/elfutils.git
- branch: jankratochvil/gdbserver
+@section Supporting other architectures
+Three approaches:
+@itemize
+@item
+Use GDB builds with various target architectures: gdb-i386, gdb-ppc64,
+gdb-s390.
+@item
+Run
+@uref{http://wiki.qemu.org/download/qemu-doc.html#QEMU-User-space-emulator,
+QEMU user space emulation} on the server
+@item
+Run @code{abrt-retrace-worker} on a machine with right
+architecture. Introduce worker machines and tasks, similarly to Koji.
+@end itemize
+
+@section Use gdbserver instead of uploading whole coredump
+GDB's gdbserver cannot process coredumps, but Jan Kratochvil's can:
+@verbatim
+git://git.fedorahosted.org/git/elfutils.git
+branch: jankratochvil/gdbserver
src/gdbserver.c
* Currently threading is not supported.
* Currently only x86_64 is supported (the NOTE registers layout).
-</pre>
+@end verbatim
-3. User management for the HTTP interface. We need multiple
-authentication sources (x509 for RHEL).
+@section User management for the HTTP interface
+Multiple authentication sources (x509 for RHEL).
-4. Make @file{architecture}, @file{release},
-@file{packages} files, which must be included in the package
-when creating a task, optional. Allow uploading a coredump without
-involving tar: just coredump, coredump.gz, or coredump.xz.
+@section Make all files except coredump optional on the input
+Make @file{architecture}, @file{release}, @file{packages} files, which
+must be included in the package when creating a task, optional. Allow
+uploading a coredump without involving tar: just coredump, coredump.gz,
+or coredump.xz.
-5. Handle non-standard packages (provided by user)
+@section Handle non-standard packages (provided by user)
+This would make the retrace server very vulnerable to attacks; it can
+never be enabled in a public instance.
-6. See @uref{https://fedorahosted.org/cas/, Core analysis system}, its
+@section Support vmcores
+See @uref{https://fedorahosted.org/cas/, Core analysis system}, its
features etc.
-7. Consider using @uref{http://git.fedorahosted.org/git/?p=kobo.git,
-kobo} for task management and worker handling (master/slaves arch).
+@section Do not refuse new tasks on a fully loaded server
+Consider using @uref{http://git.fedorahosted.org/git/?p=kobo.git, kobo}
+for task management and worker handling (master/slaves arch).
+
+@section Support synchronous operation
+Client sends a coredump, and keeps receiving the server response
+message. The server response HTTP body is generated and sent gradually
+as the task is performed. Client can choose to stop receiving the
+response body after getting all headers and ask the server for status
+and backtrace asynchronously.
+
+The server re-sends the output of abrt-retrace-worker (its stdout and
+stderr) to the response body. In addition, a line with the task
+status is added in the form @code{X-Task-Status: PENDING} to the body
+every 5 seconds. When the worker process ends, either
+@samp{FINISHED_SUCCESS} or @samp{FINISHED_FAILURE} status line is
+sent. If it's @samp{FINISHED_SUCCESS}, the backtrace is attached after
+this line. Then the response body is closed.
+
+@section Provide task estimation time
+The response to the @code{/create} action should contain a header
+@var{X-Task-Est-Time}, that contains a number of seconds the server
+estimates it will take to generate the backtrace.
+
+The algorithm for the @var{X-Task-Est-Time} time estimation
+should take the previous analyses of coredumps with the same
+corresponding package name into account. The server should store
+simple history in a SQLite database to know how long it takes to
+ generate a backtrace for a certain package. It could be as simple as
+this:
+@itemize
+@item
+ initialization step one: @code{CREATE TABLE package_time (id INTEGER
+ PRIMARY KEY AUTOINCREMENT, package, release, time)}; we need the
+ @var{id} for the database cleanup - to know the insertion order of
+ rows, so the @code{AUTOINCREMENT} is important here; the @var{package}
+ is the package name without the version and release numbers, the
+ @var{release} column stores the operating system, and the @var{time}
+ is the number of seconds it took to generate the backtrace
+@item
+ initialization step two: @code{CREATE INDEX package_release ON
+ package_time (package, release)}; we compute the time only for single
+ package on single supported OS release per query, so it makes sense to
+ create an index to speed it up
+@item
+ when a task is finished: @code{INSERT INTO package_time (package,
+ release, time) VALUES ('??', '??', '??')}
+@item
+ to get the average time: @code{SELECT AVG(time) FROM package_time
+ WHERE package == '??' AND release == '??'}; the arithmetic mean seems
+ to be sufficient here
+@end itemize
+
+So the server knows that crashes from an OpenOffice.org package
+take 5 minutes to process on average, and it can return the value 300
+(seconds) in the field. The client does not waste time asking about
+that task every 20 seconds, but the first status request comes after
+300 seconds. And even when the package changes (rebases etc.), the
+database provides good estimations after some time anyway
+(@ref{Task cleanup} chapter describes how the
+data are pruned).
@bye
diff --git a/po/POTFILES.in b/po/POTFILES.in
index 7a2ecacb..43961ada 100644
--- a/po/POTFILES.in
+++ b/po/POTFILES.in
@@ -25,7 +25,7 @@ src/plugins/abrt-action-analyze-backtrace.c
src/plugins/abrt-action-analyze-c.c
src/plugins/abrt-action-analyze-oops.c
src/plugins/abrt-action-analyze-python.c
-src/plugins/abrt-action-bugzilla.cpp
+src/plugins/abrt-action-bugzilla.c
src/plugins/abrt-action-generate-backtrace.c
src/plugins/abrt-action-install-debuginfo.py
src/plugins/abrt-action-kerneloops.c
@@ -42,3 +42,4 @@ src/plugins/report_Bugzilla.xml.in
src/plugins/report_Kerneloops.xml.in
src/plugins/report_Mailx.xml.in
src/plugins/report_RHTSupport.xml.in
+src/plugins/rhbz.c
diff --git a/src/cli/report.c b/src/cli/report.c
index 93d1abd9..7f722480 100644
--- a/src/cli/report.c
+++ b/src/cli/report.c
@@ -682,7 +682,7 @@ int report(const char *dump_dir_name, int flags)
/* Load problem_data from (possibly updated by analyze) dump dir */
struct dump_dir *dd = dd_opendir(dump_dir_name, /*flags:*/ 0);
if (!dd)
- return -1;
+ return -1;
char *analyze_events_as_lines = list_possible_events(dd, NULL, "analyze");
dd_close(dd);
@@ -705,7 +705,7 @@ int report(const char *dump_dir_name, int flags)
/* Load problem_data from (possibly updated by analyze) dump dir */
dd = dd_opendir(dump_dir_name, /*flags:*/ 0);
if (!dd)
- return -1;
+ return -1;
char *report_events_as_lines = list_possible_events(dd, NULL, "report");
problem_data_t *problem_data = create_problem_data_from_dump_dir(dd);
diff --git a/src/daemon/Makefile.am b/src/daemon/Makefile.am
index 6fb107cc..7d586796 100644
--- a/src/daemon/Makefile.am
+++ b/src/daemon/Makefile.am
@@ -1,3 +1,8 @@
+eventsconfdir = $(EVENTS_CONF_DIR)
+
+dist_eventsconf_DATA = \
+ smart_event.conf
+
bin_SCRIPTS = \
abrt-handle-upload
diff --git a/src/daemon/smart_event.conf b/src/daemon/smart_event.conf
new file mode 100644
index 00000000..e4b96b91
--- /dev/null
+++ b/src/daemon/smart_event.conf
@@ -0,0 +1,38 @@
+# Access to /dev/sda usually requires root.
+# skdump is usually in /usr/sbin.
+# Therefore we run it as post-create event, thus: under root.
+#
+# The code is identical for all three packages.
+#
+# To be moved to the packages, so that it is (de)installed
+# together with these packages.
+
+# FIXME: not working yet, because program selection code
+# looks at dump dir BEFORE running programs,
+# thus, component is not created yet
+# (it is created by 1st program in main .conf file)
+# when program selection code looks at this file.
+
+EVENT=post-create component=gnome-disk-utility
+ which skdump 2>/dev/null || exit 0
+ for f in /dev/[sh]d[a-z]; do
+ test -e "$f" || continue
+ skdump "$f"
+ echo
+ done >smart_data
+
+EVENT=post-create component=libatasmart
+ which skdump 2>/dev/null || exit 0
+ for f in /dev/[sh]d[a-z]; do
+ test -e "$f" || continue
+ skdump "$f"
+ echo
+ done >smart_data
+
+EVENT=post-create component=udisks
+ which skdump 2>/dev/null || exit 0
+ for f in /dev/[sh]d[a-z]; do
+ test -e "$f" || continue
+ skdump "$f"
+ echo
+ done >smart_data
diff --git a/src/gui-gtk/main.c b/src/gui-gtk/main.c
index 39d9f7ec..bb57ce7d 100644
--- a/src/gui-gtk/main.c
+++ b/src/gui-gtk/main.c
@@ -186,7 +186,7 @@ void scan_dirs_and_add_to_dirlist(void)
{
char **argv = s_dirs;
while (*argv)
- scan_directory_and_add_to_dirlist(*argv++);
+ scan_directory_and_add_to_dirlist(*argv++);
}
int main(int argc, char **argv)
diff --git a/src/gui-wizard-gtk/wizard.c b/src/gui-wizard-gtk/wizard.c
index 90a9c625..23d9141d 100644
--- a/src/gui-wizard-gtk/wizard.c
+++ b/src/gui-wizard-gtk/wizard.c
@@ -56,6 +56,7 @@ static GtkContainer *g_container_details2;
static GtkLabel *g_lbl_cd_reason;
static GtkTextView *g_tv_backtrace;
static GtkTextView *g_tv_comment;
+static GtkEventBox *g_eb_comment;
static GtkTreeView *g_tv_details;
static GtkWidget *g_widget_warnings_area;
static GtkBox *g_box_warning_labels;
@@ -540,7 +541,7 @@ static event_gui_data_t *add_event_buttons(GtkBox *box,
event_gui_data_t *event_gui_data = new_event_gui_data_t();
event_gui_data->event_name = xstrdup(event_name);
event_gui_data->toggle_button = GTK_TOGGLE_BUTTON(button);
- *p_event_list = g_list_append(*p_event_list, event_gui_data);
+ *p_event_list = g_list_append(*p_event_list, event_gui_data);
if (!first_button)
first_button = event_gui_data;
@@ -1105,6 +1106,20 @@ static void on_bt_approve_toggle(GtkToggleButton *togglebutton, gpointer user_da
check_backtrace_and_allow_send();
}
+static void on_comment_changed(GtkTextBuffer *buffer, gpointer user_data)
+{
+ bool good = gtk_text_buffer_get_char_count(buffer) >= 10;
+
+ /* Allow next page only when the comment has at least 10 chars */
+ gtk_assistant_set_page_complete(g_assistant, pages[PAGENO_COMMENT].page_widget, good);
+
+ /* And show the eventbox with label */
+ if (good)
+ gtk_widget_hide(GTK_WIDGET(g_eb_comment));
+ else
+ gtk_widget_show(GTK_WIDGET(g_eb_comment));
+}
+
/* Refresh button handling */
@@ -1204,6 +1219,9 @@ static void on_page_prepare(GtkAssistant *assistant, GtkWidget *page, gpointer u
w
);
}
+
+ if (pages[PAGENO_COMMENT].page_widget == page)
+ on_comment_changed(gtk_text_view_get_buffer(g_tv_comment), NULL);
}
static gint select_next_page_no(gint current_page_no, gpointer data)
@@ -1212,6 +1230,8 @@ static gint select_next_page_no(gint current_page_no, gpointer data)
if (g_report_only)
return current_page_no + 1;
+ gint prev_page_no = current_page_no;
+
again:
current_page_no++;
@@ -1241,16 +1261,20 @@ static gint select_next_page_no(gint current_page_no, gpointer data)
case PAGENO_ANALYZE_PROGRESS:
VERB2 log("%s: ANALYZE_PROGRESS: g_analyze_event_selected:'%s'",
__func__, g_analyze_event_selected);
- if (!g_analyze_event_selected || !g_analyze_event_selected[0])
+ if (!g_analyze_event_selected || !g_analyze_event_selected[0])
goto again; /* skip this page */
break;
case PAGENO_REPORTER_SELECTOR:
VERB2 log("%s: REPORTER_SELECTOR: g_black_event_count:%d",
__func__, g_black_event_count);
- if (g_black_event_count != 0)
- {
- /* Still have analyzers which didn't run? Go back */
+ /* if we _did_ run an event (didn't skip it)
+ * and still have analyzers which didn't run
+ */
+ if (prev_page_no == PAGENO_ANALYZE_PROGRESS
+ && g_black_event_count != 0
+ ) {
+ /* Go back to analyzer selectors */
current_page_no = PAGENO_ANALYZE_SELECTOR-1;
goto again;
}
@@ -1405,6 +1429,7 @@ static void add_pages()
g_tv_report_log = GTK_TEXT_VIEW( gtk_builder_get_object(builder, "tv_report_log"));
g_tv_backtrace = GTK_TEXT_VIEW( gtk_builder_get_object(builder, "tv_backtrace"));
g_tv_comment = GTK_TEXT_VIEW( gtk_builder_get_object(builder, "tv_comment"));
+ g_eb_comment = GTK_EVENT_BOX( gtk_builder_get_object(builder, "eb_comment"));
g_tv_details = GTK_TREE_VIEW( gtk_builder_get_object(builder, "tv_details"));
g_box_warning_labels = GTK_BOX( gtk_builder_get_object(builder, "box_warning_labels"));
g_tb_approve_bt = GTK_TOGGLE_BUTTON(gtk_builder_get_object(builder, "cb_approve_bt"));
@@ -1439,6 +1464,11 @@ static void add_pages()
config_btn = GTK_WIDGET(gtk_builder_get_object(builder, "button_cfg2"));
if (config_btn)
g_signal_connect(G_OBJECT(config_btn), "clicked", G_CALLBACK(on_show_event_list_cb), NULL);
+
+ /* Set the background color of the comment eventbox */
+ GdkColor color;
+ gdk_color_parse("#CC3333", &color);
+ gtk_widget_modify_bg(GTK_WIDGET(g_eb_comment), GTK_STATE_NORMAL, &color);
}
void create_assistant(void)
@@ -1475,6 +1505,7 @@ void create_assistant(void)
g_signal_connect(g_tb_approve_bt, "toggled", G_CALLBACK(on_bt_approve_toggle), NULL);
g_signal_connect(g_btn_refresh, "clicked", G_CALLBACK(on_btn_refresh_clicked), NULL);
+ g_signal_connect(gtk_text_view_get_buffer(g_tv_comment), "changed", G_CALLBACK(on_comment_changed), NULL);
g_signal_connect(g_tv_details, "row-activated", G_CALLBACK(tv_details_row_activated), NULL);
/* [Enter] on a row: g_signal_connect(g_tv_details, "select-cursor-row", G_CALLBACK(tv_details_select_cursor_row), NULL); */
g_signal_connect(g_tv_details, "cursor-changed", G_CALLBACK(tv_details_cursor_changed), NULL);
diff --git a/src/gui-wizard-gtk/wizard.glade b/src/gui-wizard-gtk/wizard.glade
index 14aabdaf..f79950ee 100644
--- a/src/gui-wizard-gtk/wizard.glade
+++ b/src/gui-wizard-gtk/wizard.glade
@@ -101,16 +101,46 @@
</packing>
</child>
<child>
- <object class="GtkScrolledWindow" id="scrolledwindow4">
+ <object class="GtkVBox" id="vbox1">
<property name="visible">True</property>
- <property name="can_focus">True</property>
- <property name="shadow_type">out</property>
+ <property name="can_focus">False</property>
<child>
- <object class="GtkTextView" id="tv_comment">
+ <object class="GtkScrolledWindow" id="scrolledwindow4">
<property name="visible">True</property>
<property name="can_focus">True</property>
- <property name="wrap_mode">word</property>
+ <property name="hscrollbar_policy">never</property>
+ <property name="shadow_type">GTK_SHADOW_OUT</property>
+ <child>
+ <object class="GtkTextView" id="tv_comment">
+ <property name="visible">True</property>
+ <property name="can_focus">True</property>
+ <property name="wrap_mode">word</property>
+ </object>
+ </child>
+ </object>
+ <packing>
+ <property name="expand">True</property>
+ <property name="fill">True</property>
+ <property name="position">0</property>
+ </packing>
+ </child>
+ <child>
+ <object class="GtkEventBox" id="eb_comment">
+ <property name="can_focus">False</property>
+ <child>
+ <object class="GtkLabel" id="label5">
+ <property name="visible">True</property>
+ <property name="can_focus">False</property>
+ <property name="label" translatable="yes">You need to fill the how to before you can proceed...</property>
+ <property name="single_line_mode">True</property>
+ </object>
+ </child>
</object>
+ <packing>
+ <property name="expand">False</property>
+ <property name="fill">True</property>
+ <property name="position">1</property>
+ </packing>
</child>
</object>
<packing>
diff --git a/src/include/abrtlib.h b/src/include/abrtlib.h
index ab12ea03..294afdda 100644
--- a/src/include/abrtlib.h
+++ b/src/include/abrtlib.h
@@ -118,7 +118,7 @@ char *xmalloc_fgetline(FILE *file);
/* On error, copyfd_XX prints error messages and returns -1 */
enum {
- COPYFD_SPARSE = 1 << 0,
+ COPYFD_SPARSE = 1 << 0,
};
#define copyfd_eof abrt_copyfd_eof
off_t copyfd_eof(int src_fd, int dst_fd, int flags);
diff --git a/src/lib/Makefile.am b/src/lib/Makefile.am
index 6493db99..14220c99 100644
--- a/src/lib/Makefile.am
+++ b/src/lib/Makefile.am
@@ -89,7 +89,7 @@ libabrt_dbus_la_LIBADD = \
libabrt_web_la_SOURCES = \
abrt_curl.h abrt_curl.c \
- abrt_xmlrpc.h abrt_xmlrpc.cpp
+ abrt_xmlrpc.h abrt_xmlrpc.c
libabrt_web_la_CPPFLAGS = \
-Wall -Wwrite-strings -Werror \
-I$(srcdir)/../include/report -I$(srcdir)/../include \
diff --git a/src/lib/abrt_curl.c b/src/lib/abrt_curl.c
index 1cb9391d..0802a49b 100644
--- a/src/lib/abrt_curl.c
+++ b/src/lib/abrt_curl.c
@@ -175,6 +175,40 @@ static size_t fread_with_reporting(void *ptr, size_t size, size_t nmemb, void *u
return fread(ptr, size, nmemb, fp);
}
+static int curl_debug(CURL *handle, curl_infotype it, char *buf, size_t bufsize, void *unused)
+{
+ if (logmode == 0)
+ return 0;
+
+ switch (it) {
+ case CURLINFO_TEXT: /* The data is informational text. */
+ log("curl: %.*s", (int) bufsize, buf);
+ break;
+ case CURLINFO_HEADER_IN: /* The data is header (or header-like) data received from the peer. */
+ log("curl rcvd header: '%.*s'", (int) bufsize, buf);
+ break;
+ case CURLINFO_HEADER_OUT: /* The data is header (or header-like) data sent to the peer. */
+ log("curl sent header: '%.*s'", (int) bufsize, buf);
+ break;
+ case CURLINFO_DATA_IN: /* The data is protocol data received from the peer. */
+ if (g_verbose >= 3)
+ log("curl rcvd data: '%.*s'", (int) bufsize, buf);
+ else
+ log("curl rcvd data %u bytes", (int) bufsize);
+ break;
+ case CURLINFO_DATA_OUT: /* The data is protocol data sent to the peer. */
+ if (g_verbose >= 3)
+ log("curl sent data: '%.*s'", (int) bufsize, buf);
+ else
+ log("curl sent data %u bytes", (int) bufsize);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
int
abrt_post(abrt_post_state_t *state,
const char *url,
@@ -203,14 +237,18 @@ abrt_post(abrt_post_state_t *state,
// curl will need it until curl_easy_cleanup.
state->errmsg[0] = '\0';
xcurl_easy_setopt_ptr(handle, CURLOPT_ERRORBUFFER, state->errmsg);
- // "Display a lot of verbose information about its operations.
- // Very useful for libcurl and/or protocol debugging and understanding.
- // The verbose information will be sent to stderr, or the stream set
- // with CURLOPT_STDERR"
- //xcurl_easy_setopt_long(handle, CURLOPT_VERBOSE, 1);
// Shut off the built-in progress meter completely
xcurl_easy_setopt_long(handle, CURLOPT_NOPROGRESS, 1);
+ if (g_verbose >= 2) {
+ // "Display a lot of verbose information about its operations.
+ // Very useful for libcurl and/or protocol debugging and understanding.
+ // The verbose information will be sent to stderr, or the stream set
+ // with CURLOPT_STDERR"
+ xcurl_easy_setopt_long(handle, CURLOPT_VERBOSE, 1);
+ xcurl_easy_setopt_ptr(handle, CURLOPT_DEBUGFUNCTION, curl_debug);
+ }
+
// TODO: do we need to check for CURLE_URL_MALFORMAT error *here*,
// not in curl_easy_perform?
xcurl_easy_setopt_ptr(handle, CURLOPT_URL, url);
@@ -246,7 +284,7 @@ abrt_post(abrt_post_state_t *state,
if (basename) basename++;
else basename = data;
#if 0
- // Simple way, without custom reader function
+ // Simple way, without custom reader function
CURLFORMcode curlform_err = curl_formadd(&post, &last,
CURLFORM_PTRNAME, "file", // element name
CURLFORM_FILE, data, // filename to read from
diff --git a/src/lib/abrt_xmlrpc.c b/src/lib/abrt_xmlrpc.c
new file mode 100644
index 00000000..28d42325
--- /dev/null
+++ b/src/lib/abrt_xmlrpc.c
@@ -0,0 +1,137 @@
+/*
+ Copyright (C) 2010 ABRT team
+ Copyright (C) 2010 RedHat Inc
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+#include "abrtlib.h"
+#include "abrt_xmlrpc.h"
+
+void abrt_xmlrpc_die(xmlrpc_env *env)
+{
+ error_msg_and_die("fatal: XML-RPC(%d): %s", env->fault_code, env->fault_string);
+}
+
+void abrt_xmlrpc_error(xmlrpc_env *env)
+{
+ error_msg("error: XML-RPC (%d): %s", env->fault_code, env->fault_string);
+}
+
+struct abrt_xmlrpc *abrt_xmlrpc_new_client(const char *url, int ssl_verify)
+{
+ xmlrpc_env env;
+ xmlrpc_env_init(&env);
+
+ struct abrt_xmlrpc *ax = xzalloc(sizeof(struct abrt_xmlrpc));
+
+ /* This should be done at program startup, once. We do it in main */
+ /* xmlrpc_client_setup_global_const(&env); */
+
+ /* URL - bugzilla.redhat.com/show_bug.cgi?id=666893 Unable to make sense of
+ * XML-RPC response from server
+ *
+ * By default, XML data from the network may be no larger than 512K.
+ * XMLRPC_XML_SIZE_LIMIT_DEFAULT is #defined to (512*1024) in xmlrpc-c/base.h
+ *
+ * Users reported trouble with 733402 byte long responses, hope raising the
+ * limit to 2*512k is enough
+ */
+ xmlrpc_limit_set(XMLRPC_XML_SIZE_LIMIT_ID, 2 * XMLRPC_XML_SIZE_LIMIT_DEFAULT);
+
+ struct xmlrpc_curl_xportparms curl_parms;
+ memset(&curl_parms, 0, sizeof(curl_parms));
+ /* curlParms.network_interface = NULL; - done by memset */
+ curl_parms.no_ssl_verifypeer = !ssl_verify;
+ curl_parms.no_ssl_verifyhost = !ssl_verify;
+#ifdef VERSION
+ curl_parms.user_agent = PACKAGE_NAME"/"VERSION;
+#else
+ curl_parms.user_agent = "abrt";
+#endif
+
+ struct xmlrpc_clientparms client_parms;
+ memset(&client_parms, 0, sizeof(client_parms));
+ client_parms.transport = "curl";
+ client_parms.transportparmsP = &curl_parms;
+ client_parms.transportparm_size = XMLRPC_CXPSIZE(user_agent);
+
+ xmlrpc_client_create(&env, XMLRPC_CLIENT_NO_FLAGS,
+ PACKAGE_NAME, VERSION,
+ &client_parms, XMLRPC_CPSIZE(transportparm_size),
+ &ax->ax_client);
+
+ if (env.fault_occurred)
+ abrt_xmlrpc_die(&env);
+
+ ax->ax_server_info = xmlrpc_server_info_new(&env, url);
+ if (env.fault_occurred)
+ {
+ xmlrpc_client_destroy(ax->ax_client);
+ abrt_xmlrpc_die(&env);
+ }
+
+ return ax;
+}
+
+void abrt_xmlrpc_free_client(struct abrt_xmlrpc *ax)
+{
+ if (!ax)
+ return;
+
+ if (ax->ax_server_info)
+ xmlrpc_server_info_free(ax->ax_server_info);
+
+ if (ax->ax_client)
+ xmlrpc_client_destroy(ax->ax_client);
+
+ free(ax);
+}
+
+/* die or return expected results */
+xmlrpc_value *abrt_xmlrpc_call(struct abrt_xmlrpc *ax,
+ const char* method, const char* format, ...)
+{
+ xmlrpc_env env;
+ xmlrpc_env_init(&env);
+
+ xmlrpc_value* param = NULL;
+ const char* suffix;
+ va_list args;
+
+ va_start(args, format);
+ xmlrpc_build_value_va(&env, format, args, &param, &suffix);
+ va_end(args);
+ if (env.fault_occurred)
+ abrt_xmlrpc_die(&env);
+
+ xmlrpc_value* result = NULL;
+ if (*suffix != '\0')
+ {
+ xmlrpc_env_set_fault_formatted(
+ &env, XMLRPC_INTERNAL_ERROR, "Junk after the argument "
+ "specifier: '%s'. There must be exactly one argument.",
+ suffix);
+ }
+ else
+ {
+ xmlrpc_client_call2(&env, ax->ax_client, ax->ax_server_info, method,
+ param, &result);
+ }
+ xmlrpc_DECREF(param);
+ if (env.fault_occurred)
+ abrt_xmlrpc_die(&env);
+
+ return result;
+}
diff --git a/src/lib/abrt_xmlrpc.cpp b/src/lib/abrt_xmlrpc.cpp
deleted file mode 100644
index ae75a47f..00000000
--- a/src/lib/abrt_xmlrpc.cpp
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- Copyright (C) 2010 ABRT team
- Copyright (C) 2010 RedHat Inc
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-*/
-#include "abrtlib.h"
-#include "abrt_xmlrpc.h"
-
-void throw_xml_fault(xmlrpc_env *env)
-{
- error_msg_and_die("XML-RPC Fault(%d): %s", env->fault_code, env->fault_string);
-}
-
-void throw_if_xml_fault_occurred(xmlrpc_env *env)
-{
- if (env->fault_occurred)
- {
- throw_xml_fault(env);
- }
-}
-
-void abrt_xmlrpc_conn::new_xmlrpc_client(const char* url, bool ssl_verify)
-{
- m_pClient = NULL;
- m_pServer_info = NULL;
-
- xmlrpc_env env;
- xmlrpc_env_init(&env);
-
- /* This should be done at program startup, once. We do it in main */
- /* xmlrpc_client_setup_global_const(&env); */
-
- /* URL - bugzilla.redhat.com/show_bug.cgi?id=666893 Unable to make sense of
- * XML-RPC response from server
- *
- * By default, XML data from the network may be no larger than 512K.
- * XMLRPC_XML_SIZE_LIMIT_DEFAULT is #defined to (512*1024) in xmlrpc-c/base.h
- *
- * Users reported trouble with 733402 byte long responses, hope raising the
- * limit to 2*512k is enough
- */
- xmlrpc_limit_set(XMLRPC_XML_SIZE_LIMIT_ID, 2 * XMLRPC_XML_SIZE_LIMIT_DEFAULT);
-
- struct xmlrpc_curl_xportparms curlParms;
- memset(&curlParms, 0, sizeof(curlParms));
- /* curlParms.network_interface = NULL; - done by memset */
- curlParms.no_ssl_verifypeer = !ssl_verify;
- curlParms.no_ssl_verifyhost = !ssl_verify;
-#ifdef VERSION
- curlParms.user_agent = PACKAGE_NAME"/"VERSION;
-#else
- curlParms.user_agent = "abrt";
-#endif
-
- struct xmlrpc_clientparms clientParms;
- memset(&clientParms, 0, sizeof(clientParms));
- clientParms.transport = "curl";
- clientParms.transportparmsP = &curlParms;
- clientParms.transportparm_size = XMLRPC_CXPSIZE(user_agent);
-
- xmlrpc_client_create(&env, XMLRPC_CLIENT_NO_FLAGS,
- PACKAGE_NAME, VERSION,
- &clientParms, XMLRPC_CPSIZE(transportparm_size),
- &m_pClient);
- if (env.fault_occurred)
- throw_xml_fault(&env);
-
- m_pServer_info = xmlrpc_server_info_new(&env, url);
- if (env.fault_occurred)
- {
- xmlrpc_client_destroy(m_pClient);
- m_pClient = NULL;
- throw_xml_fault(&env);
- }
-}
-
-void abrt_xmlrpc_conn::destroy_xmlrpc_client()
-{
- if (m_pServer_info)
- {
- xmlrpc_server_info_free(m_pServer_info);
- m_pServer_info = NULL;
- }
- if (m_pClient)
- {
- xmlrpc_client_destroy(m_pClient);
- m_pClient = NULL;
- }
-}
diff --git a/src/lib/abrt_xmlrpc.h b/src/lib/abrt_xmlrpc.h
index 93c5a9d6..5c94360f 100644
--- a/src/lib/abrt_xmlrpc.h
+++ b/src/lib/abrt_xmlrpc.h
@@ -19,37 +19,30 @@
#ifndef ABRT_XMLRPC_H_
#define ABRT_XMLRPC_H_ 1
-#include <curl/curl.h>
+/* include/stdint.h: typedef int int32_t;
+ * include/xmlrpc-c/base.h: typedef int32_t xmlrpc_int32;
+ */
+
#include <xmlrpc-c/base.h>
#include <xmlrpc-c/client.h>
#ifdef __cplusplus
-/*
- * Simple class holding XMLRPC connection data.
- * Used mainly to ensure we always destroy xmlrpc client and server_info
- * on return or throw.
- */
-struct abrt_xmlrpc_conn {
- xmlrpc_client* m_pClient;
- xmlrpc_server_info* m_pServer_info;
-
- abrt_xmlrpc_conn(const char* url, bool ssl_verify) { new_xmlrpc_client(url, ssl_verify); }
- /* this never throws exceptions - calls C functions only */
- ~abrt_xmlrpc_conn() { destroy_xmlrpc_client(); }
-
- void new_xmlrpc_client(const char* url, bool ssl_verify);
- void destroy_xmlrpc_client();
-};
+extern "C" {
#endif
+struct abrt_xmlrpc {
+ xmlrpc_client *ax_client;
+ xmlrpc_server_info *ax_server_info;
+};
-#ifdef __cplusplus
-extern "C" {
-#endif
+struct abrt_xmlrpc *abrt_xmlrpc_new_client(const char *url, int ssl_verify);
+void abrt_xmlrpc_free_client(struct abrt_xmlrpc *ax);
+void abrt_xmlrpc_die(xmlrpc_env *env) __attribute__((noreturn));
+void abrt_xmlrpc_error(xmlrpc_env *env);
-/* Utility functions */
-void throw_xml_fault(xmlrpc_env *env);
-void throw_if_xml_fault_occurred(xmlrpc_env *env);
+/* die or return expected results */
+xmlrpc_value *abrt_xmlrpc_call(struct abrt_xmlrpc *ax,
+ const char *method, const char *format, ...);
#ifdef __cplusplus
}
diff --git a/src/lib/hooklib.c b/src/lib/hooklib.c
index 3bde4dfa..804a2394 100644
--- a/src/lib/hooklib.c
+++ b/src/lib/hooklib.c
@@ -1,5 +1,5 @@
/*
- Copyright (C) 2009 RedHat inc.
+ Copyright (C) 2009 RedHat inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/src/lib/hooklib.h b/src/lib/hooklib.h
index c140f951..1add7d09 100644
--- a/src/lib/hooklib.h
+++ b/src/lib/hooklib.h
@@ -1,5 +1,5 @@
/*
- Copyright (C) 2009 RedHat inc.
+ Copyright (C) 2009 RedHat inc.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
diff --git a/src/lib/read_write.c b/src/lib/read_write.c
index da067f78..fe85fcfb 100644
--- a/src/lib/read_write.c
+++ b/src/lib/read_write.c
@@ -88,7 +88,7 @@ ssize_t full_write(int fd, const void *buf, size_t len)
/* user can do another write to know the error code */
return total;
}
- return cc; /* write() returns -1 on failure. */
+ return cc; /* write() returns -1 on failure. */
}
total += cc;
diff --git a/src/plugins/Makefile.am b/src/plugins/Makefile.am
index 1ef7fbc2..df0ad944 100644
--- a/src/plugins/Makefile.am
+++ b/src/plugins/Makefile.am
@@ -239,7 +239,7 @@ abrt_action_analyze_backtrace_LDADD = \
../btparser/libbtparser.la
abrt_action_bugzilla_SOURCES = \
- abrt-action-bugzilla.cpp
+ abrt-action-bugzilla.c rhbz.c rhbz.h
abrt_action_bugzilla_CPPFLAGS = \
-I$(srcdir)/../include/report -I$(srcdir)/../include \
-I$(srcdir)/../lib \
@@ -253,7 +253,7 @@ abrt_action_bugzilla_CPPFLAGS = \
-DPLUGINS_CONF_DIR=\"$(PLUGINS_CONF_DIR)\" \
$(GLIB_CFLAGS) \
-D_GNU_SOURCE \
- -Wall -Wwrite-strings -Werror
+ -Wall -Wwrite-strings
abrt_action_bugzilla_LDADD = \
$(GLIB_LIBS) \
../lib/libabrt_web.la \
diff --git a/src/plugins/abrt-action-bugzilla.c b/src/plugins/abrt-action-bugzilla.c
new file mode 100644
index 00000000..91bc26f8
--- /dev/null
+++ b/src/plugins/abrt-action-bugzilla.c
@@ -0,0 +1,340 @@
+/*
+ Copyright (C) 2010 ABRT team
+ Copyright (C) 2010 RedHat Inc
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+#include "abrtlib.h"
+#include "abrt_problem_data.h"
+#include "parse_options.h"
+#include "abrt_xmlrpc.h"
+#include "rhbz.h"
+
+#define XML_RPC_SUFFIX "/xmlrpc.cgi"
+
+/* From RHEL6 kernel/panic.c:
+ * { TAINT_PROPRIETARY_MODULE, 'P', 'G' },
+ * { TAINT_FORCED_MODULE, 'F', ' ' },
+ * { TAINT_UNSAFE_SMP, 'S', ' ' },
+ * { TAINT_FORCED_RMMOD, 'R', ' ' },
+ * { TAINT_MACHINE_CHECK, 'M', ' ' },
+ * { TAINT_BAD_PAGE, 'B', ' ' },
+ * { TAINT_USER, 'U', ' ' },
+ * { TAINT_DIE, 'D', ' ' },
+ * { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
+ * { TAINT_WARN, 'W', ' ' },
+ * { TAINT_CRAP, 'C', ' ' },
+ * { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' },
+ * entries 12 - 27 are unused
+ * { TAINT_HARDWARE_UNSUPPORTED, 'H', ' ' },
+ * entries 29 - 31 are unused
+ */
+
+static const char * const taint_warnings[] = {
+ "Proprietary Module",
+ "Forced Module",
+ "Unsafe SMP",
+ "Forced rmmod",
+ "Machine Check",
+ "Bad Page",
+ "User",
+ "Die",
+ "Overriden ACPI Table",
+ "Warning Issued",
+ "Experimental Module Loaded",
+ "Firmware Workaround",
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ "Hardware Unsupported",
+ NULL,
+ NULL,
+};
+
+/* TODO: npajkovs: fix tainted string */
+static const char *tainted_string(unsigned tainted)
+{
+ unsigned idx = 0;
+ while ((tainted >>= 1) != 0)
+ idx++;
+
+ return taint_warnings[idx];
+}
+
+static void report_to_bugzilla(const char *dump_dir_name, map_string_h *settings)
+{
+ struct dump_dir *dd = dd_opendir(dump_dir_name, /*flags:*/ 0);
+ if (!dd)
+ xfunc_die(); /* dd_opendir already emitted error msg */
+ problem_data_t *problem_data = create_problem_data_from_dump_dir(dd);
+ dd_close(dd);
+
+ const char *env;
+ const char *login;
+ const char *password;
+ const char *bugzilla_xmlrpc;
+ const char *bugzilla_url;
+ bool ssl_verify;
+
+ env = getenv("Bugzilla_Login");
+ login = env ? env : get_map_string_item_or_empty(settings, "Login");
+ env = getenv("Bugzilla_Password");
+ password = env ? env : get_map_string_item_or_empty(settings, "Password");
+ if (!login[0] || !password[0])
+ error_msg_and_die(_("Empty login or password, please check your configuration"));
+
+ env = getenv("Bugzilla_BugzillaURL");
+ bugzilla_url = env ? env : get_map_string_item_or_empty(settings, "BugzillaURL");
+ if (!bugzilla_url[0])
+ bugzilla_url = "https://bugzilla.redhat.com";
+ bugzilla_xmlrpc = xasprintf("%s"XML_RPC_SUFFIX, bugzilla_url);
+
+ env = getenv("Bugzilla_SSLVerify");
+ ssl_verify = string_to_bool(env ? env : get_map_string_item_or_empty(settings, "SSLVerify"));
+
+ const char *component = get_problem_item_content_or_NULL(problem_data, FILENAME_COMPONENT);
+ const char *duphash = get_problem_item_content_or_NULL(problem_data, FILENAME_DUPHASH);
+ if (!duphash)
+ error_msg_and_die(_("Essential file '%s' is missing, can't continue.."),
+ FILENAME_DUPHASH);
+
+ if (!*duphash)
+ error_msg_and_die(_("Essential file '%s' is empty, can't continue.."),
+ FILENAME_DUPHASH);
+
+ const char *release = get_problem_item_content_or_NULL(problem_data, FILENAME_OS_RELEASE);
+ if (!release) /* Old dump dir format compat. Remove in abrt-2.1 */
+ release = get_problem_item_content_or_NULL(problem_data, "release");
+
+ struct abrt_xmlrpc *client = abrt_xmlrpc_new_client(bugzilla_xmlrpc, ssl_verify);
+
+ log(_("Logging into Bugzilla at %s"), bugzilla_url);
+ rhbz_login(client, login, password);
+
+ log(_("Checking for duplicates"));
+ char *product = NULL;
+ char *version = NULL;
+ parse_release_for_bz(release, &product, &version);
+ free(version);
+
+ xmlrpc_value *result;
+ if (strcmp(product, "Fedora") == 0)
+ result = rhbz_search_duphash(client, component, product, duphash);
+ else
+ result = rhbz_search_duphash(client, component, NULL, duphash);
+
+ xmlrpc_value *all_bugs = rhbz_get_member("bugs", result);
+ xmlrpc_DECREF(result);
+
+ if (!all_bugs)
+ error_msg_and_die(_("Missing mandatory member 'bugs'"));
+
+ int all_bugs_size = rhbz_array_size(all_bugs);
+ // When someone clones bug it has same duphash, so we can find more than 1.
+ // Need to be checked if component is same.
+ VERB3 log("Bugzilla has %i reports with same duphash '%s'",
+ all_bugs_size, duphash);
+
+ int bug_id = -1, dependent_bug = -1;
+ struct bug_info *bz = NULL;
+ if (all_bugs_size > 0)
+ {
+ bug_id = rhbz_bug_id(all_bugs);
+ xmlrpc_DECREF(all_bugs);
+ bz = rhbz_bug_info(client, bug_id);
+
+ if (strcmp(bz->bi_product, product) != 0)
+ {
+ dependent_bug = bug_id;
+ /* found something, but its a different product */
+ free_bug_info(bz);
+
+ xmlrpc_value *result = rhbz_search_duphash(client, component,
+ product, duphash);
+ xmlrpc_value *all_bugs = rhbz_get_member("bugs", result);
+ xmlrpc_DECREF(result);
+
+ all_bugs_size = rhbz_array_size(all_bugs);
+ if (all_bugs_size > 0)
+ {
+ bug_id = rhbz_bug_id(all_bugs);
+ bz = rhbz_bug_info(client, bug_id);
+ }
+ xmlrpc_DECREF(all_bugs);
+ }
+
+ }
+ free(product);
+
+ if (all_bugs_size == 0) // Create new bug
+ {
+ log(_("Creating a new bug"));
+ bug_id = rhbz_new_bug(client, problem_data, bug_id);
+
+ log("Adding attachments to bug %i", bug_id);
+ char bug_id_str[sizeof(int)*3 + 2];
+ sprintf(bug_id_str, "%i", bug_id);
+
+ rhbz_attachments(client, bug_id_str, problem_data);
+
+ log(_("Logging out"));
+ rhbz_logout(client);
+
+ log("Status: NEW %s/show_bug.cgi?id=%u", bugzilla_url, bug_id);
+ abrt_xmlrpc_free_client(client);
+ return;
+ }
+
+ // decision based on state
+ log(_("Bug is already reported: %i"), bz->bi_id);
+ if ((strcmp(bz->bi_status, "CLOSED") == 0)
+ && (strcmp(bz->bi_resolution, "DUPLICATE") == 0))
+ {
+ struct bug_info *origin;
+ origin = rhbz_find_origin_bug_closed_duplicate(client, bz);
+ if (origin)
+ {
+ free_bug_info(bz);
+ bz = origin;
+ }
+ }
+
+ if (strcmp(bz->bi_status, "CLOSED") != 0)
+ {
+ if ((strcmp(bz->bi_reporter, login) != 0)
+ && (!g_list_find_custom(bz->bi_cc_list, login, (GCompareFunc)g_strcmp0)))
+ {
+ log(_("Add %s to CC list"), login);
+ rhbz_mail_to_cc(client, bz->bi_id, login);
+ }
+
+ char *dsc = make_description_comment(problem_data);
+ if (dsc)
+ {
+ const char *package = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_PACKAGE);
+ const char *release = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_OS_RELEASE);
+ if (!release) /* Old dump dir format compat. Remove in abrt-2.1 */
+ release = get_problem_item_content_or_NULL(problem_data, "release");
+ const char *arch = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_ARCHITECTURE);
+ const char *is_private = get_problem_item_content_or_NULL(problem_data,
+ "is_private");
+
+ char *full_dsc = xasprintf("Package: %s\n"
+ "Architecture: %s\n"
+ "OS Release: %s\n"
+ "%s", package, arch, release, dsc);
+
+ log(_("Adding new comment to bug %d"), bz->bi_id);
+ free(dsc);
+
+ int is_priv = is_private && string_to_bool(is_private);
+ rhbz_add_comment(client, bz->bi_id, full_dsc, is_priv);
+ free(full_dsc);
+ }
+ }
+
+ log(_("Logging out"));
+ rhbz_logout(client);
+
+ log("Status: %s%s%s %s/show_bug.cgi?id=%u",
+ bz->bi_status,
+ bz->bi_resolution ? " " : "",
+ bz->bi_resolution ? bz->bi_resolution : "",
+ bugzilla_url,
+ bz->bi_id);
+
+ dd = dd_opendir(dump_dir_name, /*flags:*/ 0);
+ if (dd)
+ {
+ char *msg = xasprintf("Bugzilla: URL=%s/show_bug.cgi?id=%u", bugzilla_url, bz->bi_id);
+ add_reported_to(dd, msg);
+ free(msg);
+ dd_close(dd);
+ }
+
+ free_problem_data(problem_data);
+ free_bug_info(bz);
+ abrt_xmlrpc_free_client(client);
+}
+
+int main(int argc, char **argv)
+{
+ abrt_init(argv);
+
+ map_string_h *settings = new_map_string();
+ const char *dump_dir_name = ".";
+ GList *conf_file = NULL;
+
+ /* Can't keep these strings/structs static: _() doesn't support that */
+ const char *program_usage_string = _(
+ "\b [-v] -c CONFFILE -d DIR\n"
+ "\n"
+ "Reports problem to Bugzilla"
+ );
+ enum {
+ OPT_v = 1 << 0,
+ OPT_d = 1 << 1,
+ OPT_c = 1 << 2,
+ };
+ /* Keep enum above and order of options below in sync! */
+ struct options program_options[] = {
+ OPT__VERBOSE(&g_verbose),
+ OPT_STRING('d', NULL, &dump_dir_name, "DIR" , _("Dump directory")),
+ OPT_LIST( 'c', NULL, &conf_file , "FILE", _("Configuration file (may be given many times)")),
+ OPT_END()
+ };
+ /*unsigned opts =*/ parse_opts(argc, argv, program_options, program_usage_string);
+
+ export_abrt_envvars(0);
+
+ while (conf_file)
+ {
+ char *fn = (char *)conf_file->data;
+ VERB1 log("Loading settings from '%s'", fn);
+ load_conf_file(fn, settings, /*skip key w/o values:*/ true);
+ VERB3 log("Loaded '%s'", fn);
+ conf_file = g_list_remove(conf_file, fn);
+ }
+
+ VERB1 log("Initializing XML-RPC library");
+ xmlrpc_env env;
+ xmlrpc_env_init(&env);
+ xmlrpc_client_setup_global_const(&env);
+ if (env.fault_occurred)
+ error_msg_and_die("XML-RPC Fault: %s(%d)", env.fault_string, env.fault_code);
+ xmlrpc_env_clean(&env);
+
+ report_to_bugzilla(dump_dir_name, settings);
+
+ free_map_string(settings);
+ return 0;
+}
diff --git a/src/plugins/abrt-action-bugzilla.cpp b/src/plugins/abrt-action-bugzilla.cpp
deleted file mode 100644
index e8a605f1..00000000
--- a/src/plugins/abrt-action-bugzilla.cpp
+++ /dev/null
@@ -1,958 +0,0 @@
-/*
- Copyright (C) 2010 ABRT team
- Copyright (C) 2010 RedHat Inc
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License along
- with this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-*/
-#include "abrtlib.h"
-#include "abrt_xmlrpc.h"
-#include "abrt_problem_data.h"
-#include "parse_options.h"
-
-#define XML_RPC_SUFFIX "/xmlrpc.cgi"
-#define MAX_HOPS 5
-
-/*
- * TODO: npajkovs: better deallocation of xmlrpc value
- * npajkovs: better gathering function which collects all information from bugzilla
- * npajkovs: figure out how to deal with cloning bugs
- * npajkovs: check if attachment was uploaded successul an if not try it again(max 3 times)
- * and if it still fails. retrun successful, but mention that attaching failed
- * npajkovs: add option to set comment privat
- */
-
-struct bug_info {
- const char* bug_status;
- const char* bug_resolution;
- const char* bug_reporter;
- const char* bug_product;
- xmlrpc_int32 bug_dup_id;
- GList* bug_cc;
-};
-
-/* xzalloc */
-static void bug_info_init(struct bug_info* bz)
-{
- bz->bug_status = NULL;
- bz->bug_resolution = NULL;
- bz->bug_reporter = NULL;
- bz->bug_product = NULL;
- bz->bug_dup_id = -1;
- bz->bug_cc = NULL;
-}
-
-static void bug_info_destroy(struct bug_info* bz)
-{
- free((void*)bz->bug_status);
- free((void*)bz->bug_resolution);
- free((void*)bz->bug_reporter);
- free((void*)bz->bug_product);
-
- list_free_with_free(bz->bug_cc);
-}
-
-/*
- * Static namespace for xmlrpc stuff.
- * Used mainly to ensure we always destroy xmlrpc client and server_info.
- */
-
-namespace {
-
-struct ctx: public abrt_xmlrpc_conn {
- xmlrpc_env env;
-
- ctx(const char* url, bool ssl_verify): abrt_xmlrpc_conn(url, ssl_verify)
- { xmlrpc_env_init(&env); }
- ~ctx() { xmlrpc_env_clean(&env); }
-
- void login(const char* login, const char* passwd);
- void logout();
-
- const char* get_bug_status(xmlrpc_value* result_xml);
- const char* get_bug_resolution(xmlrpc_value* result_xml);
- const char* get_bug_reporter(xmlrpc_value* result_xml);
- const char* get_bug_product(xmlrpc_value* relult_xml);
-
- xmlrpc_value* call_quicksearch_duphash(const char* component, const char* release, const char* duphash);
- xmlrpc_value* get_cc_member(xmlrpc_value* result_xml);
- xmlrpc_value* get_member(const char* member, xmlrpc_value* result_xml);
-
- int get_array_size(xmlrpc_value* result_xml);
- xmlrpc_int32 get_bug_id(xmlrpc_value* result_xml);
- xmlrpc_int32 get_bug_dup_id(xmlrpc_value* result_xml);
- void get_bug_cc(xmlrpc_value* result_xml, struct bug_info* bz);
- int add_plus_one_cc(xmlrpc_int32 bug_id, const char* login);
- xmlrpc_int32 new_bug(problem_data_t *problem_data, int depend_on_bugno);
- int add_attachments(const char* bug_id_str, problem_data_t *problem_data);
- int get_bug_info(struct bug_info* bz, xmlrpc_int32 bug_id);
- int add_comment(xmlrpc_int32 bug_id, const char* comment, bool is_private);
-
- xmlrpc_value* call(const char* method, const char* format, ...);
-};
-
-xmlrpc_value* ctx::call(const char* method, const char* format, ...)
-{
- xmlrpc_value* result = NULL;
-
- if (!env.fault_occurred)
- {
- xmlrpc_value* param = NULL;
- va_list args;
- const char* suffix;
-
- va_start(args, format);
- xmlrpc_build_value_va(&env, format, args, &param, &suffix);
- va_end(args);
-
- if (*suffix != '\0')
- {
- xmlrpc_env_set_fault_formatted(
- &env, XMLRPC_INTERNAL_ERROR, "Junk after the argument "
- "specifier: '%s'. There must be exactly one arument.",
- suffix);
- }
- else
- {
- xmlrpc_client_call2(&env, m_pClient, m_pServer_info, method, param, &result);
- }
- xmlrpc_DECREF(param);
- if (env.fault_occurred)
- return NULL;
- }
-
- return result;
-}
-
-xmlrpc_value* ctx::get_member(const char* member, xmlrpc_value* result_xml)
-{
- xmlrpc_value* cc_member = NULL;
- xmlrpc_struct_find_value(&env, result_xml, member, &cc_member);
- if (env.fault_occurred)
- return NULL;
-
- return cc_member;
-}
-
-int ctx::get_array_size(xmlrpc_value* result_xml)
-{
- int size = xmlrpc_array_size(&env, result_xml);
- if (env.fault_occurred)
- return -1;
-
- return size;
-}
-
-xmlrpc_int32 ctx::get_bug_dup_id(xmlrpc_value* result_xml)
-{
- xmlrpc_value* dup_id = get_member("dup_id", result_xml);
- if (!dup_id)
- return -1;
-
- xmlrpc_int32 dup_id_int = -1;
- xmlrpc_read_int(&env, dup_id, &dup_id_int);
- xmlrpc_DECREF(dup_id);
- if (env.fault_occurred)
- return -1;
-
- VERB3 log("got dup_id: %i", dup_id_int);
- return dup_id_int;
-}
-
-const char* ctx::get_bug_product(xmlrpc_value* result_xml)
-{
- xmlrpc_value* product_member = get_member("product", result_xml);
- if (!product_member) //should never happend. Each bug has to set up product
- return NULL;
-
- const char* product = NULL;
- xmlrpc_read_string(&env, product_member, &product);
- xmlrpc_DECREF(product_member);
- if (env.fault_occurred)
- return NULL;
-
- if (*product != '\0')
- {
- VERB3 log("got bug product: %s", product);
- return product;
- }
-
- free((void*)product);
- return NULL;
-}
-
-const char* ctx::get_bug_reporter(xmlrpc_value* result_xml)
-{
- xmlrpc_value* reporter_member = get_member("reporter", result_xml);
- if (!reporter_member)
- return NULL;
-
- const char* reporter = NULL;
- xmlrpc_read_string(&env, reporter_member, &reporter);
- xmlrpc_DECREF(reporter_member);
- if (env.fault_occurred)
- return NULL;
-
- if (*reporter != '\0')
- {
- VERB3 log("got bug reporter: %s", reporter);
- return reporter;
- }
- free((void*)reporter);
- return NULL;
-}
-
-const char* ctx::get_bug_resolution(xmlrpc_value* result_xml)
-{
- xmlrpc_value* bug_resolution = get_member("resolution", result_xml);
- if (!bug_resolution)
- return NULL;
-
- const char* resolution_str = NULL;
- xmlrpc_read_string(&env, bug_resolution, &resolution_str);
- xmlrpc_DECREF(bug_resolution);
- if (env.fault_occurred)
- return NULL;
-
- if (*resolution_str != '\0')
- {
- VERB3 log("got resolution: %s", resolution_str);
- return resolution_str;
- }
- free((void*)resolution_str);
- return NULL;
-}
-
-const char* ctx::get_bug_status(xmlrpc_value* result_xml)
-{
- xmlrpc_value* bug_status = get_member("bug_status", result_xml);
- if (!bug_status)
- return NULL;
-
- const char* status_str = NULL;
- xmlrpc_read_string(&env, bug_status, &status_str);
- xmlrpc_DECREF(bug_status);
- if (env.fault_occurred)
- return NULL;
-
- if (*status_str != '\0')
- {
- VERB3 log("got bug_status: %s", status_str);
- return status_str;
- }
- free((void*)status_str);
- return NULL;
-}
-
-void ctx::get_bug_cc(xmlrpc_value* result_xml, struct bug_info* bz)
-{
- xmlrpc_value* cc_member = get_member("cc", result_xml);
- if (!cc_member)
- return;
-
- int array_size = xmlrpc_array_size(&env, cc_member);
- if (array_size == -1)
- return;
-
- VERB3 log("count members on cc %i", array_size);
-
- for (int i = 0; i < array_size; i++)
- {
- xmlrpc_value* item = NULL;
- xmlrpc_array_read_item(&env, cc_member, i, &item);
- if (env.fault_occurred)
- return;
-
- if (item)
- {
- const char* cc = NULL;
- xmlrpc_read_string(&env, item, &cc);
- xmlrpc_DECREF(item);
- if (env.fault_occurred)
- {
- xmlrpc_DECREF(cc_member);
- return;
- }
-
- if (*cc != '\0')
- {
- bz->bug_cc = g_list_append(bz->bug_cc, (char*)cc);
- VERB3 log("member on cc is %s", cc);
- continue;
- }
- free((char*)cc);
- }
- }
- xmlrpc_DECREF(cc_member);
- return;
-}
-
-xmlrpc_value* ctx::call_quicksearch_duphash(const char* component,
- const char* release, const char* duphash)
-{
- char *query = NULL;
- if (!release)
- query = xasprintf("ALL component:\"%s\" whiteboard:\"%s\"", component, duphash);
- else
- {
- char *product = NULL;
- char *version = NULL;
- parse_release_for_bz(release, &product, &version);
- query = xasprintf("ALL component:\"%s\" whiteboard:\"%s\" product:\"%s\"",
- component, duphash, product
- );
- free(product);
- free(version);
- }
-
- VERB3 log("quicksearch for `%s'", query);
- xmlrpc_value *ret = call("Bug.search", "({s:s})", "quicksearch", query);
- free(query);
- return ret;
-}
-
-xmlrpc_int32 ctx::get_bug_id(xmlrpc_value* result_xml)
-{
- xmlrpc_value* item = NULL;
- xmlrpc_array_read_item(&env, result_xml, 0, &item);
- if (env.fault_occurred)
- return -1;
-
- xmlrpc_value* bug = get_member("bug_id", item);
- xmlrpc_DECREF(item);
- if (!bug)
- return -1;
-
- xmlrpc_int32 bug_id = -1;
- xmlrpc_read_int(&env, bug, &bug_id);
- xmlrpc_DECREF(bug);
- if (env.fault_occurred)
- return -1;
-
- VERB3 log("got bug_id %d", (int)bug_id);
- return bug_id;
-}
-
-int ctx::add_plus_one_cc(xmlrpc_int32 bug_id, const char* login)
-{
- xmlrpc_value* result = call("Bug.update", "({s:i,s:{s:(s)}})", "ids", (int)bug_id, "updates", "add_cc", login);
- if (result)
- xmlrpc_DECREF(result);
- return result ? 0 : -1;
-}
-
-int ctx::add_comment(xmlrpc_int32 bug_id, const char* comment, bool is_private)
-{
- xmlrpc_value* result = call("Bug.add_comment", "({s:i,s:s,s:b})", "id", (int)bug_id,
- "comment", comment,
- "private", is_private);
- if (result)
- xmlrpc_DECREF(result);
- return result ? 0 : -1;
-}
-
-/* From RHEL6 kernel/panic.c:
- * { TAINT_PROPRIETARY_MODULE, 'P', 'G' },
- * { TAINT_FORCED_MODULE, 'F', ' ' },
- * { TAINT_UNSAFE_SMP, 'S', ' ' },
- * { TAINT_FORCED_RMMOD, 'R', ' ' },
- * { TAINT_MACHINE_CHECK, 'M', ' ' },
- * { TAINT_BAD_PAGE, 'B', ' ' },
- * { TAINT_USER, 'U', ' ' },
- * { TAINT_DIE, 'D', ' ' },
- * { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
- * { TAINT_WARN, 'W', ' ' },
- * { TAINT_CRAP, 'C', ' ' },
- * { TAINT_FIRMWARE_WORKAROUND, 'I', ' ' },
- * entries 12 - 27 are unused
- * { TAINT_HARDWARE_UNSUPPORTED, 'H', ' ' },
- * entries 29 - 31 are unused
- */
-
-static const char * const taint_warnings[] = {
- "Proprietary Module",
- "Forced Module",
- "Unsafe SMP",
- "Forced rmmod",
- "Machine Check",
- "Bad Page",
- "User",
- "Die",
- "Overriden ACPI Table",
- "Warning Issued",
- "Experimental Module Loaded",
- "Firmware Workaround",
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- "Hardware Unsupported",
- NULL,
- NULL,
-};
-
-static const char *tainted_string(unsigned tainted)
-{
- unsigned idx = 0;
- while ((tainted >>= 1) != 0)
- idx++;
-
- return taint_warnings[idx];
-}
-
-xmlrpc_int32 ctx::new_bug(problem_data_t *problem_data, int depend_on_bugno)
-{
- const char *package = get_problem_item_content_or_NULL(problem_data, FILENAME_PACKAGE);
- const char *component = get_problem_item_content_or_NULL(problem_data, FILENAME_COMPONENT);
- const char *release = get_problem_item_content_or_NULL(problem_data, FILENAME_OS_RELEASE);
- if (!release) /* Old dump dir format compat. Remove in abrt-2.1 */
- release = get_problem_item_content_or_NULL(problem_data, "release");
- const char *arch = get_problem_item_content_or_NULL(problem_data, FILENAME_ARCHITECTURE);
- const char *duphash = get_problem_item_content_or_NULL(problem_data, FILENAME_DUPHASH);
- const char *reason = get_problem_item_content_or_NULL(problem_data, FILENAME_REASON);
- const char *function = get_problem_item_content_or_NULL(problem_data, FILENAME_CRASH_FUNCTION);
- const char *analyzer = get_problem_item_content_or_NULL(problem_data, FILENAME_ANALYZER);
- const char *tainted_str = get_problem_item_content_or_NULL(problem_data, FILENAME_TAINTED);
-
- struct strbuf *buf_summary = strbuf_new();
- strbuf_append_strf(buf_summary, "[abrt] %s", package);
-
- if (function != NULL && strlen(function) < 30)
- strbuf_append_strf(buf_summary, ": %s", function);
-
- if (reason != NULL)
- strbuf_append_strf(buf_summary, ": %s", reason);
-
- if (tainted_str && analyzer
- && (strcmp(analyzer, "Kerneloops") == 0)
- ) {
- unsigned long tainted = xatoi_positive(tainted_str);
- const char *tainted_warning = tainted_string(tainted);
- if (tainted_warning)
- strbuf_append_strf(buf_summary, ": TAINTED %s", tainted_warning);
- }
-
- char *status_whiteboard = xasprintf("abrt_hash:%s", duphash);
-
- char *bz_dsc = make_description_bz(problem_data);
- char *full_dsc = xasprintf("abrt version: "VERSION"\n%s", bz_dsc);
- free(bz_dsc);
-
- char *product = NULL;
- char *version = NULL;
- parse_release_for_bz(release, &product, &version);
-
- xmlrpc_value* result = NULL;
- char *summary = strbuf_free_nobuf(buf_summary);
- if (depend_on_bugno > -1)
- {
- result = call("Bug.create", "({s:s,s:s,s:s,s:s,s:s,s:s,s:s,s:i})",
- "product", product,
- "component", component,
- "version", version,
- "summary", summary,
- "description", full_dsc,
- "status_whiteboard", status_whiteboard,
- "platform", arch,
- "dependson", depend_on_bugno
- );
- }
- else
- {
- result = call("Bug.create", "({s:s,s:s,s:s,s:s,s:s,s:s,s:s})",
- "product", product,
- "component", component,
- "version", version,
- "summary", summary,
- "description", full_dsc,
- "status_whiteboard", status_whiteboard,
- "platform", arch
- );
- }
- free(status_whiteboard);
- free(product);
- free(version);
- free(summary);
- free(full_dsc);
-
- if (!result)
- return -1;
-
- xmlrpc_value* id = get_member("id", result);
- xmlrpc_DECREF(result);
- if (!id)
- return -1;
-
- xmlrpc_int32 bug_id = -1;
- xmlrpc_read_int(&env, id, &bug_id);
- xmlrpc_DECREF(id);
- if (env.fault_occurred)
- return -1;
-
- log(_("New bug id: %i"), (int)bug_id);
-
- return bug_id;
-}
-
-int ctx::add_attachments(const char* bug_id_str, problem_data_t *problem_data)
-{
- GHashTableIter iter;
- char *name;
- struct problem_item *value;
- g_hash_table_iter_init(&iter, problem_data);
- while (g_hash_table_iter_next(&iter, (void**)&name, (void**)&value))
- {
- const char *content = value->content;
-
- // We were special-casing FILENAME_BACKTRACE here, but Karel says
- // he can retrieve it in inlined form from comments too.
- if ((value->flags & CD_FLAG_TXT)
- && (strlen(content) > CD_TEXT_ATT_SIZE /*|| (strcmp(name, FILENAME_BACKTRACE) == 0)*/)
- ) {
- char *encoded64 = encode_base64(content, strlen(content));
- char *filename = xasprintf("File: %s", name);
- xmlrpc_value* result = call("bugzilla.addAttachment", "(s{s:s,s:s,s:s,s:s})", bug_id_str,
- "description", filename,
- "filename", name,
- "contenttype", "text/plain",
- "data", encoded64
- );
- free(encoded64);
- free(filename);
- if (!result)
- return -1;
-
- xmlrpc_DECREF(result);
- }
- }
- return 0;
-}
-
-int ctx::get_bug_info(struct bug_info* bz, xmlrpc_int32 bug_id)
-{
- char bug_id_str[sizeof(long)*3 + 2];
- sprintf(bug_id_str, "%lu", (long)bug_id);
- xmlrpc_value* result = call("bugzilla.getBug", "(s)", bug_id_str);
- if (!result)
- return -1;
-
- bz->bug_product = get_bug_product(result);
- if (bz->bug_product == NULL)
- return -1;
-
- bz->bug_status = get_bug_status(result);
- if (bz->bug_status == NULL)
- return -1;
-
- bz->bug_reporter = get_bug_reporter(result);
- if (bz->bug_reporter == NULL)
- return -1;
-
- // mandatory when bug status is CLOSED
- if (strcmp(bz->bug_status, "CLOSED") == 0)
- {
- bz->bug_resolution = get_bug_resolution(result);
- if ((env.fault_occurred) && (bz->bug_resolution == NULL))
- return -1;
- }
-
- // mandatory when bug status is CLOSED and resolution is DUPLICATE
- if ((strcmp(bz->bug_status, "CLOSED") == 0)
- && (strcmp(bz->bug_resolution, "DUPLICATE") == 0)
- ) {
- bz->bug_dup_id = get_bug_dup_id(result);
- if (env.fault_occurred)
- return -1;
- }
-
- get_bug_cc(result, bz);
- if (env.fault_occurred)
- return -1;
-
- xmlrpc_DECREF(result);
- return 0;
-}
-
-void ctx::login(const char* login, const char* passwd)
-{
- xmlrpc_value* result = call("User.login", "({s:s,s:s})", "login", login, "password", passwd);
-//TODO: with URL like http://bugzilla.redhat.com (that is, with http: instead of https:)
-//we are getting this error:
-//Logging into Bugzilla at http://bugzilla.redhat.com
-//Can't login. Server said: HTTP response code is 301, not 200
-//But this is a 301 redirect! We _can_ follow it if we configure curl to understand that!
- if (!result)
- error_msg_and_die("Can't login. Server said: %s", env.fault_string);
- xmlrpc_DECREF(result);
-}
-
-void ctx::logout()
-{
- xmlrpc_value* result = call("User.logout", "(s)", "");
- if (result)
- xmlrpc_DECREF(result);
-
- throw_if_xml_fault_occurred(&env);
-}
-
-} /* namespace */
-
-
-static void report_to_bugzilla(
- const char *dump_dir_name,
- map_string_h *settings)
-{
- struct dump_dir *dd = dd_opendir(dump_dir_name, /*flags:*/ 0);
- if (!dd)
- xfunc_die(); /* dd_opendir already emitted error msg */
- problem_data_t *problem_data = create_problem_data_from_dump_dir(dd);
- dd_close(dd);
-
- const char *env;
- const char *login;
- const char *password;
- const char *bugzilla_xmlrpc;
- const char *bugzilla_url;
- bool ssl_verify;
-
- env = getenv("Bugzilla_Login");
- login = env ? env : get_map_string_item_or_empty(settings, "Login");
- env = getenv("Bugzilla_Password");
- password = env ? env : get_map_string_item_or_empty(settings, "Password");
- if (!login[0] || !password[0])
- error_msg_and_die(_("Empty login or password, please check your configuration"));
-
- env = getenv("Bugzilla_BugzillaURL");
- bugzilla_url = env ? env : get_map_string_item_or_empty(settings, "BugzillaURL");
- if (!bugzilla_url[0])
- bugzilla_url = "https://bugzilla.redhat.com";
- bugzilla_xmlrpc = xasprintf("%s"XML_RPC_SUFFIX, bugzilla_url);
-
- env = getenv("Bugzilla_SSLVerify");
- ssl_verify = string_to_bool(env ? env : get_map_string_item_or_empty(settings, "SSLVerify"));
-
- const char *component = get_problem_item_content_or_NULL(problem_data, FILENAME_COMPONENT);
- const char *duphash = get_problem_item_content_or_NULL(problem_data, FILENAME_DUPHASH);
- if (!duphash)
- error_msg_and_die(_("Essential file '%s' is missing, can't continue.."),
- FILENAME_DUPHASH);
-
- if (!*duphash)
- error_msg_and_die(_("Essential file '%s' is empty, can't continue.."),
- FILENAME_DUPHASH);
-
- const char *release = get_problem_item_content_or_NULL(problem_data, FILENAME_OS_RELEASE);
- if (!release) /* Old dump dir format compat. Remove in abrt-2.1 */
- release = get_problem_item_content_or_NULL(problem_data, "release");
-
- ctx bz_server(bugzilla_xmlrpc, ssl_verify);
-
- log(_("Logging into Bugzilla at %s"), bugzilla_url);
- bz_server.login(login, password);
-
- log(_("Checking for duplicates"));
-
- char *product = NULL;
- char *version = NULL;
- parse_release_for_bz(release, &product, &version);
- free(version);
-
- xmlrpc_value *result;
- if (strcmp(product, "Fedora") == 0)
- result = bz_server.call_quicksearch_duphash(component, product, duphash);
- else
- result = bz_server.call_quicksearch_duphash(component, NULL, duphash);
-
- if (!result)
- throw_if_xml_fault_occurred(&bz_server.env);
-
- xmlrpc_value *all_bugs = bz_server.get_member("bugs", result);
- xmlrpc_DECREF(result);
-
- if (!all_bugs)
- {
- throw_if_xml_fault_occurred(&bz_server.env);
- error_msg_and_die(_("Missing mandatory member 'bugs'"));
- }
-
- xmlrpc_int32 bug_id = -1;
- int all_bugs_size = bz_server.get_array_size(all_bugs);
- struct bug_info bz;
- int depend_on_bugno = -1;
- if (all_bugs_size > 0)
- {
- bug_id = bz_server.get_bug_id(all_bugs);
- xmlrpc_DECREF(all_bugs);
- if (bug_id == -1)
- throw_if_xml_fault_occurred(&bz_server.env);
-
- bug_info_init(&bz);
- if (bz_server.get_bug_info(&bz, bug_id) == -1)
- {
- bug_info_destroy(&bz);
- throw_if_xml_fault_occurred(&bz_server.env);
- error_msg_and_die(_("get_bug_info() failed. Could not collect all mandatory information"));
- }
-
- if (strcmp(bz.bug_product, product) != 0)
- {
- depend_on_bugno = bug_id;
- bug_info_destroy(&bz);
- result = bz_server.call_quicksearch_duphash(component, release, duphash);
- if (!result)
- throw_if_xml_fault_occurred(&bz_server.env);
-
- all_bugs = bz_server.get_member("bugs", result);
- xmlrpc_DECREF(result);
-
- if (!all_bugs)
- {
- throw_if_xml_fault_occurred(&bz_server.env);
- error_msg_and_die(_("Missing mandatory member 'bugs'"));
- }
-
- all_bugs_size = bz_server.get_array_size(all_bugs);
- if (all_bugs_size > 0)
- {
- bug_id = bz_server.get_bug_id(all_bugs);
- xmlrpc_DECREF(all_bugs);
- if (bug_id == -1)
- throw_if_xml_fault_occurred(&bz_server.env);
-
- bug_info_init(&bz);
- if (bz_server.get_bug_info(&bz, bug_id) == -1)
- {
- bug_info_destroy(&bz);
- throw_if_xml_fault_occurred(&bz_server.env);
- error_msg_and_die(_("get_bug_info() failed. Could not collect all mandatory information"));
- }
- }
- else
- xmlrpc_DECREF(all_bugs);
- }
- }
- free(product);
-
- if (all_bugs_size < 0)
- {
- throw_if_xml_fault_occurred(&bz_server.env);
- }
- else if (all_bugs_size == 0) // Create new bug
- {
- log(_("Creating a new bug"));
- bug_id = bz_server.new_bug(problem_data, depend_on_bugno);
- if (bug_id < 0)
- {
- throw_if_xml_fault_occurred(&bz_server.env);
- error_msg_and_die(_("Bugzilla entry creation failed"));
- }
-
- log("Adding attachments to bug %ld", (long)bug_id);
- char bug_id_str[sizeof(long)*3 + 2];
- sprintf(bug_id_str, "%ld", (long) bug_id);
- int ret = bz_server.add_attachments(bug_id_str, problem_data);
- if (ret == -1)
- {
- throw_if_xml_fault_occurred(&bz_server.env);
- }
-
- log(_("Logging out"));
- bz_server.logout();
-
- log("Status: NEW %s/show_bug.cgi?id=%u",
- bugzilla_url,
- (int)bug_id
- );
- return;
- }
-
- if (all_bugs_size > 1)
- {
- // When someone clones bug it has same duphash, so we can find more than 1.
- // Need to be checked if component is same.
- VERB3 log("Bugzilla has %u reports with same duphash '%s'", all_bugs_size, duphash);
- }
-
- // decision based on state
- log(_("Bug is already reported: %i"), bug_id);
-
- xmlrpc_int32 original_bug_id = bug_id;
- if ((strcmp(bz.bug_status, "CLOSED") == 0) && (strcmp(bz.bug_resolution, "DUPLICATE") == 0))
- {
- for (int ii = 0; ii <= MAX_HOPS; ii++)
- {
- if (ii == MAX_HOPS)
- {
- VERB3 log("Bugzilla could not find a parent of bug %d", (int)original_bug_id);
- bug_info_destroy(&bz);
- error_msg_and_die(_("Bugzilla couldn't find parent of bug %d"), (int)original_bug_id);
- }
-
- log("Bug %d is a duplicate, using parent bug %d", bug_id, (int)bz.bug_dup_id);
- bug_id = bz.bug_dup_id;
- bug_info_destroy(&bz);
- bug_info_init(&bz);
-
- if (bz_server.get_bug_info(&bz, bug_id) == -1)
- {
- bug_info_destroy(&bz);
- if (bz_server.env.fault_occurred)
- {
- throw_if_xml_fault_occurred(&bz_server.env);
- }
- error_msg_and_die(_("get_bug_info() failed. Could not collect all mandatory information"));
- }
-
- // found a bug which is not CLOSED as DUPLICATE
- if (bz.bug_dup_id == -1)
- break;
- }
- }
-
- if (strcmp(bz.bug_status, "CLOSED") != 0)
- {
- int status = 0;
- if ((strcmp(bz.bug_reporter, login) != 0)
- && (g_list_find(bz.bug_cc, login)))
- {
- log(_("Add %s to CC list"), login);
- status = bz_server.add_plus_one_cc(bug_id, login);
- }
-
- if (status == -1)
- {
- bug_info_destroy(&bz);
- throw_if_xml_fault_occurred(&bz_server.env);
- }
-
- char *dsc = make_description_comment(problem_data);
- if (dsc)
- {
- const char* package = get_problem_item_content_or_NULL(problem_data, FILENAME_PACKAGE);
- const char* release = get_problem_item_content_or_NULL(problem_data, FILENAME_OS_RELEASE);
- if (!release) /* Old dump dir format compat. Remove in abrt-2.1 */
- release = get_problem_item_content_or_NULL(problem_data, "release");
- const char* arch = get_problem_item_content_or_NULL(problem_data, FILENAME_ARCHITECTURE);
- const char* is_private = get_problem_item_content_or_NULL(problem_data, "is_private");
-
- char *full_dsc = xasprintf("Package: %s\n"
- "Architecture: %s\n"
- "OS Release: %s\n"
- "%s", package, arch, release, dsc
- );
-
- log(_("Adding new comment to bug %d"), (int)bug_id);
-
- free(dsc);
-
- bool is_priv = is_private && string_to_bool(is_private);
- if (bz_server.add_comment(bug_id, full_dsc, is_priv) == -1)
- {
- free(full_dsc);
- bug_info_destroy(&bz);
- throw_xml_fault(&bz_server.env);
- }
- free(full_dsc);
- }
- }
-
- log(_("Logging out"));
- bz_server.logout();
-
- log("Status: %s%s%s %s/show_bug.cgi?id=%u",
- bz.bug_status,
- bz.bug_resolution ? " " : "",
- bz.bug_resolution ? bz.bug_resolution : "",
- bugzilla_url,
- (int)bug_id
- );
-
- dd = dd_opendir(dump_dir_name, /*flags:*/ 0);
- if (dd)
- {
- char *msg = xasprintf("Bugzilla: URL=%s/show_bug.cgi?id=%u", bugzilla_url, (int)bug_id);
- add_reported_to(dd, msg);
- free(msg);
- dd_close(dd);
- }
-
- free_problem_data(problem_data);
- bug_info_destroy(&bz);
-}
-
-int main(int argc, char **argv)
-{
- abrt_init(argv);
-
- map_string_h *settings = new_map_string();
- const char *dump_dir_name = ".";
- GList *conf_file = NULL;
-
- /* Can't keep these strings/structs static: _() doesn't support that */
- const char *program_usage_string = _(
- "\b [-v] -c CONFFILE -d DIR\n"
- "\n"
- "Reports problem to Bugzilla"
- );
- enum {
- OPT_v = 1 << 0,
- OPT_d = 1 << 1,
- OPT_c = 1 << 2,
- };
- /* Keep enum above and order of options below in sync! */
- struct options program_options[] = {
- OPT__VERBOSE(&g_verbose),
- OPT_STRING('d', NULL, &dump_dir_name, "DIR" , _("Dump directory")),
- OPT_LIST( 'c', NULL, &conf_file , "FILE", _("Configuration file (may be given many times)")),
- OPT_END()
- };
- /*unsigned opts =*/ parse_opts(argc, argv, program_options, program_usage_string);
-
- export_abrt_envvars(0);
-
- while (conf_file)
- {
- char *fn = (char *)conf_file->data;
- VERB1 log("Loading settings from '%s'", fn);
- load_conf_file(fn, settings, /*skip key w/o values:*/ true);
- VERB3 log("Loaded '%s'", fn);
- conf_file = g_list_remove(conf_file, fn);
- }
-
- VERB1 log("Initializing XML-RPC library");
- xmlrpc_env env;
- xmlrpc_env_init(&env);
- xmlrpc_client_setup_global_const(&env);
- if (env.fault_occurred)
- error_msg_and_die("XML-RPC Fault: %s(%d)", env.fault_string, env.fault_code);
- xmlrpc_env_clean(&env);
-
- report_to_bugzilla(dump_dir_name, settings);
-
- free_map_string(settings);
- return 0;
-}
diff --git a/src/plugins/abrt-action-install-debuginfo.py b/src/plugins/abrt-action-install-debuginfo.py
index a7968488..e15fc12b 100755
--- a/src/plugins/abrt-action-install-debuginfo.py
+++ b/src/plugins/abrt-action-install-debuginfo.py
@@ -96,7 +96,7 @@ def unpack_rpm(package_nevra, files, tmp_dir, destdir, keeprpm):
package_name = package_nevra + ".rpm"
package_full_path = tmp_dir + "/" + package_name
log1("Extracting %s to %s", package_full_path, destdir)
- log2(files)
+ log2("%s", files)
print _("Extracting cpio from %s") % (package_full_path)
unpacked_cpio_path = tmp_dir + "/unpacked.cpio"
try:
@@ -492,7 +492,7 @@ if __name__ == "__main__":
missing = filter_installed_debuginfos(b_ids, cachedir)
if missing:
- log2(missing)
+ log2("%s", missing)
print _("Coredump references %u debuginfo files, %u of them are not installed") % (len(b_ids), len(missing))
# TODO: should we pass keep_rpms=keeprpms to DebugInfoDownload here??
diff --git a/src/plugins/abrt-action-mailx.txt b/src/plugins/abrt-action-mailx.txt
index 2fb11bef..a12c2bf1 100644
--- a/src/plugins/abrt-action-mailx.txt
+++ b/src/plugins/abrt-action-mailx.txt
@@ -53,7 +53,7 @@ problem happens. When this is desired, modify the event configuration
file to run the tool on the 'post-create' event:
------------
-EVENT=post-create abrt-action-mailx
+EVENT=post-create abrt-action-mailx
------------
OPTIONS
diff --git a/src/plugins/abrt-retrace-client.c b/src/plugins/abrt-retrace-client.c
index aa232475..524fa186 100644
--- a/src/plugins/abrt-retrace-client.c
+++ b/src/plugins/abrt-retrace-client.c
@@ -103,7 +103,7 @@ static int create_archive(bool unlink_temp)
xmove_fd(tar_xz_pipe[0], STDIN_FILENO);
xdup2(tempfd, STDOUT_FILENO);
execvp(xz_args[0], (char * const*)xz_args);
- perror_msg("Can't execute '%s'", xz_args[0]);
+ perror_msg("Can't execute '%s'", xz_args[0]);
}
close(tar_xz_pipe[0]);
@@ -130,7 +130,7 @@ static int create_archive(bool unlink_temp)
xmove_fd(xopen("/dev/null", O_RDWR), STDIN_FILENO);
xmove_fd(tar_xz_pipe[1], STDOUT_FILENO);
execvp(tar_args[0], (char * const*)tar_args);
- perror_msg("Can't execute '%s'", tar_args[0]);
+ perror_msg("Can't execute '%s'", tar_args[0]);
}
close(tar_xz_pipe[1]);
diff --git a/src/plugins/abrt_rh_support.c b/src/plugins/abrt_rh_support.c
index 9a48485b..b83f041e 100644
--- a/src/plugins/abrt_rh_support.c
+++ b/src/plugins/abrt_rh_support.c
@@ -421,7 +421,7 @@ send_report_to_new_case(const char* baseURL,
atch_state->http_resp_code,
errmsg ? ": " : "",
errmsg ? errmsg : ""
- );
+ );
break;
case 200:
diff --git a/src/plugins/ccpp_events.conf b/src/plugins/ccpp_events.conf
index eb3384a6..b56601e9 100644
--- a/src/plugins/ccpp_events.conf
+++ b/src/plugins/ccpp_events.conf
@@ -9,7 +9,7 @@ EVENT=post-create analyzer=CCpp
test -r /var/log/messages || exit 0
executable=`cat executable` &&
base_executable=${executable##*/} &&
- grep -e "$base_executable" /var/log/messages | tail -999 >var_log_messages &&
+ grep -F -e "$base_executable" /var/log/messages | tail -999 >var_log_messages &&
echo "Element 'var_log_messages' saved"
)
@@ -18,7 +18,7 @@ EVENT=analyze_xsession_errors analyzer=CCpp
test -r ~/.xsession-errors || { echo "Can't read ~/.xsession-errors"; exit 1; }
executable=`cat executable` &&
base_executable=${executable##*/} &&
- grep -e "$base_executable" ~/.xsession-errors | tail -999 >xsession_errors &&
+ grep -F -e "$base_executable" ~/.xsession-errors | tail -999 >xsession_errors &&
echo "Element 'xsession_errors' saved"
# TODO: can we still specify additional directories to search for debuginfos,
diff --git a/src/plugins/rhbz.c b/src/plugins/rhbz.c
new file mode 100644
index 00000000..90587e5e
--- /dev/null
+++ b/src/plugins/rhbz.c
@@ -0,0 +1,482 @@
+/*
+ Copyright (C) 2011 ABRT team
+ Copyright (C) 2011 RedHat Inc
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+#include "abrtlib.h"
+#include "rhbz.h"
+
+#define MAX_HOPS 5
+
+struct bug_info *new_bug_info()
+{
+ struct bug_info *bi = xzalloc(sizeof(struct bug_info));
+ bi->bi_dup_id = -1;
+
+ return bi;
+}
+
+void free_bug_info(struct bug_info *bi)
+{
+ if (!bi)
+ return;
+
+ free((void*)bi->bi_status);
+ free((void*)bi->bi_resolution);
+ free((void*)bi->bi_reporter);
+ free((void*)bi->bi_product);
+
+ list_free_with_free(bi->bi_cc_list);
+
+ bi->bi_status = NULL;
+ bi->bi_resolution = NULL;
+ bi->bi_reporter = NULL;
+ bi->bi_product = NULL;
+
+ bi->bi_cc_list = NULL;
+
+ free(bi);
+}
+
+void rhbz_login(struct abrt_xmlrpc *ax, const char* login, const char* passwd)
+{
+ xmlrpc_value* result = abrt_xmlrpc_call(ax, "User.login", "({s:s,s:s})",
+ "login", login, "password", passwd);
+
+//TODO: with URL like http://bugzilla.redhat.com (that is, with http: instead of https:)
+//we are getting this error:
+//Logging into Bugzilla at http://bugzilla.redhat.com
+//Can't login. Server said: HTTP response code is 301, not 200
+//But this is a 301 redirect! We _can_ follow it if we configure curl to understand that!
+ xmlrpc_DECREF(result);
+}
+
+xmlrpc_value *rhbz_search_duphash(struct abrt_xmlrpc *ax, const char *component,
+ const char *product, const char *duphash)
+{
+ char *query = NULL;
+ if (!product)
+ query = xasprintf("ALL component:\"%s\" whiteboard:\"%s\"", component, duphash);
+ else
+ query = xasprintf("ALL component:\"%s\" whiteboard:\"%s\" product:\"%s\"",
+ component, duphash, product);
+
+ VERB3 log("search for '%s'", query);
+ xmlrpc_value *ret = abrt_xmlrpc_call(ax, "Bug.search", "({s:s})",
+ "quicksearch", query);
+ free(query);
+ return ret;
+}
+
+xmlrpc_value *rhbz_get_member(const char *member, xmlrpc_value *xml)
+{
+ xmlrpc_env env;
+ xmlrpc_env_init(&env);
+
+ xmlrpc_value *value = NULL;
+ /* The xmlrpc_struct_find_value functions consider "not found" to be
+ * a normal result. If a member of the structure with the specified key
+ * exists, it returns it as a handle to an xmlrpc_value. If not, it returns
+ * NULL in place of that handle.
+ */
+ xmlrpc_struct_find_value(&env, xml, member, &value);
+ if (env.fault_occurred)
+ abrt_xmlrpc_error(&env);
+
+ return value;
+}
+
+/* The only way this can fail is if arrayP is not actually an array XML-RPC
+ * value. So it is usually not worth checking *envP.
+ * die or return size of array
+ */
+int rhbz_array_size(xmlrpc_value *xml)
+{
+ xmlrpc_env env;
+ xmlrpc_env_init(&env);
+
+ int size = xmlrpc_array_size(&env, xml);
+ if (env.fault_occurred)
+ abrt_xmlrpc_die(&env);
+
+ return size;
+}
+
+/* die or return bug id; each bug must have bug id otherwise xml is corrupted */
+int rhbz_bug_id(xmlrpc_value* xml)
+{
+ xmlrpc_env env;
+ xmlrpc_env_init(&env);
+
+ xmlrpc_value *item = NULL;
+ xmlrpc_value *bug = NULL;
+ int bug_id = -1;;
+
+ xmlrpc_array_read_item(&env, xml, 0, &item);
+ if (env.fault_occurred)
+ abrt_xmlrpc_die(&env);
+
+ bug = rhbz_get_member("bug_id", item);
+ xmlrpc_DECREF(item);
+ if (!bug)
+ abrt_xmlrpc_die(&env);
+
+ xmlrpc_read_int(&env, bug, &bug_id);
+ xmlrpc_DECREF(bug);
+ if (env.fault_occurred)
+ abrt_xmlrpc_die(&env);
+
+ VERB3 log("found bug_id %i", bug_id);
+ return bug_id;
+}
+
+/* die when mandatory value is missing (set flag RHBZ_MANDATORY_MEMB)
+ * or return appropriate string or NULL when fail;
+ */
+// TODO: npajkovs: add flag to read xmlrpc_read_array_item first
+void *rhbz_bug_read_item(const char *memb, xmlrpc_value *xml, int flags)
+{
+ xmlrpc_env env;
+ xmlrpc_env_init(&env);
+
+ xmlrpc_value *member = rhbz_get_member(memb, xml);
+
+ const char *string = NULL;
+
+ if (!member)
+ goto die;
+
+ if (IS_READ_STR(flags))
+ {
+ xmlrpc_read_string(&env, member, &string);
+ xmlrpc_DECREF(member);
+ if (env.fault_occurred)
+ abrt_xmlrpc_die(&env);
+
+ if (!*string)
+ goto die;
+
+ VERB3 log("found %s: '%s'", memb, string);
+ return (void*)string;
+ }
+
+ {
+ if (IS_READ_INT(flags))
+ {
+ int *integer = xmalloc(sizeof(int));
+ xmlrpc_read_int(&env, member, integer);
+ xmlrpc_DECREF(member);
+ if (env.fault_occurred)
+ abrt_xmlrpc_die(&env);
+
+ VERB3 log("found %s: '%i'", memb, *integer);
+ return (void*)integer;
+ }
+ }
+die:
+ free((void*)string);
+ if (IS_MANDATORY(flags))
+ error_msg_and_die(_("Looks like corrupted xml response, because '%s'"
+ " member is missing."), memb);
+
+ return NULL;
+}
+
+GList *rhbz_bug_cc(xmlrpc_value* result_xml)
+{
+ xmlrpc_env env;
+ xmlrpc_env_init(&env);
+
+ xmlrpc_value* cc_member = rhbz_get_member("cc", result_xml);
+ if (!cc_member)
+ return NULL;
+
+ int array_size = rhbz_array_size(cc_member);
+
+ VERB3 log("count members on cc %i", array_size);
+ GList *cc_list = NULL;
+
+ for (int i = 0; i < array_size; ++i)
+ {
+ xmlrpc_value* item = NULL;
+ xmlrpc_array_read_item(&env, cc_member, i, &item);
+ if (env.fault_occurred)
+ abrt_xmlrpc_die(&env);
+
+ if (!item)
+ continue;
+
+ const char* cc = NULL;
+ xmlrpc_read_string(&env, item, &cc);
+ xmlrpc_DECREF(item);
+ if (env.fault_occurred)
+ abrt_xmlrpc_die(&env);
+
+ if (*cc != '\0')
+ {
+ cc_list = g_list_append(cc_list, (char*)cc);
+ VERB3 log("member on cc is %s", cc);
+ continue;
+ }
+ free((char*)cc);
+ }
+ xmlrpc_DECREF(cc_member);
+ return cc_list;
+}
+
+struct bug_info *rhbz_bug_info(struct abrt_xmlrpc *ax, int bug_id)
+{
+ struct bug_info *bz = new_bug_info();
+ xmlrpc_value *xml_bug_response = abrt_xmlrpc_call(ax, "bugzilla.getBug",
+ "(i)", bug_id);
+
+ int *ret = (int*)rhbz_bug_read_item("bug_id", xml_bug_response,
+ RHBZ_MANDATORY_MEMB | RHBZ_READ_INT);
+ bz->bi_id = *ret;
+ free(ret);
+ bz->bi_product = rhbz_bug_read_item("product", xml_bug_response,
+ RHBZ_MANDATORY_MEMB | RHBZ_READ_STR);
+ bz->bi_reporter = rhbz_bug_read_item("reporter", xml_bug_response,
+ RHBZ_MANDATORY_MEMB | RHBZ_READ_STR);
+ bz->bi_status = rhbz_bug_read_item("bug_status", xml_bug_response,
+ RHBZ_MANDATORY_MEMB | RHBZ_READ_STR);
+ bz->bi_resolution = rhbz_bug_read_item("resolution", xml_bug_response,
+ RHBZ_READ_STR);
+
+ if (strcmp(bz->bi_status, "CLOSED") == 0 && !bz->bi_resolution)
+ error_msg_and_die(_("Bug %i is CLOSED, but it has no RESOLUTION"), bz->bi_id);
+
+ ret = (int*)rhbz_bug_read_item("dup_id", xml_bug_response,
+ RHBZ_READ_INT);
+ if (strcmp(bz->bi_status, "CLOSED") == 0
+ && strcmp(bz->bi_resolution, "DUPLICATE") == 0
+ && !ret)
+ {
+ error_msg_and_die(_("Bug %i is CLOSED as DUPLICATE, but it has no DUP_ID"),
+ bz->bi_id);
+ }
+
+ bz->bi_dup_id = (ret) ? *ret: -1;
+ free(ret);
+
+ bz->bi_cc_list = rhbz_bug_cc(xml_bug_response);
+
+ xmlrpc_DECREF(xml_bug_response);
+
+ return bz;
+}
+
+/* suppress mail notify by {s:i} (nomail:1) (driven by flag) */
+int rhbz_new_bug(struct abrt_xmlrpc *ax, problem_data_t *problem_data,
+ int depend_on_bug)
+{
+ const char *package = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_PACKAGE);
+ const char *component = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_COMPONENT);
+ const char *release = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_OS_RELEASE);
+ if (!release) /* Old dump dir format compat. Remove in abrt-2.1 */
+ release = get_problem_item_content_or_NULL(problem_data, "release");
+ const char *arch = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_ARCHITECTURE);
+ const char *duphash = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_DUPHASH);
+ const char *reason = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_REASON);
+ const char *function = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_CRASH_FUNCTION);
+ const char *analyzer = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_ANALYZER);
+ const char *tainted_str = get_problem_item_content_or_NULL(problem_data,
+ FILENAME_TAINTED);
+
+ struct strbuf *buf_summary = strbuf_new();
+ strbuf_append_strf(buf_summary, "[abrt] %s", package);
+
+ if (function != NULL && strlen(function) < 30)
+ strbuf_append_strf(buf_summary, ": %s", function);
+
+ if (reason != NULL)
+ strbuf_append_strf(buf_summary, ": %s", reason);
+
+ if (tainted_str && analyzer
+ && (strcmp(analyzer, "Kerneloops") == 0)
+ ) {
+ //TODO: fix me; basically it doesn't work as it suppose to work
+ // I will fix it immediately when this patch land into abrt git
+ /*
+ unsigned long tainted = xatoi_positive(tainted_str);
+ const char *tainted_warning = tainted_string(tainted);
+ if (tainted_warning)
+ strbuf_append_strf(buf_summary, ": TAINTED %s", tainted_warning);
+ */
+ }
+
+ char *status_whiteboard = xasprintf("abrt_hash:%s", duphash);
+
+ char *bz_dsc = make_description_bz(problem_data);
+ char *full_dsc = xasprintf("abrt version: "VERSION"\n%s", bz_dsc);
+ free(bz_dsc);
+
+ char *product = NULL;
+ char *version = NULL;
+ parse_release_for_bz(release, &product, &version);
+
+ xmlrpc_value* result = NULL;
+ char *summary = strbuf_free_nobuf(buf_summary);
+ if (depend_on_bug > -1)
+ {
+ result = abrt_xmlrpc_call(ax, "Bug.create", "({s:s,s:s,s:s,s:s,s:s,s:s,s:s,s:i})",
+ "product", product,
+ "component", component,
+ "version", version,
+ "summary", summary,
+ "description", full_dsc,
+ "status_whiteboard", status_whiteboard,
+ "platform", arch,
+ "dependson", depend_on_bug);
+ }
+ else
+ {
+ result = abrt_xmlrpc_call(ax, "Bug.create", "({s:s,s:s,s:s,s:s,s:s,s:s,s:s})",
+ "product", product,
+ "component", component,
+ "version", version,
+ "summary", summary,
+ "description", full_dsc,
+ "status_whiteboard", status_whiteboard,
+ "platform", arch);
+ }
+ free(status_whiteboard);
+ free(product);
+ free(version);
+ free(summary);
+ free(full_dsc);
+
+ if (!result)
+ return -1;
+
+ int *r = rhbz_bug_read_item("id", result, RHBZ_MANDATORY_MEMB | RHBZ_READ_INT);
+ xmlrpc_DECREF(result);
+ int new_bug_id = *r;
+ free(r);
+
+ log(_("New bug id: %i"), new_bug_id);
+ return new_bug_id;
+}
+
+/* suppress mail notify by {s:i} (nomail:1) (driven by flag) */
+int rhbz_attachment(struct abrt_xmlrpc *ax, const char *filename,
+ const char *bug_id, const char *data)
+{
+ char *encoded64 = encode_base64(data, strlen(data));
+ char *fn = xasprintf("File: %s", filename);
+ xmlrpc_value* result;
+ result= abrt_xmlrpc_call(ax, "bugzilla.addAttachment", "(s{s:s,s:s,s:s,s:s})",
+ bug_id,
+ "description", fn,
+ "filename", filename,
+ "contenttype", "text/plain",
+ "data", encoded64);
+ free(encoded64);
+ free(fn);
+ if (!result)
+ return -1;
+
+ xmlrpc_DECREF(result);
+
+ return 0;
+}
+
+/* suppress mail notify by {s:i} (nomail:1) (driven by flag) */
+int rhbz_attachments(struct abrt_xmlrpc *ax, const char *bug_id,
+ problem_data_t *problem_data)
+{
+ GHashTableIter iter;
+ char *name;
+ struct problem_item *value;
+ g_hash_table_iter_init(&iter, problem_data);
+ while (g_hash_table_iter_next(&iter, (void**)&name, (void**)&value))
+ {
+ const char *content = value->content;
+
+ // We were special-casing FILENAME_BACKTRACE here, but karel says
+ // he can retrieve it in inlined form from comments too.
+ if ((value->flags & CD_FLAG_TXT)
+ && (strlen(content) > CD_TEXT_ATT_SIZE /*|| (strcmp(name, FILENAME_BACKTRACE) == 0)*/)
+ ) {
+ /* check if the attachment failed and try it once more */
+ rhbz_attachment(ax, name, bug_id, content);
+ }
+ }
+
+ return 0;
+}
+
+void rhbz_logout(struct abrt_xmlrpc *ax)
+{
+ xmlrpc_value* result = abrt_xmlrpc_call(ax, "User.logout", "(s)", "");
+ if (result)
+ xmlrpc_DECREF(result);
+}
+
+struct bug_info *rhbz_find_origin_bug_closed_duplicate(struct abrt_xmlrpc *ax,
+ struct bug_info *bi)
+{
+ struct bug_info *bi_tmp = new_bug_info();
+ bi_tmp->bi_id = bi->bi_id;
+ bi_tmp->bi_dup_id = bi->bi_dup_id;
+
+ for (int ii = 0; ii <= MAX_HOPS; ii++)
+ {
+ if (ii == MAX_HOPS)
+ error_msg_and_die(_("Bugzilla couldn't find parent of bug %d"), bi->bi_id);
+
+ log("Bug %d is a duplicate, using parent bug %d", bi_tmp->bi_id, bi_tmp->bi_dup_id);
+ int bug_id = bi_tmp->bi_dup_id;
+
+ free_bug_info(bi_tmp);
+ bi_tmp = rhbz_bug_info(ax, bug_id);
+
+ // found a bug which is not CLOSED as DUPLICATE
+ if (bi_tmp->bi_dup_id == -1)
+ break;
+ }
+
+ return bi_tmp;
+}
+
+/* suppress mail notify by {s:i} (nomail:1) */
+void rhbz_mail_to_cc(struct abrt_xmlrpc *ax, int bug_id, const char *mail)
+{
+ xmlrpc_value *result = abrt_xmlrpc_call(ax, "Bug.update", "({s:i,s:{s:(s)}})",
+ "ids", bug_id, "updates", "add_cc", mail);
+ if (result)
+ xmlrpc_DECREF(result);
+}
+
+void rhbz_add_comment(struct abrt_xmlrpc *ax, int bug_id, const char *comment,
+ int is_private)
+{
+ xmlrpc_value *result = abrt_xmlrpc_call(ax, "Bug.add_comment", "({s:i,s:s,s:b})",
+ "id", bug_id,
+ "comment", comment,
+ "private", is_private);
+ if (result)
+ xmlrpc_DECREF(result);
+}
diff --git a/src/plugins/rhbz.h b/src/plugins/rhbz.h
new file mode 100644
index 00000000..73d76f0a
--- /dev/null
+++ b/src/plugins/rhbz.h
@@ -0,0 +1,100 @@
+/*
+ Copyright (C) 2011 ABRT team
+ Copyright (C) 2011 RedHat Inc
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+*/
+
+#ifndef RHBZ_H
+#define RHBZ_H
+
+/* include/stdint.h: typedef int int32_t;
+ * include/xmlrpc-c/base.h: typedef int32_t xmlrpc_int32;
+ */
+
+#include "abrt_xmlrpc.h"
+#include "abrt_problem_data.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum {
+ RHBZ_MANDATORY_MEMB = (1 << 0),
+ RHBZ_READ_STR = (1 << 1),
+ RHBZ_READ_INT = (1 << 2),
+};
+
+#define IS_MANDATORY(flags) ((flags) & RHBZ_MANDATORY_MEMB)
+#define IS_READ_STR(flags) ((flags) & RHBZ_READ_STR)
+#define IS_READ_INT(flags) ((flags) & RHBZ_READ_INT)
+
+struct bug_info {
+ int bi_id;
+ int bi_dup_id;
+
+ const char *bi_status;
+ const char *bi_resolution;
+ const char *bi_reporter;
+ const char *bi_product;
+
+ GList *bi_cc_list;
+};
+
+struct bug_info *new_bug_info();
+void free_bug_info(struct bug_info *bz);
+
+void rhbz_login(struct abrt_xmlrpc *ax, const char *login, const char *passwd);
+
+void rhbz_mail_to_cc(struct abrt_xmlrpc *ax, int bug_id, const char *mail);
+
+void rhbz_add_comment(struct abrt_xmlrpc *ax, int bug_id, const char *comment,
+ int is_private);
+
+void *rhbz_bug_read_item(const char *memb, xmlrpc_value *xml, int flags);
+
+void rhbz_logout(struct abrt_xmlrpc *ax);
+
+xmlrpc_value *rhbz_search_duphash(struct abrt_xmlrpc *ax, const char *component,
+ const char *release, const char *duphash);
+
+xmlrpc_value *rhbz_get_member(const char *member, xmlrpc_value *xml);
+
+int rhbz_array_size(xmlrpc_value *xml);
+
+int rhbz_bug_id(xmlrpc_value *xml);
+
+int rhbz_new_bug(struct abrt_xmlrpc *ax, problem_data_t *problem_data,
+ int depend_on_bug);
+
+int rhbz_attachments(struct abrt_xmlrpc *ax, const char *bug_id,
+ problem_data_t *problem_data);
+
+int rhbz_attachment(struct abrt_xmlrpc *ax, const char *filename,
+ const char *bug_id, const char *data);
+
+GList *rhbz_bug_cc(xmlrpc_value *result_xml);
+
+struct bug_info *rhbz_bug_info(struct abrt_xmlrpc *ax, int bug_id);
+
+
+struct bug_info *rhbz_find_origin_bug_closed_duplicate(struct abrt_xmlrpc *ax,
+ struct bug_info *bi);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/src/report-python/reportmodule.c b/src/report-python/reportmodule.c
index c40cfaf6..92f435ce 100644
--- a/src/report-python/reportmodule.c
+++ b/src/report-python/reportmodule.c
@@ -30,7 +30,7 @@ static PyMethodDef module_methods[] = {
{ NULL }
};
-#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
+#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */
#define PyMODINIT_FUNC void
#endif
PyMODINIT_FUNC