Diffstat (limited to 'lib/subunit')
-rw-r--r--  lib/subunit/Apache-2.0 | 202
-rw-r--r--  lib/subunit/BSD | 26
-rw-r--r--  lib/subunit/COPYING | 36
-rw-r--r--  lib/subunit/INSTALL | 32
-rw-r--r--  lib/subunit/MANIFEST.in | 20
-rw-r--r--  lib/subunit/Makefile.am | 141
-rw-r--r--  lib/subunit/NEWS | 344
-rw-r--r--  lib/subunit/README | 229
-rw-r--r--  lib/subunit/c++/README | 50
-rw-r--r--  lib/subunit/c++/SubunitTestProgressListener.cpp | 63
-rw-r--r--  lib/subunit/c++/SubunitTestProgressListener.h | 56
-rw-r--r--  lib/subunit/c/README | 68
-rw-r--r--  lib/subunit/c/include/subunit/child.h | 96
-rw-r--r--  lib/subunit/c/lib/child.c | 104
-rw-r--r--  lib/subunit/c/tests/test_child.c | 234
-rw-r--r--  lib/subunit/configure.ac | 75
-rwxr-xr-x  lib/subunit/filters/subunit-filter | 158
-rwxr-xr-x  lib/subunit/filters/subunit-ls | 48
-rwxr-xr-x  lib/subunit/filters/subunit-notify | 44
-rwxr-xr-x  lib/subunit/filters/subunit-stats | 41
-rwxr-xr-x  lib/subunit/filters/subunit-tags | 26
-rwxr-xr-x  lib/subunit/filters/subunit2csv | 23
-rwxr-xr-x  lib/subunit/filters/subunit2gtk | 259
-rwxr-xr-x  lib/subunit/filters/subunit2junitxml | 31
-rwxr-xr-x  lib/subunit/filters/subunit2pyunit | 48
-rwxr-xr-x  lib/subunit/filters/tap2subunit | 26
-rw-r--r--  lib/subunit/libcppunit_subunit.pc.in | 11
-rw-r--r--  lib/subunit/libsubunit.pc.in | 11
-rwxr-xr-x  lib/subunit/perl/Makefile.PL.in | 21
-rw-r--r--  lib/subunit/perl/lib/Subunit.pm | 183
-rw-r--r--  lib/subunit/perl/lib/Subunit/Diff.pm | 85
-rwxr-xr-x  lib/subunit/perl/subunit-diff | 31
-rw-r--r--  lib/subunit/python/iso8601/LICENSE | 20
-rw-r--r--  lib/subunit/python/iso8601/README | 26
-rw-r--r--  lib/subunit/python/iso8601/README.subunit | 5
-rw-r--r--  lib/subunit/python/iso8601/setup.py | 58
-rw-r--r--  lib/subunit/python/iso8601/test_iso8601.py | 111
-rw-r--r--  lib/subunit/python/subunit/__init__.py | 1314
-rw-r--r--  lib/subunit/python/subunit/chunked.py | 185
-rw-r--r--  lib/subunit/python/subunit/details.py | 119
-rw-r--r--  lib/subunit/python/subunit/filters.py | 125
-rw-r--r--  lib/subunit/python/subunit/iso8601.py | 133
-rw-r--r--  lib/subunit/python/subunit/progress_model.py | 106
-rwxr-xr-x  lib/subunit/python/subunit/run.py | 84
-rw-r--r--  lib/subunit/python/subunit/test_results.py | 678
-rw-r--r--  lib/subunit/python/subunit/tests/TestUtil.py | 80
-rw-r--r--  lib/subunit/python/subunit/tests/__init__.py | 43
-rwxr-xr-x  lib/subunit/python/subunit/tests/sample-script.py | 21
-rwxr-xr-x  lib/subunit/python/subunit/tests/sample-two-script.py | 7
-rw-r--r--  lib/subunit/python/subunit/tests/test_chunked.py | 152
-rw-r--r--  lib/subunit/python/subunit/tests/test_details.py | 112
-rw-r--r--  lib/subunit/python/subunit/tests/test_progress_model.py | 118
-rw-r--r--  lib/subunit/python/subunit/tests/test_run.py | 52
-rw-r--r--  lib/subunit/python/subunit/tests/test_subunit_filter.py | 370
-rw-r--r--  lib/subunit/python/subunit/tests/test_subunit_stats.py | 84
-rw-r--r--  lib/subunit/python/subunit/tests/test_subunit_tags.py | 69
-rw-r--r--  lib/subunit/python/subunit/tests/test_tap2subunit.py | 445
-rw-r--r--  lib/subunit/python/subunit/tests/test_test_protocol.py | 1337
-rw-r--r--  lib/subunit/python/subunit/tests/test_test_results.py | 572
-rwxr-xr-x  lib/subunit/runtests.py | 138
-rwxr-xr-x  lib/subunit/setup.py | 63
-rw-r--r--  lib/subunit/shell/README | 62
-rw-r--r--  lib/subunit/shell/share/subunit.sh | 61
-rwxr-xr-x  lib/subunit/shell/tests/test_function_output.sh | 97
-rwxr-xr-x  lib/subunit/shell/tests/test_source_library.sh | 108
65 files changed, 0 insertions, 9777 deletions
diff --git a/lib/subunit/Apache-2.0 b/lib/subunit/Apache-2.0
deleted file mode 100644
index d645695673..0000000000
--- a/lib/subunit/Apache-2.0
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/lib/subunit/BSD b/lib/subunit/BSD
deleted file mode 100644
index fa130cd529..0000000000
--- a/lib/subunit/BSD
+++ /dev/null
@@ -1,26 +0,0 @@
-Copyright (c) Robert Collins and Subunit contributors
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-3. Neither the name of Robert Collins nor the names of Subunit contributors
- may be used to endorse or promote products derived from this software
- without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY ROBERT COLLINS AND SUBUNIT CONTRIBUTORS ``AS IS''
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
diff --git a/lib/subunit/COPYING b/lib/subunit/COPYING
deleted file mode 100644
index 3ba50f8e08..0000000000
--- a/lib/subunit/COPYING
+++ /dev/null
@@ -1,36 +0,0 @@
-Subunit is licensed under two licenses, the Apache License, Version 2.0 or the
-3-clause BSD License. You may use this project under either of these licenses
-- choose the one that works best for you.
-
-We require contributions to be licensed under both licenses. The primary
-difference between them is that the Apache license takes care of potential
-issues with Patents and other intellectual property concerns. This is
-important to Subunit as Subunit wants to be license compatible in a very
-broad manner to allow reuse and incorporation into other projects.
-
-Generally every source file in Subunit needs a license grant under both these
-licenses. As the code is shipped as a single unit, a brief form is used:
-----
-Copyright (c) [yyyy][,yyyy]* [name or 'Subunit Contributors']
-
-Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-license at the users choice. A copy of both licenses are available in the
-project source as Apache-2.0 and BSD. You may not use this file except in
-compliance with one of these two licences.
-
-Unless required by applicable law or agreed to in writing, software
-distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-license you chose for the specific language governing permissions and
-limitations under that license.
-----
-
-Code that has been incorporated into Subunit from other projects will
-naturally be under its own license, and will retain that license.
-
-A known list of such code is maintained here:
-* The python/iso8601 module by Michael Twomey, distributed under an MIT style
- licence - see python/iso8601/LICENSE for details.
-* The runtests.py and python/subunit/tests/TestUtil.py modules are GPL test
-  support modules. They are not installed by Subunit - they are only ever
-  used on the build machine. Copyright 2004 Canonical Limited.
diff --git a/lib/subunit/INSTALL b/lib/subunit/INSTALL
deleted file mode 100644
index eeea734f6a..0000000000
--- a/lib/subunit/INSTALL
+++ /dev/null
@@ -1,32 +0,0 @@
-To install subunit
-------------------
-
-Bootstrap::
- autoreconf -vi
-Configure::
- ./configure
-Install::
- make install
-
-Dependencies
-------------
-
-* Python for the filters
-* 'testtools' (On Debian and Ubuntu systems the 'python-testtools' package,
- the testtools package on pypi, or https://launchpad.net/testtools) for
- the extended test API which permits attachments. Version 0.9.23 or newer is
- required. Of particular note, http://testtools.python-hosting.com/ is not
- the testtools you want.
-* A C compiler for the C bindings
-* Perl for the Perl tools (including subunit-diff)
-* Check to run the subunit test suite.
-* python-gtk2 if you wish to use subunit2gtk
-* python-junitxml if you wish to use subunit2junitxml
-* pkg-config for configure detection of supporting libraries.
-
-Binary packages
----------------
-
-A number of distributions now include subunit, so you can try installing it
-via your package manager. The authors maintain a personal package archive on
-Launchpad::
- https://launchpad.net/~testing-cabal/+archive/archive
diff --git a/lib/subunit/MANIFEST.in b/lib/subunit/MANIFEST.in
deleted file mode 100644
index eb98981628..0000000000
--- a/lib/subunit/MANIFEST.in
+++ /dev/null
@@ -1,20 +0,0 @@
-exclude .bzrignore
-exclude aclocal.m4
-prune autom4te.cache
-prune c
-prune c++
-prune compile
-exclude configure*
-exclude depcomp
-exclude INSTALL
-exclude install-sh
-exclude lib*
-exclude ltmain.sh
-prune m4
-exclude Makefile*
-exclude missing
-prune perl
-exclude py-compile
-prune shell
-exclude stamp-h1
-include NEWS
diff --git a/lib/subunit/Makefile.am b/lib/subunit/Makefile.am
deleted file mode 100644
index da1602037e..0000000000
--- a/lib/subunit/Makefile.am
+++ /dev/null
@@ -1,141 +0,0 @@
-EXTRA_DIST = \
- .bzrignore \
- Apache-2.0 \
- BSD \
- INSTALL \
- Makefile.am \
- NEWS \
- README \
- c++/README \
- c/README \
- c/check-subunit-0.9.3.patch \
- c/check-subunit-0.9.5.patch \
- c/check-subunit-0.9.6.patch \
- perl/Makefile.PL.in \
- perl/lib/Subunit.pm \
- perl/lib/Subunit/Diff.pm \
- perl/subunit-diff \
- python/iso8601/LICENSE \
- python/iso8601/README \
- python/iso8601/README.subunit \
- python/iso8601/setup.py \
- python/iso8601/test_iso8601.py \
- python/subunit/tests/TestUtil.py \
- python/subunit/tests/__init__.py \
- python/subunit/tests/sample-script.py \
- python/subunit/tests/sample-two-script.py \
- python/subunit/tests/test_chunked.py \
- python/subunit/tests/test_details.py \
- python/subunit/tests/test_progress_model.py \
- python/subunit/tests/test_subunit_filter.py \
- python/subunit/tests/test_run.py \
- python/subunit/tests/test_subunit_stats.py \
- python/subunit/tests/test_subunit_tags.py \
- python/subunit/tests/test_tap2subunit.py \
- python/subunit/tests/test_test_protocol.py \
- python/subunit/tests/test_test_results.py \
- runtests.py \
- setup.py \
- shell/README \
- shell/share/subunit.sh \
- shell/subunit-ui.patch \
- shell/tests/test_function_output.sh \
- shell/tests/test_source_library.sh
-
-ACLOCAL_AMFLAGS = -I m4
-
-include_subunitdir = $(includedir)/subunit
-
-dist_bin_SCRIPTS = \
- filters/subunit-filter \
- filters/subunit-ls \
- filters/subunit-notify \
- filters/subunit-stats \
- filters/subunit-tags \
- filters/subunit2csv \
- filters/subunit2gtk \
- filters/subunit2junitxml \
- filters/subunit2pyunit \
- filters/tap2subunit
-
-TESTS_ENVIRONMENT = SHELL_SHARE='$(top_srcdir)/shell/share/' PYTHONPATH='$(abs_top_srcdir)/python':${PYTHONPATH}
-TESTS = runtests.py $(check_PROGRAMS)
-
-## install libsubunit.pc
-pcdatadir = $(libdir)/pkgconfig
-pcdata_DATA = \
- libsubunit.pc \
- libcppunit_subunit.pc
-
-pkgpython_PYTHON = \
- python/subunit/__init__.py \
- python/subunit/chunked.py \
- python/subunit/details.py \
- python/subunit/filters.py \
- python/subunit/iso8601.py \
- python/subunit/progress_model.py \
- python/subunit/run.py \
- python/subunit/test_results.py
-
-lib_LTLIBRARIES = libsubunit.la
-lib_LTLIBRARIES += libcppunit_subunit.la
-
-include_subunit_HEADERS = \
- c/include/subunit/child.h \
- c++/SubunitTestProgressListener.h
-
-check_PROGRAMS = \
- c/tests/test_child
-
-check_SCRIPTS = \
- runtests.py
-
-libsubunit_la_SOURCES = \
- c/lib/child.c \
- c/include/subunit/child.h
-
-libcppunit_subunit_la_SOURCES = \
- c++/SubunitTestProgressListener.cpp \
- c++/SubunitTestProgressListener.h
-
-tests_LDADD = @CHECK_LIBS@ $(top_builddir)/libsubunit.la
-c_tests_test_child_CFLAGS = -I$(top_srcdir)/c/include $(SUBUNIT_CFLAGS) @CHECK_CFLAGS@
-c_tests_test_child_LDADD = $(tests_LDADD)
-
-
-all-local: perl/Makefile
- $(MAKE) -C perl all
-
-check-local: perl/Makefile
- $(MAKE) -C perl check
-
-clean-local:
- find . -type f -name "*.pyc" -exec rm {} ';'
- rm -f perl/Makefile
-
-# Remove perl dir for VPATH builds.
-distclean-local:
- -rmdir perl > /dev/null
- -rm perl/Makefile.PL > /dev/null
-
-install-exec-local: perl/Makefile
- $(MAKE) -C perl install
-
-mostlyclean-local:
- rm -rf perl/blib
- rm -rf perl/pm_to_blib
-
-# 'uninstall' perl files during distcheck
-uninstall-local:
- if [ "_inst" = `basename ${prefix}` ]; then \
- $(MAKE) -C perl uninstall_distcheck; \
- rm -f "$(DESTDIR)$(bindir)"/subunit-diff; \
- fi
-
-# The default for MakeMaker; can be overridden by exporting
-INSTALLDIRS ?= site
-
-perl/Makefile: perl/Makefile.PL
- mkdir -p perl
- cd perl && perl Makefile.PL INSTALLDIRS=${INSTALLDIRS}
- -rm perl/Makefile.old > /dev/null
diff --git a/lib/subunit/NEWS b/lib/subunit/NEWS
deleted file mode 100644
index 081dc5dbfc..0000000000
--- a/lib/subunit/NEWS
+++ /dev/null
@@ -1,344 +0,0 @@
----------------------
-subunit release notes
----------------------
-
-NEXT (In development)
----------------------
-
-0.0.9
------
-
-BUG FIXES
-~~~~~~~~~
-
-* All the source files are now included in the distribution tarball.
- (Arfrever Frehtes Taifersar Arahesis, Robert Collins, #996275)
-
-* ``python/subunit/tests/test_run.py`` and ``python/subunit/filters.py`` were
- not included in the 0.0.8 tarball. (Robert Collins)
-
-* Test ids which include non-ascii unicode characters are now supported.
- (Robert Collins, #1029866)
-
-* The ``failfast`` option to ``subunit.run`` will now work. The dependency on
- testtools has been raised to 0.9.23 to permit this.
- (Robert Collins, #1090582)
-
-0.0.8
------
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* Perl module now correctly outputs "failure" instead of "fail". (Stewart Smith)
-
-* Shell functions now output timestamps. (Stewart Smith, Robert Collins)
-
-* 'subunit2csv' script that converts subunit output to CSV format.
- (Jonathan Lange)
-
-* ``TagCollapsingDecorator`` now correctly distinguishes between local and
- global tags. (Jonathan Lange)
-
-* ``TestResultFilter`` always forwards ``time:`` events.
- (Benji York, Brad Crittenden)
-
-BUG FIXES
-~~~~~~~~~
-
-* Add 'subunit --no-xfail', which will omit expected failures from the subunit
- stream. (John Arbash Meinel, #623642)
-
-* Add 'subunit -F/--only-genuine-failures' which sets all of '--no-skips',
- '--no-xfail', '--no-passthrough, '--no-success', and gives you just the
- failure stream. (John Arbash Meinel)
-
-* Python2.6 support was broken by the fixup feature.
- (Arfrever Frehtes Taifersar Arahesis, #987490)
-
-* Python3 support regressed in trunk.
- (Arfrever Frehtes Taifersar Arahesis, #987514)
-
-* Python3 support was insufficiently robust in detecting unicode streams.
- (Robert Collins, Arfrever Frehtes Taifersar Arahesis)
-
-* Tag support has been implemented for TestProtocolClient.
- (Robert Collins, #518016)
-
-* Tags can now be filtered. (Jonathan Lange, #664171)
-
-* Test suite works with latest testtools (but not older ones - formatting
- changes only). (Robert Collins)
-
-0.0.7
------
-
-The Subunit Python test runner ``python -m subunit.run`` can now report the
-test ids and also filter via a test id list file thanks to improvements in
-``testtools.run``. See the testtools manual, or testrepository - a major
-user of such functionality.
-
-Additionally the protocol now has a keyword uxsuccess for Unexpected Success
-reporting. Older parsers will report tests with this status code as 'lost
-connection'.
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* Add ``TimeCollapsingDecorator`` which collapses multiple sequential time()
- calls into just the first and last. (Jonathan Lange)
-
-* Add ``TagCollapsingDecorator`` which collapses many tags() calls into one
- where possible. (Jonathan Lange, Robert Collins)
-
-* Force flush of writes to stdout in c/tests/test_child.
- (Jelmer Vernooij, #687611)
-
-* Provisional Python 3.x support.
- (Robert Collins, Tres Seaver, Martin[gz], #666819)
-
-* ``subunit.chunked.Decoder`` Python class takes a new ``strict`` option,
- which defaults to ``True``. When ``False``, the ``Decoder`` will accept
- incorrect input that is still unambiguous. i.e. subunit will not barf if
- a \r is missing from the input. (Martin Pool)
-
-* ``subunit-filter`` preserves the relative ordering of ``time:`` statements,
- so you can now use filtered streams to gather data about how long it takes
- to run a test. (Jonathan Lange, #716554)
-
-* ``subunit-ls`` now handles a stream with time: instructions that start
- partway through the stream (which may lead to strange times) more gracefully.
- (Robert Collins, #785954)
-
-* ``subunit-ls`` should handle the new test outcomes in Python2.7 better.
- (Robert Collins, #785953)
-
-* ``TestResultFilter`` now collapses sequential calls to time().
- (Jonathan Lange, #567150)
-
-* ``TestResultDecorator.tags()`` now actually works, and is no longer a buggy
- copy/paste of ``TestResultDecorator.time()``. (Jonathan Lange, #681828)
-
-* ``TestResultFilter`` now supports a ``fixup_expected_failures``
- argument. (Jelmer Vernooij, #755241)
-
-* The ``subunit.run`` Python module supports ``-l`` and ``--load-list`` as
- per ``testtools.run``. This required a dependency bump due to a small
- API change in ``testtools``. (Robert Collins)
-
-* The help for subunit-filter was confusing about the behaviour of ``-f`` /
- ``--no-failure``. (Robert Collins, #703392)
-
-* The Python2.7 / testtools addUnexpectedSuccess API is now supported. This
- required adding a new status code to the protocol. (Robert Collins, #654474)
-
-CHANGES
-~~~~~~~
-
-* testtools 0.9.11 or newer is now needed (due to the Python 3 support).
- (Robert Collins)
-
-0.0.6
------
-
-This release of subunit fixes a number of unicode related bugs. This depends on
-testtools 0.9.4 and will not function without it. Thanks to Tres Seaver there
-is also an optional native setup.py file for use with easy_install and the
-like.
-
-BUG FIXES
-~~~~~~~~~
-
-* Be consistent about delivering unicode content to testtools StringException
- class which has become (appropriately) conservative. (Robert Collins)
-
-* Fix incorrect reference to subunit_test_failf in c/README.
- (Brad Hards, #524341)
-
-* Fix incorrect ordering of tags method parameters in TestResultDecorator. This
- is purely cosmetic as the parameters are passed down with no interpretation.
- (Robert Collins, #537611)
-
-* Old style tracebacks with no encoding info are now treated as UTF8 rather
- than some-random-codec-like-ascii. (Robert Collins)
-
-* On windows, ProtocolTestCase and TestProtocolClient will set their streams to
- binary mode by calling into msvcrt; this avoids having their input or output
- mangled by the default line ending translation on that platform.
- (Robert Collins, Martin [gz], #579296)
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* Subunit now has a setup.py for python deployments that are not using
- distribution packages. (Tres Seaver, #538181)
-
-* Subunit now supports test discovery by building on the testtools support for
- it. You can take advantage of it with "python -m subunit.run discover [path]"
- and see "python -m subunit.run discover --help" for more options.
-
-* Subunit now uses the improved unicode support in testtools when outputting
- non-details based test information; this should consistently UTF8 encode such
- strings.
-
-* The Python TestProtocolClient now flushes output on startTest and stopTest.
- (Martin [gz]).
-
-
-0.0.5
------
-
-BUG FIXES
-~~~~~~~~~
-
-* make check was failing if subunit wasn't installed due to a missing include
- path for the test program test_child.
-
-* make distcheck was failing due to a missing $(top_srcdir) rune.
-
-IMPROVEMENTS
-~~~~~~~~~~~~
-
-* New filter `subunit-notify` that will show a notification window with test
- statistics when the test run finishes.
-
-* subunit.run will now pipe its output to the command in the
- SUBUNIT_FORMATTER environment variable, if set.
-
-0.0.4
------
-
-BUG FIXES
-~~~~~~~~~
-
-* subunit2junitxml -f required a value; this is now fixed and -f acts as a
- boolean switch with no parameter.
-
-* Building with autoconf 2.65 is now supported.
-
-
-0.0.3
------
-
- CHANGES:
-
- * License change, by unanimous agreement of contributors to BSD/Apache
- License Version 2.0. This makes Subunit compatible with more testing
- frameworks.
-
- IMPROVEMENTS:
-
- * CPPUnit is now directly supported: subunit builds a cppunit listener
- ``libcppunit-subunit``.
-
- * In the python API ``addExpectedFailure`` and ``addUnexpectedSuccess``
- from python 2.7/3.1 are now supported. ``addExpectedFailure`` is
- serialised as ``xfail``, and ``addUnexpectedSuccess`` as ``success``.
- The ``ProtocolTestCase`` parser now calls outcomes using an extended
- API that permits attaching arbitrary MIME resources such as text files
- log entries and so on. This extended API is being developed with the
- Python testing community, and is in flux. ``TestResult`` objects that
- do not support the API will be detected and transparently downgraded
- back to the regular Python unittest API.
-
- * INSTALLDIRS can be set to control the perl MakeMaker 'INSTALLDIRS'
-    variable when installing.
-
- * Multipart test outcomes are tentatively supported; the exact protocol
- for them, both serialiser and object is not yet finalised. Testers and
- early adopters are sought. As part of this and also in an attempt to
-    provide a more precise focus on the wire protocol and toolchain,
- Subunit now depends on testtools (http://launchpad.net/testtools)
- release 0.9.0 or newer.
-
- * subunit2junitxml supports a new option, --forward which causes it
- to forward the raw subunit stream in a similar manner to tee. This
- is used with the -o option to both write a xml report and get some
- other subunit filter to process the stream.
-
- * The C library now has ``subunit_test_skip``.
-
- BUG FIXES:
-
- * Install progress_model.py correctly.
-
- * Non-gcc builds will no longer try to use gcc specific flags.
- (Thanks trondn-norbye)
-
- API CHANGES:
-
- INTERNALS:
-
-0.0.2
------
-
- CHANGES:
-
- IMPROVEMENTS:
-
- * A number of filters now support ``--no-passthrough`` to cause all
- non-subunit content to be discarded. This is useful when precise control
- over what is output is required - such as with subunit2junitxml.
-
- * A small perl parser is now included, and a new ``subunit-diff`` tool
- using that is included. (Jelmer Vernooij)
-
- * Subunit streams can now include optional, incremental lookahead
- information about progress. This allows reporters to make estimates
- about completion, when such information is available. See the README
- under ``progress`` for more details.
-
- * ``subunit-filter`` now supports regex filtering via ``--with`` and
-    ``--without`` options. (Martin Pool)
-
- * ``subunit2gtk`` has been added, a filter that shows a GTK summary of a
- test stream.
-
- * ``subunit2pyunit`` has a --progress flag which will cause the bzrlib
- test reporter to be used, which has a textual progress bar. This requires
- a recent bzrlib as a minor bugfix was required in bzrlib to support this.
-
- * ``subunit2junitxml`` has been added. This filter converts a subunit
- stream to a single JUnit style XML stream using the pyjunitxml
- python library.
-
- * The shell functions support skipping via ``subunit_skip_test`` now.
-
- BUG FIXES:
-
- * ``xfail`` outcomes are now passed to python TestResult's via
- addExpectedFailure if it is present on the TestResult. Python 2.6 and
- earlier which do not have this function will have ``xfail`` outcomes
- passed through as success outcomes as earlier versions of subunit did.
-
- API CHANGES:
-
- * tags are no longer passed around in python via the ``TestCase.tags``
- attribute. Instead ``TestResult.tags(new_tags, gone_tags)`` is called,
- and like in the protocol, if called while a test is active only applies
- to that test. (Robert Collins)
-
- * ``TestResultFilter`` takes a new optional constructor parameter
- ``filter_predicate``. (Martin Pool)
-
- * When a progress: directive is encountered in a subunit stream, the
- python bindings now call the ``progress(offset, whence)`` method on
- ``TestResult``.
-
- * When a time: directive is encountered in a subunit stream, the python
- bindings now call the ``time(seconds)`` method on ``TestResult``.
-
- INTERNALS:
-
- * (python) Added ``subunit.test_results.AutoTimingTestResultDecorator``. Most
- users of subunit will want to wrap their ``TestProtocolClient`` objects
- in this decorator to get test timing data for performance analysis.
-
- * (python) ExecTestCase supports passing arguments to test scripts.
-
- * (python) New helper ``subunit.test_results.HookedTestResultDecorator``
- which can be used to call some code on every event, without having to
- implement all the event methods.
-
- * (python) ``TestProtocolClient.time(a_datetime)`` has been added which
- causes a timestamp to be output to the stream.
diff --git a/lib/subunit/README b/lib/subunit/README
deleted file mode 100644
index 47a97345cd..0000000000
--- a/lib/subunit/README
+++ /dev/null
@@ -1,229 +0,0 @@
-
- subunit: A streaming protocol for test results
- Copyright (C) 2005-2009 Robert Collins <robertc@robertcollins.net>
-
- Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- license at the users choice. A copy of both licenses are available in the
- project source as Apache-2.0 and BSD. You may not use this file except in
- compliance with one of these two licences.
-
- Unless required by applicable law or agreed to in writing, software
- distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
- WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- license you chose for the specific language governing permissions and
- limitations under that license.
-
- See the COPYING file for full details on the licensing of Subunit.
-
- subunit reuses iso8601 by Michael Twomey, distributed under an MIT style
- licence - see python/iso8601/LICENSE for details.
-
-Subunit
--------
-
-Subunit is a streaming protocol for test results. The protocol is human
-readable and easily generated and parsed. By design all the components of
-the protocol conceptually fit into the xUnit TestCase->TestResult interaction.
-
-Subunit comes with command line filters to process a subunit stream and
-language bindings for python, C, C++ and shell. Bindings are easy to write
-for other languages.
-
-A number of useful things can be done easily with subunit:
- * Test aggregation: Tests run separately can be combined and then
- reported/displayed together. For instance, tests from different languages
- can be shown as a seamless whole.
- * Test archiving: A test run may be recorded and replayed later.
- * Test isolation: Tests that may crash or otherwise interact badly with each
- other can be run separately and then aggregated, rather than interfering
- with each other.
- * Grid testing: subunit can act as the necessary serialisation and
-   deserialisation to get test runs on distributed machines to be reported in
- real time.
-
-Subunit supplies the following filters:
- * tap2subunit - convert perl's Test Anything Protocol (TAP) to subunit.
- * subunit2csv - convert a subunit stream to csv.
- * subunit2pyunit - convert a subunit stream to pyunit test results.
- * subunit2gtk - show a subunit stream in GTK.
- * subunit2junitxml - convert a subunit stream to JUnit's XML format.
- * subunit-diff - compare two subunit streams.
- * subunit-filter - filter out tests from a subunit stream.
- * subunit-ls - list info about tests present in a subunit stream.
- * subunit-stats - generate a summary of a subunit stream.
- * subunit-tags - add or remove tags from a stream.
-
-Integration with other tools
-----------------------------
-
-Subunit's language bindings act as integration with various test runners like
-'check', 'cppunit', and Python's 'unittest'. Beyond that, a small amount of glue
-(typically a few lines) will allow Subunit to be used in more sophisticated
-ways.
-
-Python
-======
-
-Subunit has excellent Python support: most of the filters and tools are written
-in python and there are facilities for using Subunit to increase test isolation
-seamlessly within a test suite.
-
-One simple way to run an existing python test suite and have it output subunit
-is the module ``subunit.run``::
-
- $ python -m subunit.run mypackage.tests.test_suite
-
-For more information on the Python support Subunit offers, please see
-``pydoc subunit``, or the source in ``python/subunit/__init__.py``.
-
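-As an illustration of the emitting side, ``TestProtocolClient`` behaves like a
-standard ``TestResult`` but writes the subunit protocol to the stream it is
-given. A minimal sketch (the ``TestFoo`` case is purely illustrative; on
-Python 3 a byte stream such as ``sys.stdout.buffer`` may be needed instead of
-``sys.stdout``)::
-
-  import sys
-  import unittest
-
-  from subunit import TestProtocolClient
-
-  class TestFoo(unittest.TestCase):
-      def test_works(self):
-          self.assertEqual(1, 1)
-
-  # TestProtocolClient is a TestResult: running a suite against it emits
-  # "test:", "success:", "failure:" etc. lines on the given stream.
-  result = TestProtocolClient(sys.stdout)
-  suite = unittest.TestLoader().loadTestsFromTestCase(TestFoo)
-  suite.run(result)
-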
-C
-=
-
-Subunit has C bindings to emit the protocol, and comes with a patch for 'check'
-which has been nominally accepted by the 'check' developers. See 'c/README' for
-more details.
-
-C++
-===
-
-The C library is includable and usable directly from C++. A TestListener for
-CPPUnit is included in the Subunit distribution. See 'c++/README' for details.
-
-shell
-=====
-
-Similar to C, the shell bindings consist of simple functions to output protocol
-elements, and a patch for adding subunit output to the 'ShUnit' shell test
-runner. See 'shell/README' for details.
-
-Filter recipes
---------------
-
-To ignore some failing tests whose root cause is already known::
-
- subunit-filter --without 'AttributeError.*flavor'
-
-
-The protocol
-------------
-
-Sample subunit wire contents
-----------------------------
-
-The following::
- test: test foo works
- success: test foo works.
- test: tar a file.
- failure: tar a file. [
- ..
- ].. space is eaten.
- foo.c:34 WARNING foo is not defined.
- ]
- a writeln to stdout
-
-When run through subunit2pyunit::
- .F
- a writeln to stdout
-
- ========================
- FAILURE: tar a file.
- -------------------
- ..
- ].. space is eaten.
- foo.c:34 WARNING foo is not defined.
-
-
-Subunit protocol description
-============================
-
-This description is being ported to an EBNF style. Currently it is only partly
-in that style, but it should be fairly clear all the same. When in doubt, refer
-to the source (and ideally help fix up the description!). Generally the
-protocol is line orientated and consists of either directives and their
-parameters, or, when outside a DETAILS region, unexpected lines which are not
-interpreted by the parser - they should be forwarded unaltered.
-
-test|testing|test:|testing: test LABEL
-success|success:|successful|successful: test LABEL
-success|success:|successful|successful: test LABEL DETAILS
-failure: test LABEL
-failure: test LABEL DETAILS
-error: test LABEL
-error: test LABEL DETAILS
-skip[:] test LABEL
-skip[:] test LABEL DETAILS
-xfail[:] test LABEL
-xfail[:] test LABEL DETAILS
-uxsuccess[:] test LABEL
-uxsuccess[:] test LABEL DETAILS
-progress: [+|-]X
-progress: push
-progress: pop
-tags: [-]TAG ...
-time: YYYY-MM-DD HH:MM:SSZ
-
-LABEL: UTF8*
-DETAILS ::= BRACKETED | MULTIPART
-BRACKETED ::= '[' CR UTF8-lines ']' CR
-MULTIPART ::= '[ multipart' CR PART* ']' CR
-PART ::= PART_TYPE CR NAME CR PART_BYTES CR
-PART_TYPE ::= Content-Type: type/sub-type(;parameter=value,parameter=value)
-PART_BYTES ::= (DIGITS CR LF BYTE{DIGITS})* '0' CR LF
-
-unexpected output on stdout -> stdout.
-exit w/o the last test completing -> error
-
-Tags given outside a test are applied to all following tests.
-Tags given after a test: line and before the result line for the same test
-apply only to that test, and inherit the current global tags.
-A '-' before a tag is used to remove tags - e.g. to prevent a global tag
-applying to a single test, or to cancel a global tag.
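-
-For example, in the following constructed stream (an illustration, not taken
-from a real run) the tag ``global`` applies to both tests, while ``local``
-applies only to the first::
-
-  tags: global
-  test: test one
-  tags: local
-  success: test one
-  test: test two
-  success: test two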
-
-The progress directive is used to provide progress information about a stream
-so that stream consumer can provide completion estimates, progress bars and so
-on. Stream generators that know how many tests will be present in the stream
-should output "progress: COUNT". Stream filters that add tests should output
-"progress: +COUNT", and those that remove tests should output
-"progress: -COUNT". An absolute count should reset the progress indicators in
-use - it indicates that two separate streams from different generators have
-been trivially concatenated together, and there is no knowledge of how many
-more complete streams are incoming. Smart concatenation could scan each stream
-for their count and sum them, or alternatively translate absolute counts into
-relative counts inline. It is recommended that outputters avoid absolute counts
-unless necessary. The push and pop directives are used to provide local regions
-for progress reporting. This fits with hierarchically operating test
-environments - such as those that organise tests into suites - the top-most
-runner can report on the number of suites, and each suite surround its output
-with a (push, pop) pair. Interpreters should interpret a pop as also advancing
-the progress of the restored level by one step. Encountering progress
-directives between the start and end of a test pair indicates that a previous
-test was interrupted and did not cleanly terminate: it should be implicitly
-closed with an error (the same as when a stream ends with no closing test
-directive for the most recently started test).
-
-The time directive acts as a clock event - it sets the time for all future
-events. The value should be a valid ISO8601 time.
-
-The skip, xfail and uxsuccess outcomes are not supported by all testing
-environments. In Python the testtools (https://launchpad.net/testtools)
-library is used to translate these automatically if an older Python version
-that does not support them is in use. See the testtools documentation for the
-translation policy.
-
-skip is used to indicate a test was discovered but not executed. xfail is used
-to indicate a test that errored in some expected fashion (also known as "TODO"
-tests in some frameworks). uxsuccess is used to indicate an unexpected success
-where a test thought to be failing actually passes. It is complementary to
-xfail.
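-
-On the consuming side, the Python bindings expose a subunit stream as an
-ordinary test suite via ``ProtocolTestCase``. A minimal sketch (the
-``results.subunit`` file name is purely illustrative)::
-
-  import unittest
-
-  from subunit import ProtocolTestCase
-
-  with open('results.subunit', 'rb') as stream:
-      # ProtocolTestCase parses the stream and replays each event
-      # (test:, success:, failure:, ...) onto the TestResult passed to run().
-      suite = ProtocolTestCase(stream)
-      result = unittest.TestResult()
-      suite.run(result)
-
-  print("Ran %d tests, %d failures, %d errors" % (
-      result.testsRun, len(result.failures), len(result.errors)))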
-
-Hacking on subunit
-------------------
-
-Releases
-========
-
-* Update versions in configure.ac and python/subunit/__init__.py.
-* Make PyPI and regular tarball releases. Upload the regular one to LP, the
- PyPI one to PyPI.
-* Push a tagged commit.
-
diff --git a/lib/subunit/c++/README b/lib/subunit/c++/README
deleted file mode 100644
index 7b8184400e..0000000000
--- a/lib/subunit/c++/README
+++ /dev/null
@@ -1,50 +0,0 @@
-#
-# subunit C++ bindings.
-# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-Currently there are no native C++ bindings for subunit. However the C library
-can be used from C++ safely. A CPPUnit listener is built as part of Subunit to
-allow CPPUnit users to simply get Subunit output.
-
-To use the listener, use pkg-config (or your preferred replacement) to get the
-cflags and link settings from libcppunit_subunit.pc.
-
-In your test driver main, use SubunitTestProgressListener, as shown in this
-example main::
-
- {
- // Create the event manager and test controller
- CPPUNIT_NS::TestResult controller;
-
- // Add a listener that collects test result
- // so we can get the overall status.
- // note this isn't needed for subunit...
- CPPUNIT_NS::TestResultCollector result;
- controller.addListener( &result );
-
- // Add a listener that print test activity in subunit format.
- CPPUNIT_NS::SubunitTestProgressListener progress;
- controller.addListener( &progress );
-
- // Add the top suite to the test runner
- CPPUNIT_NS::TestRunner runner;
- runner.addTest( CPPUNIT_NS::TestFactoryRegistry::getRegistry().makeTest() );
- runner.run( controller );
-
- return result.wasSuccessful() ? 0 : 1;
- }
diff --git a/lib/subunit/c++/SubunitTestProgressListener.cpp b/lib/subunit/c++/SubunitTestProgressListener.cpp
deleted file mode 100644
index 76cd9e1194..0000000000
--- a/lib/subunit/c++/SubunitTestProgressListener.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/* Subunit test listener for cppunit (http://cppunit.sourceforge.net).
- * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
- *
- * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- * license at the users choice. A copy of both licenses are available in the
- * project source as Apache-2.0 and BSD. You may not use this file except in
- * compliance with one of these two licences.
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under these licenses is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the license you chose for the specific language governing permissions
- * and limitations under that license.
- */
-
-#include <cppunit/Exception.h>
-#include <cppunit/Test.h>
-#include <cppunit/TestFailure.h>
-#include <cppunit/TextOutputter.h>
-#include <iostream>
-
-// Have to be able to import the public interface without config.h.
-#include "SubunitTestProgressListener.h"
-#include "config.h"
-#include "subunit/child.h"
-
-
-CPPUNIT_NS_BEGIN
-
-
-void
-SubunitTestProgressListener::startTest( Test *test )
-{
- subunit_test_start(test->getName().c_str());
- last_test_failed = false;
-}
-
-void
-SubunitTestProgressListener::addFailure( const TestFailure &failure )
-{
- std::ostringstream capture_stream;
- TextOutputter outputter(NULL, capture_stream);
- outputter.printFailureLocation(failure.sourceLine());
- outputter.printFailureDetail(failure.thrownException());
-
- if (failure.isError())
- subunit_test_error(failure.failedTestName().c_str(),
- capture_stream.str().c_str());
- else
- subunit_test_fail(failure.failedTestName().c_str(),
- capture_stream.str().c_str());
- last_test_failed = true;
-}
-
-void
-SubunitTestProgressListener::endTest( Test *test)
-{
- if (!last_test_failed)
- subunit_test_pass(test->getName().c_str());
-}
-
-
-CPPUNIT_NS_END
diff --git a/lib/subunit/c++/SubunitTestProgressListener.h b/lib/subunit/c++/SubunitTestProgressListener.h
deleted file mode 100644
index 5206d833c7..0000000000
--- a/lib/subunit/c++/SubunitTestProgressListener.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/* Subunit test listener for cppunit (http://cppunit.sourceforge.net).
- * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
- *
- * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- * license at the users choice. A copy of both licenses are available in the
- * project source as Apache-2.0 and BSD. You may not use this file except in
- * compliance with one of these two licences.
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under these licenses is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the license you chose for the specific language governing permissions
- * and limitations under that license.
- */
-#ifndef CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
-#define CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
-
-#include <cppunit/TestListener.h>
-
-
-CPPUNIT_NS_BEGIN
-
-
-/*!
- * \brief TestListener that outputs subunit
- * (http://www.robertcollins.net/unittest/subunit) compatible output.
- * \ingroup TrackingTestExecution
- */
-class CPPUNIT_API SubunitTestProgressListener : public TestListener
-{
-public:
-
- SubunitTestProgressListener() {}
-
- void startTest( Test *test );
-
- void addFailure( const TestFailure &failure );
-
- void endTest( Test *test );
-
-private:
- /// Prevents the use of the copy constructor.
- SubunitTestProgressListener( const SubunitTestProgressListener &copy );
-
- /// Prevents the use of the copy operator.
- void operator =( const SubunitTestProgressListener &copy );
-
-private:
- int last_test_failed;
-};
-
-
-CPPUNIT_NS_END
-
-#endif // CPPUNIT_SUBUNITTESTPROGRESSLISTENER_H
-
diff --git a/lib/subunit/c/README b/lib/subunit/c/README
deleted file mode 100644
index b62fd45395..0000000000
--- a/lib/subunit/c/README
+++ /dev/null
@@ -1,68 +0,0 @@
-#
-# subunit C bindings.
-# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-
-This subtree contains an implementation of the subunit child protocol.
-Currently I have no plans to write a test runner in C, so I have not written
-an implementation of the parent protocol. [but will happily accept patches].
-This implementation is built using SCons and tested via 'check'.
-See the tests/ directory for the test programs.
-You can use `make check` or `scons check` to run the tests.
-
-The C protocol consists of four functions which you can use to output test
-metadata trivially. See lib/subunit_child.[ch] for details.
-
-However, this is not a test runner - subunit provides no support for [for
-instance] managing assertions, cleaning up on errors etc. You can look at
-'check' (http://check.sourceforge.net/) or
-'gunit' (https://garage.maemo.org/projects/gunit) for C unit test
-frameworks.
-There is a patch for 'check' (check-subunit-*.patch) in this source tree.
-It's also available as request ID #1470750 in the sourceforge request tracker
-http://sourceforge.net/tracker/index.php. The 'check' developers have indicated
-they will merge this during the current release cycle.
-
-If you are a test environment maintainer - either homegrown, or 'check' or
-'gunit' or some other - you will want to know how the subunit calls should
-be used.
-Here is what a manually written test using the bindings might look like:
-
-
-void
-a_test(void) {
-    int result;
-    char message[100];
-    subunit_test_start("test name");
-    /* determine if the test passes or fails */
-    result = SOME_VALUE;
-    if (!result) {
-        subunit_test_pass("test name");
-    } else {
-        /* subunit_test_fail() takes a single pre-formatted error string,
-         * so build the message first (snprintf needs <stdio.h>). */
-        snprintf(message, sizeof(message),
-            "Something went wrong running something:\n"
-            "exited with result: '%d'", result);
-        subunit_test_fail("test name", message);
-    }
-}
-
-Which when run with a subunit test runner will generate something like:
-test name ... ok
-
-on success, and:
-
-test name ... FAIL
-
-======================================================================
-FAIL: test name
-----------------------------------------------------------------------
-RemoteError:
-Something went wrong running something:
-exited with result: '1'
diff --git a/lib/subunit/c/include/subunit/child.h b/lib/subunit/c/include/subunit/child.h
deleted file mode 100644
index 896d2dfad0..0000000000
--- a/lib/subunit/c/include/subunit/child.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/**
- *
- * subunit C bindings.
- * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
- *
- * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- * license at the users choice. A copy of both licenses are available in the
- * project source as Apache-2.0 and BSD. You may not use this file except in
- * compliance with one of these two licences.
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under these licenses is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the license you chose for the specific language governing permissions
- * and limitations under that license.
- **/
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-
-/**
- * subunit_test_start:
- *
- * Report that a test is starting.
- * @name: test case name
- */
-extern void subunit_test_start(char const * const name);
-
-
-/**
- * subunit_test_pass:
- *
- * Report that a test has passed.
- *
- * @name: test case name
- */
-extern void subunit_test_pass(char const * const name);
-
-
-/**
- * subunit_test_fail:
- *
- * Report that a test has failed.
- * @name: test case name
- * @error: a string describing the error.
- */
-extern void subunit_test_fail(char const * const name, char const * const error);
-
-
-/**
- * subunit_test_error:
- *
- * Report that a test has errored. An error is an unintentional failure - i.e.
- * a segfault rather than a failed assertion.
- * @name: test case name
- * @error: a string describing the error.
- */
-extern void subunit_test_error(char const * const name,
- char const * const error);
-
-
-/**
- * subunit_test_skip:
- *
- * Report that a test has been skipped. A skip is a test that has not run to
- * conclusion but hasn't given an error either - its result is unknown.
- * @name: test case name
- * @reason: a string describing the reason for the skip.
- */
-extern void subunit_test_skip(char const * const name,
- char const * const reason);
-
-
-enum subunit_progress_whence {
- SUBUNIT_PROGRESS_SET,
- SUBUNIT_PROGRESS_CUR,
- SUBUNIT_PROGRESS_POP,
- SUBUNIT_PROGRESS_PUSH,
-};
-
-/**
- * subunit_progress:
- *
- * Report the progress of a test run.
- * @whence: The type of progress update to report.
- * @offset: Offset of the progress (only for SUBUNIT_PROGRESS_SET
- * and SUBUNIT_PROGRESS_CUR).
- */
-extern void subunit_progress(enum subunit_progress_whence whence, int offset);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/lib/subunit/c/lib/child.c b/lib/subunit/c/lib/child.c
deleted file mode 100644
index 20f38da8c9..0000000000
--- a/lib/subunit/c/lib/child.c
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- *
- * subunit C child-side bindings: report on tests being run.
- * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
- *
- * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- * license at the users choice. A copy of both licenses are available in the
- * project source as Apache-2.0 and BSD. You may not use this file except in
- * compliance with one of these two licences.
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under these licenses is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the license you chose for the specific language governing permissions
- * and limitations under that license.
- **/
-
-#include <stdio.h>
-#include <string.h>
-#include "subunit/child.h"
-
-/* Write details about a test event. It is the caller's responsibility to ensure
- * that details are only provided for events the protocol expects details on.
- * @event: The event - e.g. 'skip'
- * @name: The test name/id.
- * @details: The details of the event, may be NULL if no details are present.
- */
-static void
-subunit_send_event(char const * const event, char const * const name,
- char const * const details)
-{
- if (NULL == details) {
- fprintf(stdout, "%s: %s\n", event, name);
- } else {
- fprintf(stdout, "%s: %s [\n", event, name);
- fprintf(stdout, "%s", details);
- if (details[0] == '\0' || details[strlen(details) - 1] != '\n')
- fprintf(stdout, "\n");
- fprintf(stdout, "]\n");
- }
- fflush(stdout);
-}
-
-/* these functions all flush to ensure that the test runner knows the action
- * that has been taken even if the subsequent test etc. takes a long time or
- * never completes (e.g. a segfault).
- */
-
-void
-subunit_test_start(char const * const name)
-{
- subunit_send_event("test", name, NULL);
-}
-
-
-void
-subunit_test_pass(char const * const name)
-{
- /* TODO: add success details as an option */
- subunit_send_event("success", name, NULL);
-}
-
-
-void
-subunit_test_fail(char const * const name, char const * const error)
-{
- subunit_send_event("failure", name, error);
-}
-
-
-void
-subunit_test_error(char const * const name, char const * const error)
-{
- subunit_send_event("error", name, error);
-}
-
-
-void
-subunit_test_skip(char const * const name, char const * const reason)
-{
- subunit_send_event("skip", name, reason);
-}
-
-void
-subunit_progress(enum subunit_progress_whence whence, int offset)
-{
- switch (whence) {
- case SUBUNIT_PROGRESS_SET:
- printf("progress: %d\n", offset);
- break;
- case SUBUNIT_PROGRESS_CUR:
- printf("progress: %+-d\n", offset);
- break;
- case SUBUNIT_PROGRESS_POP:
- printf("progress: pop\n");
- break;
- case SUBUNIT_PROGRESS_PUSH:
- printf("progress: push\n");
- break;
- default:
- fprintf(stderr, "Invalid whence %d in subunit_progress()\n", whence);
- break;
- }
-}
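
The progress lines printed above are consumed by the Python parser in this
tree. A rough sketch of that round trip, assuming the in-tree subunit package
and a result object that implements the optional progress() extension:

    import unittest

    from subunit import TestProtocolServer

    class ProgressResult(unittest.TestResult):
        # Optional extension method described in python/subunit/__init__.py.
        def progress(self, offset, whence):
            print("progress", offset, whence)

    server = TestProtocolServer(ProgressResult())
    for line in ("progress: 5\n", "progress: +2\n",
                 "progress: push\n", "progress: pop\n"):
        server.lineReceived(line)
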
diff --git a/lib/subunit/c/tests/test_child.c b/lib/subunit/c/tests/test_child.c
deleted file mode 100644
index 1318322ab2..0000000000
--- a/lib/subunit/c/tests/test_child.c
+++ /dev/null
@@ -1,234 +0,0 @@
-/**
- *
- * subunit C bindings.
- * Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
- *
- * Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
- * license at the users choice. A copy of both licenses are available in the
- * project source as Apache-2.0 and BSD. You may not use this file except in
- * compliance with one of these two licences.
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under these licenses is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the license you chose for the specific language governing permissions
- * and limitations under that license.
- **/
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <unistd.h>
-#include <string.h>
-#include <check.h>
-
-#include "subunit/child.h"
-
-/**
- * Helper function to capture stdout, run some call, and check what
- * was written.
- * @expected the expected stdout content
- * @function the function to call.
- **/
-static void
-test_stdout_function(char const * expected,
- void (*function)(void))
-{
- /* capture stdout around the call, then compare it with the expected output. */
- int bytecount;
- int old_stdout;
- int new_stdout[2];
- char buffer[100];
- /* we need a pipe to capture stdout in */
- fail_if(pipe(new_stdout), "Failed to create a pipe.");
- /* backup stdout so we can replace it */
- old_stdout = dup(1);
- if (old_stdout == -1) {
- close(new_stdout[0]);
- close(new_stdout[1]);
- fail("Failed to backup stdout before replacing.");
- }
- /* redirect stdout so we can analyse it */
- if (dup2(new_stdout[1], 1) != 1) {
- close(old_stdout);
- close(new_stdout[0]);
- close(new_stdout[1]);
- fail("Failed to redirect stdout");
- }
- /* yes this can block. It's a test case with < 100 bytes of output.
- * DEAL.
- */
- function();
- /* flush writes on FILE object to file descriptor */
- fflush(stdout);
- /* restore stdout now */
- if (dup2(old_stdout, 1) != 1) {
- close(old_stdout);
- close(new_stdout[0]);
- close(new_stdout[1]);
- fail("Failed to restore stdout");
- }
- /* and we don't need the write side any more */
- if (close(new_stdout[1])) {
- close(new_stdout[0]);
- fail("Failed to close the write side of the pipe.");
- }
- /* get the output */
- bytecount = read(new_stdout[0], buffer, sizeof(buffer) - 1);
- if (0 > bytecount) {
- close(new_stdout[0]);
- fail("Failed to read captured output.");
- }
- buffer[bytecount]='\0';
- /* and we don't need the read side any more */
- fail_if(close(new_stdout[0]), "Failed to close the read side of the pipe.");
- /* compare with expected outcome */
- fail_if(strcmp(expected, buffer), "Did not get expected output [%s], got [%s]", expected, buffer);
-}
-
-
-static void
-call_test_start(void)
-{
- subunit_test_start("test case");
-}
-
-
-START_TEST (test_start)
-{
- test_stdout_function("test: test case\n", call_test_start);
-}
-END_TEST
-
-
-static void
-call_test_pass(void)
-{
- subunit_test_pass("test case");
-}
-
-
-START_TEST (test_pass)
-{
- test_stdout_function("success: test case\n", call_test_pass);
-}
-END_TEST
-
-
-static void
-call_test_fail(void)
-{
- subunit_test_fail("test case", "Multiple lines\n of error\n");
-}
-
-
-START_TEST (test_fail)
-{
- test_stdout_function("failure: test case [\n"
- "Multiple lines\n"
- " of error\n"
- "]\n",
- call_test_fail);
-}
-END_TEST
-
-
-static void
-call_test_error(void)
-{
- subunit_test_error("test case", "Multiple lines\n of output\n");
-}
-
-
-START_TEST (test_error)
-{
- test_stdout_function("error: test case [\n"
- "Multiple lines\n"
- " of output\n"
- "]\n",
- call_test_error);
-}
-END_TEST
-
-
-static void
-call_test_skip(void)
-{
- subunit_test_skip("test case", "Multiple lines\n of output\n");
-}
-
-
-START_TEST (test_skip)
-{
- test_stdout_function("skip: test case [\n"
- "Multiple lines\n"
- " of output\n"
- "]\n",
- call_test_skip);
-}
-END_TEST
-
-
-static void
-call_test_progress_pop(void)
-{
- subunit_progress(SUBUNIT_PROGRESS_POP, 0);
-}
-
-static void
-call_test_progress_set(void)
-{
- subunit_progress(SUBUNIT_PROGRESS_SET, 5);
-}
-
-static void
-call_test_progress_push(void)
-{
- subunit_progress(SUBUNIT_PROGRESS_PUSH, 0);
-}
-
-static void
-call_test_progress_cur(void)
-{
- subunit_progress(SUBUNIT_PROGRESS_CUR, -6);
-}
-
-START_TEST (test_progress)
-{
- test_stdout_function("progress: pop\n",
- call_test_progress_pop);
- test_stdout_function("progress: push\n",
- call_test_progress_push);
- test_stdout_function("progress: 5\n",
- call_test_progress_set);
- test_stdout_function("progress: -6\n",
- call_test_progress_cur);
-}
-END_TEST
-
-static Suite *
-child_suite(void)
-{
- Suite *s = suite_create("subunit_child");
- TCase *tc_core = tcase_create("Core");
- suite_add_tcase (s, tc_core);
- tcase_add_test (tc_core, test_start);
- tcase_add_test (tc_core, test_pass);
- tcase_add_test (tc_core, test_fail);
- tcase_add_test (tc_core, test_error);
- tcase_add_test (tc_core, test_skip);
- tcase_add_test (tc_core, test_progress);
- return s;
-}
-
-
-int
-main(void)
-{
- int nf;
- Suite *s = child_suite();
- SRunner *sr = srunner_create(s);
- srunner_run_all(sr, CK_NORMAL);
- nf = srunner_ntests_failed(sr);
- srunner_free(sr);
- return (nf == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
-}
diff --git a/lib/subunit/configure.ac b/lib/subunit/configure.ac
deleted file mode 100644
index cf21d55480..0000000000
--- a/lib/subunit/configure.ac
+++ /dev/null
@@ -1,75 +0,0 @@
-m4_define([SUBUNIT_MAJOR_VERSION], [0])
-m4_define([SUBUNIT_MINOR_VERSION], [0])
-m4_define([SUBUNIT_MICRO_VERSION], [9])
-m4_define([SUBUNIT_VERSION],
-m4_defn([SUBUNIT_MAJOR_VERSION]).m4_defn([SUBUNIT_MINOR_VERSION]).m4_defn([SUBUNIT_MICRO_VERSION]))
-AC_PREREQ([2.59])
-AC_INIT([subunit], [SUBUNIT_VERSION], [subunit-dev@lists.launchpad.net])
-AC_CONFIG_SRCDIR([c/lib/child.c])
-AM_INIT_AUTOMAKE([-Wall -Werror foreign subdir-objects])
-AC_CONFIG_MACRO_DIR([m4])
-[SUBUNIT_MAJOR_VERSION]=SUBUNIT_MAJOR_VERSION
-[SUBUNIT_MINOR_VERSION]=SUBUNIT_MINOR_VERSION
-[SUBUNIT_MICRO_VERSION]=SUBUNIT_MICRO_VERSION
-[SUBUNIT_VERSION]=SUBUNIT_VERSION
-AC_SUBST([SUBUNIT_MAJOR_VERSION])
-AC_SUBST([SUBUNIT_MINOR_VERSION])
-AC_SUBST([SUBUNIT_MICRO_VERSION])
-AC_SUBST([SUBUNIT_VERSION])
-AC_USE_SYSTEM_EXTENSIONS
-AC_PROG_CC
-AC_PROG_CXX
-AM_PROG_CC_C_O
-AC_PROG_INSTALL
-AC_PROG_LN_S
-AC_PROG_LIBTOOL
-AM_PATH_PYTHON
-
-AS_IF([test "$GCC" = "yes"],
- [
- SUBUNIT_CFLAGS="-Wall -Werror -Wextra -Wstrict-prototypes "
- SUBUNIT_CFLAGS="$SUBUNIT_CFLAGS -Wmissing-prototypes -Wwrite-strings "
- SUBUNIT_CFLAGS="$SUBUNIT_CFLAGS -Wno-variadic-macros "
- SUBUNIT_CXXFLAGS="-Wall -Werror -Wextra -Wwrite-strings -Wno-variadic-macros"
- ])
-
-AM_CFLAGS="$SUBUNIT_CFLAGS -I\$(top_srcdir)/c/include"
-AM_CXXFLAGS="$SUBUNIT_CXXFLAGS -I\$(top_srcdir)/c/include"
-AC_SUBST(AM_CFLAGS)
-AC_SUBST(AM_CXXFLAGS)
-
-# Checks for libraries.
-
-# Checks for header files.
-AC_CHECK_HEADERS([stdlib.h])
-
-# Checks for typedefs, structures, and compiler characteristics.
-AC_C_CONST
-AC_TYPE_PID_T
-AC_TYPE_SIZE_T
-AC_HEADER_TIME
-AC_STRUCT_TM
-
-AC_CHECK_SIZEOF(int, 4)
-AC_CHECK_SIZEOF(short, 2)
-AC_CHECK_SIZEOF(long, 4)
-
-# Checks for library functions.
-AC_FUNC_MALLOC
-AC_FUNC_REALLOC
-
-# Easier memory management.
-# C unit testing.
-PKG_CHECK_MODULES([CHECK], [check >= 0.9.4])
-# C++ unit testing.
-PKG_CHECK_MODULES([CPPUNIT], [cppunit])
-
-# Output files
-AC_CONFIG_HEADERS([config.h])
-
-AC_CONFIG_FILES([libsubunit.pc
- libcppunit_subunit.pc
- Makefile
- perl/Makefile.PL
- ])
-AC_OUTPUT
diff --git a/lib/subunit/filters/subunit-filter b/lib/subunit/filters/subunit-filter
deleted file mode 100755
index 6a1ecc9a01..0000000000
--- a/lib/subunit/filters/subunit-filter
+++ /dev/null
@@ -1,158 +0,0 @@
-#!/usr/bin/env python
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2008 Robert Collins <robertc@robertcollins.net>
-# (C) 2009 Martin Pool
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Filter a subunit stream to include/exclude tests.
-
-The default is to strip successful tests.
-
-Tests can be filtered by Python regular expressions with --with and --without,
-which match both the test name and the error text (if any). The result
-contains tests which match any of the --with expressions and none of the
---without expressions. For case-insensitive matching prepend '(?i)'.
-Remember to quote shell metacharacters.
-"""
-
-from optparse import OptionParser
-import sys
-import re
-
-from subunit import (
- DiscardStream,
- ProtocolTestCase,
- TestProtocolClient,
- read_test_list,
- )
-from subunit.filters import filter_by_result
-from subunit.test_results import (
- and_predicates,
- make_tag_filter,
- TestResultFilter,
- )
-
-
-def make_options(description):
- parser = OptionParser(description=__doc__)
- parser.add_option("--error", action="store_false",
- help="include errors", default=False, dest="error")
- parser.add_option("-e", "--no-error", action="store_true",
- help="exclude errors", dest="error")
- parser.add_option("--failure", action="store_false",
- help="include failures", default=False, dest="failure")
- parser.add_option("-f", "--no-failure", action="store_true",
- help="exclude failures", dest="failure")
- parser.add_option("--passthrough", action="store_false",
- help="Show all non subunit input.", default=False, dest="no_passthrough")
- parser.add_option("--no-passthrough", action="store_true",
- help="Hide all non subunit input.", default=False, dest="no_passthrough")
- parser.add_option("-s", "--success", action="store_false",
- help="include successes", dest="success")
- parser.add_option("--no-success", action="store_true",
- help="exclude successes", default=True, dest="success")
- parser.add_option("--no-skip", action="store_true",
- help="exclude skips", dest="skip")
- parser.add_option("--xfail", action="store_false",
- help="include expected falures", default=True, dest="xfail")
- parser.add_option("--no-xfail", action="store_true",
- help="exclude expected falures", default=True, dest="xfail")
- parser.add_option(
- "--with-tag", type=str,
- help="include tests with these tags", action="append", dest="with_tags")
- parser.add_option(
- "--without-tag", type=str,
- help="exclude tests with these tags", action="append", dest="without_tags")
- parser.add_option("-m", "--with", type=str,
- help="regexp to include (case-sensitive by default)",
- action="append", dest="with_regexps")
- parser.add_option("--fixup-expected-failures", type=str,
- help="File with list of test ids that are expected to fail; on failure "
- "their result will be changed to xfail; on success they will be "
- "changed to error.", dest="fixup_expected_failures", action="append")
- parser.add_option("--without", type=str,
- help="regexp to exclude (case-sensitive by default)",
- action="append", dest="without_regexps")
- parser.add_option("-F", "--only-genuine-failures", action="callback",
- callback=only_genuine_failures_callback,
- help="Only pass through failures and exceptions.")
- return parser
-
-
-def only_genuine_failures_callback(option, opt, value, parser):
- parser.rargs.insert(0, '--no-passthrough')
- parser.rargs.insert(0, '--no-xfail')
- parser.rargs.insert(0, '--no-skip')
- parser.rargs.insert(0, '--no-success')
-
-
-def _compile_re_from_list(l):
- return re.compile("|".join(l), re.MULTILINE)
-
-
-def _make_regexp_filter(with_regexps, without_regexps):
- """Make a callback that checks tests against regexps.
-
- with_regexps and without_regexps are each either a list of regexp strings,
- or None.
- """
- with_re = with_regexps and _compile_re_from_list(with_regexps)
- without_re = without_regexps and _compile_re_from_list(without_regexps)
-
- def check_regexps(test, outcome, err, details, tags):
- """Check if this test and error match the regexp filters."""
- test_str = str(test) + outcome + str(err) + str(details)
- if with_re and not with_re.search(test_str):
- return False
- if without_re and without_re.search(test_str):
- return False
- return True
- return check_regexps
-
-
-def _make_result(output, options, predicate):
- """Make the result that we'll send the test outcomes to."""
- fixup_expected_failures = set()
- for path in options.fixup_expected_failures or ():
- fixup_expected_failures.update(read_test_list(path))
- return TestResultFilter(
- TestProtocolClient(output),
- filter_error=options.error,
- filter_failure=options.failure,
- filter_success=options.success,
- filter_skip=options.skip,
- filter_xfail=options.xfail,
- filter_predicate=predicate,
- fixup_expected_failures=fixup_expected_failures)
-
-
-def main():
- parser = make_options(__doc__)
- (options, args) = parser.parse_args()
-
- regexp_filter = _make_regexp_filter(
- options.with_regexps, options.without_regexps)
- tag_filter = make_tag_filter(options.with_tags, options.without_tags)
- filter_predicate = and_predicates([regexp_filter, tag_filter])
-
- filter_by_result(
- lambda output_to: _make_result(sys.stdout, options, filter_predicate),
- output_path=None,
- passthrough=(not options.no_passthrough),
- forward=False)
- sys.exit(0)
-
-
-if __name__ == '__main__':
- main()
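
For illustration, the predicate built from --with/--without behaves as below;
this assumes the private helper above were importable, and the test ids are
made up:

    # Keep tests under mypkg, drop anything mentioning 'slow' (case-insensitive).
    keep = _make_regexp_filter(["^mypkg\\."], ["(?i)slow"])
    print(keep("mypkg.test_api.TestFoo.test_bar", "success", None, None, []))        # True
    print(keep("mypkg.test_api.TestSlowThing.test_big", "success", None, None, []))  # False
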
diff --git a/lib/subunit/filters/subunit-ls b/lib/subunit/filters/subunit-ls
deleted file mode 100755
index 82db4c371a..0000000000
--- a/lib/subunit/filters/subunit-ls
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2008 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""List tests in a subunit stream."""
-
-from optparse import OptionParser
-import sys
-
-from subunit import DiscardStream, ProtocolTestCase
-from subunit.test_results import (
- AutoTimingTestResultDecorator,
- TestIdPrintingResult,
- )
-
-
-parser = OptionParser(description=__doc__)
-parser.add_option("--times", action="store_true",
- help="list the time each test took (requires a timestamped stream)",
- default=False)
-parser.add_option("--no-passthrough", action="store_true",
- help="Hide all non subunit input.", default=False, dest="no_passthrough")
-(options, args) = parser.parse_args()
-result = AutoTimingTestResultDecorator(
- TestIdPrintingResult(sys.stdout, options.times))
-if options.no_passthrough:
- passthrough_stream = DiscardStream()
-else:
- passthrough_stream = None
-test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
-test.run(result)
-if result.wasSuccessful():
- exit_code = 0
-else:
- exit_code = 1
-sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit-notify b/lib/subunit/filters/subunit-notify
deleted file mode 100755
index 8cce2d1609..0000000000
--- a/lib/subunit/filters/subunit-notify
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env python
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2010 Jelmer Vernooij <jelmer@samba.org>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Notify the user of a finished test run."""
-
-import sys
-
-import pygtk
-pygtk.require('2.0')
-import pynotify
-
-from subunit import TestResultStats
-from subunit.filters import run_filter_script
-
-if not pynotify.init("Subunit-notify"):
- sys.exit(1)
-
-
-def notify_of_result(result):
- if result.failed_tests > 0:
- summary = "Test run failed"
- else:
- summary = "Test run successful"
- body = "Total tests: %d; Passed: %d; Failed: %d" % (
- result.total_tests,
- result.passed_tests,
- result.failed_tests,
- )
- nw = pynotify.Notification(summary, body)
- nw.show()
-
-
-run_filter_script(TestResultStats, __doc__, notify_of_result)
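
The counts notify_of_result reads come from TestResultStats. A small
in-memory sketch, with an illustrative two-test stream:

    from io import StringIO

    from subunit import ProtocolTestCase, TestResultStats

    result = TestResultStats(StringIO())
    stream = StringIO("test: a\nsuccess: a\ntest: b\nfailure: b\n")
    ProtocolTestCase(stream).run(result)
    print(result.total_tests, result.passed_tests, result.failed_tests)  # 2 1 1
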
diff --git a/lib/subunit/filters/subunit-stats b/lib/subunit/filters/subunit-stats
deleted file mode 100755
index 4734988fc2..0000000000
--- a/lib/subunit/filters/subunit-stats
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Filter a subunit stream to get aggregate statistics."""
-
-from optparse import OptionParser
-import sys
-import unittest
-
-from subunit import DiscardStream, ProtocolTestCase, TestResultStats
-
-parser = OptionParser(description=__doc__)
-parser.add_option("--no-passthrough", action="store_true",
- help="Hide all non subunit input.", default=False, dest="no_passthrough")
-(options, args) = parser.parse_args()
-result = TestResultStats(sys.stdout)
-if options.no_passthrough:
- passthrough_stream = DiscardStream()
-else:
- passthrough_stream = None
-test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
-test.run(result)
-result.formatStats()
-if result.wasSuccessful():
- exit_code = 0
-else:
- exit_code = 1
-sys.exit(exit_code)
diff --git a/lib/subunit/filters/subunit-tags b/lib/subunit/filters/subunit-tags
deleted file mode 100755
index edbbfce480..0000000000
--- a/lib/subunit/filters/subunit-tags
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""A filter to change tags on a subunit stream.
-
-subunit-tags foo -> adds foo
-subunit-tags foo -bar -> adds foo and removes bar
-"""
-
-import sys
-
-from subunit import tag_stream
-sys.exit(tag_stream(sys.stdin, sys.stdout, sys.argv[1:]))
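
The same call works on in-memory streams; a minimal sketch with illustrative
input:

    from io import StringIO

    from subunit import tag_stream

    source = StringIO("test: a\nsuccess: a\n")
    sink = StringIO()
    tag_stream(source, sink, ["foo", "-bar"])
    print(sink.getvalue())  # the stream rewritten with 'foo' added and 'bar' removed
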
diff --git a/lib/subunit/filters/subunit2csv b/lib/subunit/filters/subunit2csv
deleted file mode 100755
index 14620ff674..0000000000
--- a/lib/subunit/filters/subunit2csv
+++ /dev/null
@@ -1,23 +0,0 @@
-#!/usr/bin/env python
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Turn a subunit stream into a CSV"""
-
-from subunit.filters import run_filter_script
-from subunit.test_results import CsvResult
-
-
-run_filter_script(CsvResult, __doc__)
diff --git a/lib/subunit/filters/subunit2gtk b/lib/subunit/filters/subunit2gtk
deleted file mode 100755
index c2cb2de3ce..0000000000
--- a/lib/subunit/filters/subunit2gtk
+++ /dev/null
@@ -1,259 +0,0 @@
-#!/usr/bin/env python
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-### The GTK progress bar __init__ function is derived from the pygtk tutorial:
-# The PyGTK Tutorial is Copyright (C) 2001-2005 John Finlay.
-#
-# The GTK Tutorial is Copyright (C) 1997 Ian Main.
-#
-# Copyright (C) 1998-1999 Tony Gale.
-#
-# Permission is granted to make and distribute verbatim copies of this manual
-# provided the copyright notice and this permission notice are preserved on all
-# copies.
-#
-# Permission is granted to copy and distribute modified versions of this
-# document under the conditions for verbatim copying, provided that this
-# copyright notice is included exactly as in the original, and that the entire
-# resulting derived work is distributed under the terms of a permission notice
-# identical to this one.
-#
-# Permission is granted to copy and distribute translations of this document
-# into another language, under the above conditions for modified versions.
-#
-# If you are intending to incorporate this document into a published work,
-# please contact the maintainer, and we will make an effort to ensure that you
-# have the most up to date information available.
-#
-# There is no guarantee that this document lives up to its intended purpose.
-# This is simply provided as a free resource. As such, the authors and
-# maintainers of the information provided within can not make any guarantee
-# that the information is even accurate.
-
-"""Display a subunit stream in a gtk progress window."""
-
-import sys
-import unittest
-
-import pygtk
-pygtk.require('2.0')
-import gtk, gtk.gdk, gobject
-
-from subunit import (
- PROGRESS_POP,
- PROGRESS_PUSH,
- PROGRESS_SET,
- TestProtocolServer,
- )
-from subunit.progress_model import ProgressModel
-
-
-class GTKTestResult(unittest.TestResult):
-
- def __init__(self):
- super(GTKTestResult, self).__init__()
- # Instance variables (in addition to TestResult)
- self.window = None
- self.run_label = None
- self.ok_label = None
- self.not_ok_label = None
- self.total_tests = None
-
- self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
- self.window.set_resizable(True)
-
- self.window.connect("destroy", gtk.main_quit)
- self.window.set_title("Tests...")
- self.window.set_border_width(0)
-
- vbox = gtk.VBox(False, 5)
- vbox.set_border_width(10)
- self.window.add(vbox)
- vbox.show()
-
- # Create a centering alignment object
- align = gtk.Alignment(0.5, 0.5, 0, 0)
- vbox.pack_start(align, False, False, 5)
- align.show()
-
- # Create the ProgressBar
- self.pbar = gtk.ProgressBar()
- align.add(self.pbar)
- self.pbar.set_text("Running")
- self.pbar.show()
- self.progress_model = ProgressModel()
-
- separator = gtk.HSeparator()
- vbox.pack_start(separator, False, False, 0)
- separator.show()
-
- # rows, columns, homogeneous
- table = gtk.Table(2, 3, False)
- vbox.pack_start(table, False, True, 0)
- table.show()
- # Show summary details about the run. Could use an expander.
- label = gtk.Label("Run:")
- table.attach(label, 0, 1, 1, 2, gtk.EXPAND | gtk.FILL,
- gtk.EXPAND | gtk.FILL, 5, 5)
- label.show()
- self.run_label = gtk.Label("N/A")
- table.attach(self.run_label, 1, 2, 1, 2, gtk.EXPAND | gtk.FILL,
- gtk.EXPAND | gtk.FILL, 5, 5)
- self.run_label.show()
-
- label = gtk.Label("OK:")
- table.attach(label, 0, 1, 2, 3, gtk.EXPAND | gtk.FILL,
- gtk.EXPAND | gtk.FILL, 5, 5)
- label.show()
- self.ok_label = gtk.Label("N/A")
- table.attach(self.ok_label, 1, 2, 2, 3, gtk.EXPAND | gtk.FILL,
- gtk.EXPAND | gtk.FILL, 5, 5)
- self.ok_label.show()
-
- label = gtk.Label("Not OK:")
- table.attach(label, 0, 1, 3, 4, gtk.EXPAND | gtk.FILL,
- gtk.EXPAND | gtk.FILL, 5, 5)
- label.show()
- self.not_ok_label = gtk.Label("N/A")
- table.attach(self.not_ok_label, 1, 2, 3, 4, gtk.EXPAND | gtk.FILL,
- gtk.EXPAND | gtk.FILL, 5, 5)
- self.not_ok_label.show()
-
- self.window.show()
- # For the demo.
- self.window.set_keep_above(True)
- self.window.present()
-
- def stopTest(self, test):
- super(GTKTestResult, self).stopTest(test)
- self.progress_model.advance()
- if self.progress_model.width() == 0:
- self.pbar.pulse()
- else:
- pos = self.progress_model.pos()
- width = self.progress_model.width()
- percentage = (pos / float(width))
- self.pbar.set_fraction(percentage)
-
- def stopTestRun(self):
- try:
- super(GTKTestResult, self).stopTestRun()
- except AttributeError:
- pass
- self.pbar.set_text('Finished')
-
- def addError(self, test, err):
- super(GTKTestResult, self).addError(test, err)
- self.update_counts()
-
- def addFailure(self, test, err):
- super(GTKTestResult, self).addFailure(test, err)
- self.update_counts()
-
- def addSuccess(self, test):
- super(GTKTestResult, self).addSuccess(test)
- self.update_counts()
-
- def addSkip(self, test, reason):
- # addSkip is new in Python 2.7/3.1
- addSkip = getattr(super(GTKTestResult, self), 'addSkip', None)
- if callable(addSkip):
- addSkip(test, reason)
- self.update_counts()
-
- def addExpectedFailure(self, test, err):
- # addExpectedFailure is new in Python 2.7/3.1
- addExpectedFailure = getattr(super(GTKTestResult, self),
- 'addExpectedFailure', None)
- if callable(addExpectedFailure):
- addExpectedFailure(test, err)
- self.update_counts()
-
- def addUnexpectedSuccess(self, test):
- # addUnexpectedSuccess is new in Python 2.7/3.1
- addUnexpectedSuccess = getattr(super(GTKTestResult, self),
- 'addUnexpectedSuccess', None)
- if callable(addUnexpectedSuccess):
- addUnexpectedSuccess(test)
- self.update_counts()
-
- def progress(self, offset, whence):
- if whence == PROGRESS_PUSH:
- self.progress_model.push()
- elif whence == PROGRESS_POP:
- self.progress_model.pop()
- elif whence == PROGRESS_SET:
- self.total_tests = offset
- self.progress_model.set_width(offset)
- else:
- self.total_tests += offset
- self.progress_model.adjust_width(offset)
-
- def time(self, a_datetime):
- # We don't try to estimate completion yet.
- pass
-
- def update_counts(self):
- self.run_label.set_text(str(self.testsRun))
- bad = len(self.failures + self.errors)
- self.ok_label.set_text(str(self.testsRun - bad))
- self.not_ok_label.set_text(str(bad))
-
-
-class GIOProtocolTestCase(object):
-
- def __init__(self, stream, result, on_finish):
- self.stream = stream
- self.schedule_read()
- self.hup_id = gobject.io_add_watch(stream, gobject.IO_HUP, self.hup)
- self.protocol = TestProtocolServer(result)
- self.on_finish = on_finish
-
- def read(self, source, condition, all=False):
- #NB: \o/ actually blocks
- line = source.readline()
- if not line:
- self.protocol.lostConnection()
- self.on_finish()
- return False
- self.protocol.lineReceived(line)
- # schedule more IO shortly - if we say we're willing to do it
- # immediately we starve things.
- if not all:
- source_id = gobject.timeout_add(1, self.schedule_read)
- return False
- else:
- return True
-
- def schedule_read(self):
- self.read_id = gobject.io_add_watch(self.stream, gobject.IO_IN, self.read)
-
- def hup(self, source, condition):
- while self.read(source, condition, all=True): pass
- self.protocol.lostConnection()
- gobject.source_remove(self.read_id)
- self.on_finish()
- return False
-
-
-result = GTKTestResult()
-test = GIOProtocolTestCase(sys.stdin, result, result.stopTestRun)
-gtk.main()
-if result.wasSuccessful():
- exit_code = 0
-else:
- exit_code = 1
-sys.exit(exit_code)
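
The progress() handler above drives ProgressModel; roughly, and using only the
methods referenced in this file:

    from subunit.progress_model import ProgressModel

    model = ProgressModel()
    model.set_width(4)     # progress: 4 (PROGRESS_SET)
    model.advance()        # one test finished
    model.advance()
    print(model.pos(), model.width())  # 2 4 -> the bar sits at 50%
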
diff --git a/lib/subunit/filters/subunit2junitxml b/lib/subunit/filters/subunit2junitxml
deleted file mode 100755
index d568c71dd4..0000000000
--- a/lib/subunit/filters/subunit2junitxml
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Filter a subunit stream to get aggregate statistics."""
-
-
-import sys
-from subunit.filters import run_filter_script
-
-try:
- from junitxml import JUnitXmlResult
-except ImportError:
- sys.stderr.write("python-junitxml (https://launchpad.net/pyjunitxml or "
- "http://pypi.python.org/pypi/junitxml) is required for this filter.")
- raise
-
-
-run_filter_script(JUnitXmlResult, __doc__)
diff --git a/lib/subunit/filters/subunit2pyunit b/lib/subunit/filters/subunit2pyunit
deleted file mode 100755
index 83a23d14d1..0000000000
--- a/lib/subunit/filters/subunit2pyunit
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/usr/bin/env python
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Display a subunit stream through python's unittest test runner."""
-
-from optparse import OptionParser
-import sys
-import unittest
-
-from subunit import DiscardStream, ProtocolTestCase, TestProtocolServer
-
-parser = OptionParser(description=__doc__)
-parser.add_option("--no-passthrough", action="store_true",
- help="Hide all non subunit input.", default=False, dest="no_passthrough")
-parser.add_option("--progress", action="store_true",
- help="Use bzrlib's test reporter (requires bzrlib)",
- default=False)
-(options, args) = parser.parse_args()
-if options.no_passthrough:
- passthrough_stream = DiscardStream()
-else:
- passthrough_stream = None
-test = ProtocolTestCase(sys.stdin, passthrough=passthrough_stream)
-if options.progress:
- from bzrlib.tests import TextTestRunner
- from bzrlib import ui
- ui.ui_factory = ui.make_ui_for_terminal(None, sys.stdout, sys.stderr)
- runner = TextTestRunner()
-else:
- runner = unittest.TextTestRunner(verbosity=2)
-if runner.run(test).wasSuccessful():
- exit_code = 0
-else:
- exit_code = 1
-sys.exit(exit_code)
diff --git a/lib/subunit/filters/tap2subunit b/lib/subunit/filters/tap2subunit
deleted file mode 100755
index c571972225..0000000000
--- a/lib/subunit/filters/tap2subunit
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""A filter that reads a TAP stream and outputs a subunit stream.
-
-More information on TAP is available at
-http://testanything.org/wiki/index.php/Main_Page.
-"""
-
-import sys
-
-from subunit import TAP2SubUnit
-sys.exit(TAP2SubUnit(sys.stdin, sys.stdout))
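
An in-memory equivalent of the conversion this script performs; the TAP input
is illustrative:

    from io import StringIO

    from subunit import TAP2SubUnit

    tap = StringIO("1..2\nok 1 - addition\nnot ok 2 - subtraction\n")
    out = StringIO()
    TAP2SubUnit(tap, out)
    print(out.getvalue())  # test:/success:/failure: lines for the two tests
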
diff --git a/lib/subunit/libcppunit_subunit.pc.in b/lib/subunit/libcppunit_subunit.pc.in
deleted file mode 100644
index 98982c78ae..0000000000
--- a/lib/subunit/libcppunit_subunit.pc.in
+++ /dev/null
@@ -1,11 +0,0 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-Name: cppunit subunit listener
-Description: Subunit output listener for the CPPUnit test library.
-URL: http://launchpad.net/subunit
-Version: @VERSION@
-Libs: -L${libdir} -lsubunit
-Cflags: -I${includedir}
diff --git a/lib/subunit/libsubunit.pc.in b/lib/subunit/libsubunit.pc.in
deleted file mode 100644
index 67564148e8..0000000000
--- a/lib/subunit/libsubunit.pc.in
+++ /dev/null
@@ -1,11 +0,0 @@
-prefix=@prefix@
-exec_prefix=@exec_prefix@
-libdir=@libdir@
-includedir=@includedir@
-
-Name: subunit
-Description: Subunit test protocol library.
-URL: http://launchpad.net/subunit
-Version: @VERSION@
-Libs: -L${libdir} -lsubunit
-Cflags: -I${includedir}
diff --git a/lib/subunit/perl/Makefile.PL.in b/lib/subunit/perl/Makefile.PL.in
deleted file mode 100755
index cf5e6c4c76..0000000000
--- a/lib/subunit/perl/Makefile.PL.in
+++ /dev/null
@@ -1,21 +0,0 @@
-use ExtUtils::MakeMaker;
-WriteMakefile(
- 'INSTALL_BASE' => '@prefix@',
- 'NAME' => 'Subunit',
- 'VERSION' => '@SUBUNIT_VERSION@',
- 'test' => { 'TESTS' => 'tests/*.pl' },
- 'PMLIBDIRS' => [ 'lib' ],
- 'EXE_FILES' => [ '@abs_srcdir@/subunit-diff' ],
-);
-sub MY::postamble {
-<<'EOT';
-check: # test
-
-uninstall_distcheck:
- rm -fr $(DESTINSTALLARCHLIB)
- rm MYMETA.yml
-
-VPATH = @srcdir@
-.PHONY: uninstall_distcheck
-EOT
-}
diff --git a/lib/subunit/perl/lib/Subunit.pm b/lib/subunit/perl/lib/Subunit.pm
deleted file mode 100644
index 72aa1ebd66..0000000000
--- a/lib/subunit/perl/lib/Subunit.pm
+++ /dev/null
@@ -1,183 +0,0 @@
-# Perl module for parsing and generating the Subunit protocol
-# Copyright (C) 2008-2009 Jelmer Vernooij <jelmer@samba.org>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-
-package Subunit;
-use POSIX;
-
-require Exporter;
-@ISA = qw(Exporter);
-@EXPORT_OK = qw(parse_results $VERSION);
-
-use vars qw ( $VERSION );
-
-$VERSION = '0.0.2';
-
-use strict;
-
-sub parse_results($$$)
-{
- my ($msg_ops, $statistics, $fh) = @_;
- my $expected_fail = 0;
- my $unexpected_fail = 0;
- my $unexpected_err = 0;
- my $open_tests = [];
-
- while(<$fh>) {
- if (/^test: (.+)\n/) {
- $msg_ops->control_msg($_);
- $msg_ops->start_test($1);
- push (@$open_tests, $1);
- } elsif (/^time: (\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)Z\n/) {
- $msg_ops->report_time(mktime($6, $5, $4, $3, $2-1, $1-1900));
- } elsif (/^(success|successful|failure|fail|skip|knownfail|error|xfail): (.*?)( \[)?([ \t]*)\n/) {
- $msg_ops->control_msg($_);
- my $result = $1;
- my $testname = $2;
- my $reason = undef;
- if ($3) {
- $reason = "";
- # reason may be specified in next lines
- my $terminated = 0;
- while(<$fh>) {
- $msg_ops->control_msg($_);
- if ($_ eq "]\n") { $terminated = 1; last; } else { $reason .= $_; }
- }
-
- unless ($terminated) {
- $statistics->{TESTS_ERROR}++;
- $msg_ops->end_test($testname, "error", 1, "reason ($result) interrupted");
- return 1;
- }
- }
- if ($result eq "success" or $result eq "successful") {
- pop(@$open_tests); #FIXME: Check that popped value == $testname
- $statistics->{TESTS_EXPECTED_OK}++;
- $msg_ops->end_test($testname, $result, 0, $reason);
- } elsif ($result eq "xfail" or $result eq "knownfail") {
- pop(@$open_tests); #FIXME: Check that popped value == $testname
- $statistics->{TESTS_EXPECTED_FAIL}++;
- $msg_ops->end_test($testname, $result, 0, $reason);
- $expected_fail++;
- } elsif ($result eq "failure" or $result eq "fail") {
- pop(@$open_tests); #FIXME: Check that popped value == $testname
- $statistics->{TESTS_UNEXPECTED_FAIL}++;
- $msg_ops->end_test($testname, $result, 1, $reason);
- $unexpected_fail++;
- } elsif ($result eq "skip") {
- $statistics->{TESTS_SKIP}++;
- my $last = pop(@$open_tests);
- if (defined($last) and $last ne $testname) {
- push (@$open_tests, $testname);
- }
- $msg_ops->end_test($testname, $result, 0, $reason);
- } elsif ($result eq "error") {
- $statistics->{TESTS_ERROR}++;
- pop(@$open_tests); #FIXME: Check that popped value == $testname
- $msg_ops->end_test($testname, $result, 1, $reason);
- $unexpected_err++;
- }
- } else {
- $msg_ops->output_msg($_);
- }
- }
-
- while ($#$open_tests+1 > 0) {
- $msg_ops->end_test(pop(@$open_tests), "error", 1,
- "was started but never finished!");
- $statistics->{TESTS_ERROR}++;
- $unexpected_err++;
- }
-
- return 1 if $unexpected_err > 0;
- return 1 if $unexpected_fail > 0;
- return 0;
-}
-
-sub start_test($)
-{
- my ($testname) = @_;
- print "test: $testname\n";
-}
-
-sub end_test($$;$)
-{
- my $name = shift;
- my $result = shift;
- my $reason = shift;
- if ($reason) {
- print "$result: $name [\n";
- print "$reason";
- print "]\n";
- } else {
- print "$result: $name\n";
- }
-}
-
-sub skip_test($;$)
-{
- my $name = shift;
- my $reason = shift;
- end_test($name, "skip", $reason);
-}
-
-sub fail_test($;$)
-{
- my $name = shift;
- my $reason = shift;
- end_test($name, "failure", $reason);
-}
-
-sub success_test($;$)
-{
- my $name = shift;
- my $reason = shift;
- end_test($name, "success", $reason);
-}
-
-sub xfail_test($;$)
-{
- my $name = shift;
- my $reason = shift;
- end_test($name, "xfail", $reason);
-}
-
-sub report_time($)
-{
- my ($time) = @_;
- my ($sec, $min, $hour, $mday, $mon, $year, $wday, $yday, $isdst) = localtime($time);
- printf "time: %04d-%02d-%02d %02d:%02d:%02dZ\n", $year+1900, $mon, $mday, $hour, $min, $sec;
-}
-
-sub progress_pop()
-{
- print "progress: pop\n";
-}
-
-sub progress_push()
-{
- print "progress: push\n";
-}
-
-sub progress($;$)
-{
- my ($count, $whence) = @_;
-
- unless(defined($whence)) {
- $whence = "";
- }
-
- print "progress: $whence$count\n";
-}
-
-1;
diff --git a/lib/subunit/perl/lib/Subunit/Diff.pm b/lib/subunit/perl/lib/Subunit/Diff.pm
deleted file mode 100644
index e7841c3b00..0000000000
--- a/lib/subunit/perl/lib/Subunit/Diff.pm
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/perl
-# Diff two subunit streams
-# Copyright (C) Jelmer Vernooij <jelmer@samba.org>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-
-package Subunit::Diff;
-
-use strict;
-
-use Subunit qw(parse_results);
-
-sub control_msg() { }
-sub report_time($$) { }
-
-sub output_msg($$)
-{
- my ($self, $msg) = @_;
-
- # No output for now, perhaps later diff this as well ?
-}
-
-sub start_test($$)
-{
- my ($self, $testname) = @_;
-}
-
-sub end_test($$$$$)
-{
- my ($self, $testname, $result, $unexpected, $reason) = @_;
-
- $self->{$testname} = $result;
-}
-
-sub new {
- my ($class) = @_;
-
- my $self = {
- };
- bless($self, $class);
-}
-
-sub from_file($)
-{
- my ($path) = @_;
- my $statistics = {
- TESTS_UNEXPECTED_OK => 0,
- TESTS_EXPECTED_OK => 0,
- TESTS_UNEXPECTED_FAIL => 0,
- TESTS_EXPECTED_FAIL => 0,
- TESTS_ERROR => 0,
- TESTS_SKIP => 0,
- };
-
- my $ret = new Subunit::Diff();
- open(IN, $path) or return;
- parse_results($ret, $statistics, *IN);
- close(IN);
- return $ret;
-}
-
-sub diff($$)
-{
- my ($old, $new) = @_;
- my $ret = {};
-
- foreach my $testname (keys %$old) {
- if ($new->{$testname} ne $old->{$testname}) {
- $ret->{$testname} = [$old->{$testname}, $new->{$testname}];
- }
- }
-
- return $ret;
-}
-
-1;
diff --git a/lib/subunit/perl/subunit-diff b/lib/subunit/perl/subunit-diff
deleted file mode 100755
index 581e832ae3..0000000000
--- a/lib/subunit/perl/subunit-diff
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/perl
-# Diff two subunit streams
-# Copyright (C) Jelmer Vernooij <jelmer@samba.org>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-
-use Getopt::Long;
-use strict;
-use FindBin qw($RealBin $Script);
-use lib "$RealBin/lib";
-use Subunit::Diff;
-
-my $old = Subunit::Diff::from_file($ARGV[0]);
-my $new = Subunit::Diff::from_file($ARGV[1]);
-
-my $ret = Subunit::Diff::diff($old, $new);
-
-foreach my $e (sort(keys %$ret)) {
- printf "%s: %s -> %s\n", $e, $ret->{$e}[0], $ret->{$e}[1];
-}
-
-0;
diff --git a/lib/subunit/python/iso8601/LICENSE b/lib/subunit/python/iso8601/LICENSE
deleted file mode 100644
index 5ca93dae79..0000000000
--- a/lib/subunit/python/iso8601/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2007 Michael Twomey
-
-Permission is hereby granted, free of charge, to any person obtaining a
-copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be included
-in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/lib/subunit/python/iso8601/README b/lib/subunit/python/iso8601/README
deleted file mode 100644
index 5ec9d45597..0000000000
--- a/lib/subunit/python/iso8601/README
+++ /dev/null
@@ -1,26 +0,0 @@
-A simple package to deal with ISO 8601 date time formats.
-
-ISO 8601 defines a neutral, unambiguous date string format, which also
-has the property of sorting naturally.
-
-e.g. YYYY-MM-DDTHH:MM:SSZ or 2007-01-25T12:00:00Z
-
-Currently this covers only the most common date formats encountered; not
-all of ISO 8601 is handled.
-
-Currently the following formats are handled:
-
-* 2006-01-01T00:00:00Z
-* 2006-01-01T00:00:00[+-]00:00
-
-I'll add more as I encounter them in my day to day life. Patches with
-new formats and tests will be gratefully accepted of course :)
-
-References:
-
-* http://www.cl.cam.ac.uk/~mgk25/iso-time.html - simple overview
-
-* http://hydracen.com/dx/iso8601.htm - more detailed enumeration of
- valid formats.
-
-See the LICENSE file for the license this package is released under.
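
A quick doctest-style check of the two documented forms (matching the example
in setup.py below):

    import iso8601

    print(iso8601.parse_date("2006-01-01T00:00:00Z"))
    print(iso8601.parse_date("2006-01-01T00:00:00+01:00"))
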
diff --git a/lib/subunit/python/iso8601/README.subunit b/lib/subunit/python/iso8601/README.subunit
deleted file mode 100644
index d1ed8a11a6..0000000000
--- a/lib/subunit/python/iso8601/README.subunit
+++ /dev/null
@@ -1,5 +0,0 @@
-This is a [slightly rearranged] import of http://pypi.python.org/pypi/iso8601/
-version 0.1.4. The OS X hidden files have been stripped, and the package
-turned into a single module, to simplify installation. The remainder of the
-source distribution is included in the subunit source tree at python/iso8601
-for reference.
diff --git a/lib/subunit/python/iso8601/setup.py b/lib/subunit/python/iso8601/setup.py
deleted file mode 100644
index cdb61ecf6a..0000000000
--- a/lib/subunit/python/iso8601/setup.py
+++ /dev/null
@@ -1,58 +0,0 @@
-try:
- from setuptools import setup
-except ImportError:
- from distutils.core import setup
-
-long_description="""Simple module to parse ISO 8601 dates
-
-This module parses the most common forms of ISO 8601 date strings (e.g.
-2007-01-14T20:34:22+00:00) into datetime objects.
-
->>> import iso8601
->>> iso8601.parse_date("2007-01-25T12:00:00Z")
-datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
->>>
-
-Changes
-=======
-
-0.1.4
------
-
-* The default_timezone argument wasn't being passed through correctly,
- UTC was being used in every case. Fixes issue 10.
-
-0.1.3
------
-
-* Fixed the microsecond handling, the generated microsecond values were
- way too small. Fixes issue 9.
-
-0.1.2
------
-
-* Adding ParseError to __all__ in iso8601 module, allowing people to import it.
- Addresses issue 7.
-* Be a little more flexible when dealing with dates without leading zeroes.
- This violates the spec a little, but handles more dates as seen in the
- field. Addresses issue 6.
-* Allow date/time separators other than T.
-
-0.1.1
------
-
-* When parsing dates without a timezone the specified default is used. If no
- default is specified then UTC is used. Addresses issue 4.
-"""
-
-setup(
- name="iso8601",
- version="0.1.4",
- description=long_description.split("\n")[0],
- long_description=long_description,
- author="Michael Twomey",
- author_email="micktwomey+iso8601@gmail.com",
- url="http://code.google.com/p/pyiso8601/",
- packages=["iso8601"],
- license="MIT",
-)
diff --git a/lib/subunit/python/iso8601/test_iso8601.py b/lib/subunit/python/iso8601/test_iso8601.py
deleted file mode 100644
index ff9e2731cf..0000000000
--- a/lib/subunit/python/iso8601/test_iso8601.py
+++ /dev/null
@@ -1,111 +0,0 @@
-import iso8601
-
-def test_iso8601_regex():
- assert iso8601.ISO8601_REGEX.match("2006-10-11T00:14:33Z")
-
-def test_timezone_regex():
- assert iso8601.TIMEZONE_REGEX.match("+01:00")
- assert iso8601.TIMEZONE_REGEX.match("+00:00")
- assert iso8601.TIMEZONE_REGEX.match("+01:20")
- assert iso8601.TIMEZONE_REGEX.match("-01:00")
-
-def test_parse_date():
- d = iso8601.parse_date("2006-10-20T15:34:56Z")
- assert d.year == 2006
- assert d.month == 10
- assert d.day == 20
- assert d.hour == 15
- assert d.minute == 34
- assert d.second == 56
- assert d.tzinfo == iso8601.UTC
-
-def test_parse_date_fraction():
- d = iso8601.parse_date("2006-10-20T15:34:56.123Z")
- assert d.year == 2006
- assert d.month == 10
- assert d.day == 20
- assert d.hour == 15
- assert d.minute == 34
- assert d.second == 56
- assert d.microsecond == 123000
- assert d.tzinfo == iso8601.UTC
-
-def test_parse_date_fraction_2():
- """From bug 6
-
- """
- d = iso8601.parse_date("2007-5-7T11:43:55.328Z'")
- assert d.year == 2007
- assert d.month == 5
- assert d.day == 7
- assert d.hour == 11
- assert d.minute == 43
- assert d.second == 55
- assert d.microsecond == 328000
- assert d.tzinfo == iso8601.UTC
-
-def test_parse_date_tz():
- d = iso8601.parse_date("2006-10-20T15:34:56.123+02:30")
- assert d.year == 2006
- assert d.month == 10
- assert d.day == 20
- assert d.hour == 15
- assert d.minute == 34
- assert d.second == 56
- assert d.microsecond == 123000
- assert d.tzinfo.tzname(None) == "+02:30"
- offset = d.tzinfo.utcoffset(None)
- assert offset.days == 0
- assert offset.seconds == 60 * 60 * 2.5
-
-def test_parse_invalid_date():
- try:
- iso8601.parse_date(None)
- except iso8601.ParseError:
- pass
- else:
- assert 1 == 2
-
-def test_parse_invalid_date2():
- try:
- iso8601.parse_date("23")
- except iso8601.ParseError:
- pass
- else:
- assert 1 == 2
-
-def test_parse_no_timezone():
- """issue 4 - Handle datetime string without timezone
-
- This tests what happens when you parse a date with no timezone. While not
- strictly correct this is quite common. I'll assume UTC for the time zone
- in this case.
- """
- d = iso8601.parse_date("2007-01-01T08:00:00")
- assert d.year == 2007
- assert d.month == 1
- assert d.day == 1
- assert d.hour == 8
- assert d.minute == 0
- assert d.second == 0
- assert d.microsecond == 0
- assert d.tzinfo == iso8601.UTC
-
-def test_parse_no_timezone_different_default():
- tz = iso8601.FixedOffset(2, 0, "test offset")
- d = iso8601.parse_date("2007-01-01T08:00:00", default_timezone=tz)
- assert d.tzinfo == tz
-
-def test_space_separator():
- """Handle a separator other than T
-
- """
- d = iso8601.parse_date("2007-06-23 06:40:34.00Z")
- assert d.year == 2007
- assert d.month == 6
- assert d.day == 23
- assert d.hour == 6
- assert d.minute == 40
- assert d.second == 34
- assert d.microsecond == 0
- assert d.tzinfo == iso8601.UTC
diff --git a/lib/subunit/python/subunit/__init__.py b/lib/subunit/python/subunit/__init__.py
deleted file mode 100644
index 42dcf297e4..0000000000
--- a/lib/subunit/python/subunit/__init__.py
+++ /dev/null
@@ -1,1314 +0,0 @@
-#
-# subunit: extensions to Python unittest to get test results from subprocesses.
-# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Subunit - a streaming test protocol
-
-Overview
-++++++++
-
-The ``subunit`` Python package provides a number of ``unittest`` extensions
-which can be used to cause tests to output Subunit, to parse Subunit streams
-into test activity, perform seamless test isolation within a regular test
-case and variously sort, filter and report on test runs.
-
-
-Key Classes
------------
-
-The ``subunit.TestProtocolClient`` class is a ``unittest.TestResult``
-extension which will translate a test run into a Subunit stream.
-
-The ``subunit.ProtocolTestCase`` class is an adapter between the Subunit wire
-protocol and the ``unittest.TestCase`` object protocol. It is used to translate
-a stream into a test run, which regular ``unittest.TestResult`` objects can
-process and report/inspect.
-
-Subunit has support for non-blocking usage too, for use with asyncore or
-Twisted. See the ``TestProtocolServer`` parser class for more details.
-
-Subunit includes extensions to the Python ``TestResult`` protocol. These are
-all done in a compatible manner: ``TestResult`` objects that do not implement
-the extension methods will not cause errors to be raised, instead the extension
-will either lose fidelity (for instance, folding expected failures to success
-in Python versions < 2.7 or 3.1), or discard the extended data (for extra
-details, tags, timestamping and progress markers).
-
-The test outcome methods ``addSuccess``, ``addError``, ``addExpectedFailure``,
-``addFailure``, ``addSkip`` take an optional keyword parameter ``details``
-which can be used instead of the usual python unittest parameter.
-When used the value of details should be a dict from ``string`` to
-``testtools.content.Content`` objects. This is a draft API being worked on with
-the Testing In Python mailing list, with the goal of permitting a common
-way to provide additional data beyond a traceback, such as captured data from
-disk, logging messages etc. The reference for this API is in testtools (0.9.0
-and newer).
-
-The ``tags(new_tags, gone_tags)`` method is called (if present) to add or
-remove tags in the test run that is currently executing. If called when no
-test is in progress (that is, if called outside of the ``startTest``,
-``stopTest`` pair), the tags apply to all subsequent tests. If called
-when a test is in progress, then the tags only apply to that test.
-
-The ``time(a_datetime)`` method is called (if present) when a ``time:``
-directive is encountered in a Subunit stream. This is used to tell a TestResult
-about the time that events in the stream occurred at, to allow reconstructing
-test timing from a stream.
-
-The ``progress(offset, whence)`` method controls progress data for a stream.
-The offset parameter is an int, and whence is one of subunit.PROGRESS_CUR,
-subunit.PROGRESS_SET, PROGRESS_PUSH, PROGRESS_POP. Push and pop operations
-ignore the offset parameter.
-
-
-Python test support
--------------------
-
-``subunit.run`` is a convenience wrapper to run a Python test suite via
-the command line, reporting via Subunit::
-
- $ python -m subunit.run mylib.tests.test_suite
-
-The ``IsolatedTestSuite`` class is a TestSuite that forks before running its
-tests, allowing isolation between the test runner and some tests.
-
-Similarly, ``IsolatedTestCase`` is a base class which can be subclassed to get
-tests that will fork() before that individual test is run.
-
-``ExecTestCase`` is a convenience wrapper for running an external
-program to get a Subunit stream and then report that back to an arbitrary
-result object::
-
- class AggregateTests(subunit.ExecTestCase):
-
- def test_script_one(self):
- './bin/script_one'
-
- def test_script_two(self):
- './bin/script_two'
-
- # Normally your test loader would take care of this automatically;
- # it is only spelt out in detail here for clarity.
- suite = unittest.TestSuite([AggregateTests("test_script_one"),
- AggregateTests("test_script_two")])
- # Create any TestResult class you like.
- result = unittest._TextTestResult(sys.stdout)
- # And run your suite as normal, Subunit will exec each external script as
- # needed and report to your result object.
- suite.run(result)
-
-Utility modules
----------------
-
-* subunit.chunked contains HTTP chunked encoding/decoding logic.
-* subunit.test_results contains TestResult helper classes.
-"""
-
-import os
-import re
-import subprocess
-import sys
-import unittest
-if sys.version_info > (3, 0):
- from io import UnsupportedOperation as _UnsupportedOperation
-else:
- _UnsupportedOperation = AttributeError
-
-
-from testtools import content, content_type, ExtendedToOriginalDecorator
-from testtools.content import TracebackContent
-from testtools.compat import _b, _u, BytesIO, StringIO
-try:
- from testtools.testresult.real import _StringException
- RemoteException = _StringException
- # For testing: different pythons have different str() implementations.
- if sys.version_info > (3, 0):
- _remote_exception_str = "testtools.testresult.real._StringException"
- _remote_exception_str_chunked = "34\r\n" + _remote_exception_str
- else:
- _remote_exception_str = "_StringException"
- _remote_exception_str_chunked = "1A\r\n" + _remote_exception_str
-except ImportError:
- raise ImportError ("testtools.testresult.real does not contain "
- "_StringException, check your version.")
-from testtools import testresult
-
-from subunit import chunked, details, iso8601, test_results
-
-# same format as sys.version_info: "A tuple containing the five components of
-# the version number: major, minor, micro, releaselevel, and serial. All
-# values except releaselevel are integers; the release level is 'alpha',
-# 'beta', 'candidate', or 'final'. The version_info value corresponding to the
-# Python version 2.0 is (2, 0, 0, 'final', 0)." Additionally we use a
-# releaselevel of 'dev' for unreleased under-development code.
-#
-# If the releaselevel is 'alpha' then the major/minor/micro components are not
-# established at this point, and setup.py will use a version of next-$(revno).
-# If the releaselevel is 'final', then the tarball will be major.minor.micro.
-# Otherwise it is major.minor.micro~$(revno).
-
-__version__ = (0, 0, 9, 'final', 0)
-
-PROGRESS_SET = 0
-PROGRESS_CUR = 1
-PROGRESS_PUSH = 2
-PROGRESS_POP = 3
-
-
-def test_suite():
- import subunit.tests
- return subunit.tests.test_suite()
-
-
-def join_dir(base_path, path):
- """
- Returns an absolute path to C{path}, calculated relative to the parent
- of C{base_path}.
-
- @param base_path: A path to a file or directory.
- @param path: An absolute path, or a path relative to the containing
- directory of C{base_path}.
-
- @return: An absolute path to C{path}.
- """
- return os.path.join(os.path.dirname(os.path.abspath(base_path)), path)
-
-
-def tags_to_new_gone(tags):
- """Split a list of tags into a new_set and a gone_set."""
- new_tags = set()
- gone_tags = set()
- for tag in tags:
- if tag[0] == '-':
- gone_tags.add(tag[1:])
- else:
- new_tags.add(tag)
- return new_tags, gone_tags
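
A quick sketch of the ``tags_to_new_gone`` helper above (the tag names are
invented for illustration):

    from subunit import tags_to_new_gone

    # A leading '-' marks a tag for removal; everything else is an addition.
    new_tags, gone_tags = tags_to_new_gone(["web", "quick", "-slow"])
    assert new_tags == set(["web", "quick"])
    assert gone_tags == set(["slow"])
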
-
-
-class DiscardStream(object):
- """A filelike object which discards what is written to it."""
-
- def fileno(self):
- raise _UnsupportedOperation()
-
- def write(self, bytes):
- pass
-
- def read(self, len=0):
- return _b('')
-
-
-class _ParserState(object):
- """State for the subunit parser."""
-
- def __init__(self, parser):
- self.parser = parser
- self._test_sym = (_b('test'), _b('testing'))
- self._colon_sym = _b(':')
- self._error_sym = (_b('error'),)
- self._failure_sym = (_b('failure'),)
- self._progress_sym = (_b('progress'),)
- self._skip_sym = _b('skip')
- self._success_sym = (_b('success'), _b('successful'))
- self._tags_sym = (_b('tags'),)
- self._time_sym = (_b('time'),)
- self._xfail_sym = (_b('xfail'),)
- self._uxsuccess_sym = (_b('uxsuccess'),)
- self._start_simple = _u(" [")
- self._start_multipart = _u(" [ multipart")
-
- def addError(self, offset, line):
- """An 'error:' directive has been read."""
- self.parser.stdOutLineReceived(line)
-
- def addExpectedFail(self, offset, line):
- """An 'xfail:' directive has been read."""
- self.parser.stdOutLineReceived(line)
-
- def addFailure(self, offset, line):
- """A 'failure:' directive has been read."""
- self.parser.stdOutLineReceived(line)
-
- def addSkip(self, offset, line):
- """A 'skip:' directive has been read."""
- self.parser.stdOutLineReceived(line)
-
- def addSuccess(self, offset, line):
- """A 'success:' directive has been read."""
- self.parser.stdOutLineReceived(line)
-
- def lineReceived(self, line):
- """a line has been received."""
- parts = line.split(None, 1)
- if len(parts) == 2 and line.startswith(parts[0]):
- cmd, rest = parts
- offset = len(cmd) + 1
- cmd = cmd.rstrip(self._colon_sym)
- if cmd in self._test_sym:
- self.startTest(offset, line)
- elif cmd in self._error_sym:
- self.addError(offset, line)
- elif cmd in self._failure_sym:
- self.addFailure(offset, line)
- elif cmd in self._progress_sym:
- self.parser._handleProgress(offset, line)
- elif cmd in self._skip_sym:
- self.addSkip(offset, line)
- elif cmd in self._success_sym:
- self.addSuccess(offset, line)
- elif cmd in self._tags_sym:
- self.parser._handleTags(offset, line)
- self.parser.subunitLineReceived(line)
- elif cmd in self._time_sym:
- self.parser._handleTime(offset, line)
- self.parser.subunitLineReceived(line)
- elif cmd in self._xfail_sym:
- self.addExpectedFail(offset, line)
- elif cmd in self._uxsuccess_sym:
- self.addUnexpectedSuccess(offset, line)
- else:
- self.parser.stdOutLineReceived(line)
- else:
- self.parser.stdOutLineReceived(line)
-
- def lostConnection(self):
- """Connection lost."""
- self.parser._lostConnectionInTest(_u('unknown state of '))
-
- def startTest(self, offset, line):
- """A test start command received."""
- self.parser.stdOutLineReceived(line)
-
-
-class _InTest(_ParserState):
- """State for the subunit parser after reading a test: directive."""
-
- def _outcome(self, offset, line, no_details, details_state):
- """An outcome directive has been read.
-
- :param no_details: Callable to call when no details are presented.
- :param details_state: The state to switch to for details
- processing of this outcome.
- """
- test_name = line[offset:-1].decode('utf8')
- if self.parser.current_test_description == test_name:
- self.parser._state = self.parser._outside_test
- self.parser.current_test_description = None
- no_details()
- self.parser.client.stopTest(self.parser._current_test)
- self.parser._current_test = None
- self.parser.subunitLineReceived(line)
- elif self.parser.current_test_description + self._start_simple == \
- test_name:
- self.parser._state = details_state
- details_state.set_simple()
- self.parser.subunitLineReceived(line)
- elif self.parser.current_test_description + self._start_multipart == \
- test_name:
- self.parser._state = details_state
- details_state.set_multipart()
- self.parser.subunitLineReceived(line)
- else:
- self.parser.stdOutLineReceived(line)
-
- def _error(self):
- self.parser.client.addError(self.parser._current_test,
- details={})
-
- def addError(self, offset, line):
- """An 'error:' directive has been read."""
- self._outcome(offset, line, self._error,
- self.parser._reading_error_details)
-
- def _xfail(self):
- self.parser.client.addExpectedFailure(self.parser._current_test,
- details={})
-
- def addExpectedFail(self, offset, line):
- """An 'xfail:' directive has been read."""
- self._outcome(offset, line, self._xfail,
- self.parser._reading_xfail_details)
-
- def _uxsuccess(self):
- self.parser.client.addUnexpectedSuccess(self.parser._current_test)
-
- def addUnexpectedSuccess(self, offset, line):
- """A 'uxsuccess:' directive has been read."""
- self._outcome(offset, line, self._uxsuccess,
- self.parser._reading_uxsuccess_details)
-
- def _failure(self):
- self.parser.client.addFailure(self.parser._current_test, details={})
-
- def addFailure(self, offset, line):
- """A 'failure:' directive has been read."""
- self._outcome(offset, line, self._failure,
- self.parser._reading_failure_details)
-
- def _skip(self):
- self.parser.client.addSkip(self.parser._current_test, details={})
-
- def addSkip(self, offset, line):
- """A 'skip:' directive has been read."""
- self._outcome(offset, line, self._skip,
- self.parser._reading_skip_details)
-
- def _succeed(self):
- self.parser.client.addSuccess(self.parser._current_test, details={})
-
- def addSuccess(self, offset, line):
- """A 'success:' directive has been read."""
- self._outcome(offset, line, self._succeed,
- self.parser._reading_success_details)
-
- def lostConnection(self):
- """Connection lost."""
- self.parser._lostConnectionInTest(_u(''))
-
-
-class _OutSideTest(_ParserState):
- """State for the subunit parser outside of a test context."""
-
- def lostConnection(self):
- """Connection lost."""
-
- def startTest(self, offset, line):
- """A test start command received."""
- self.parser._state = self.parser._in_test
- test_name = line[offset:-1].decode('utf8')
- self.parser._current_test = RemotedTestCase(test_name)
- self.parser.current_test_description = test_name
- self.parser.client.startTest(self.parser._current_test)
- self.parser.subunitLineReceived(line)
-
-
-class _ReadingDetails(_ParserState):
- """Common logic for readin state details."""
-
- def endDetails(self):
- """The end of a details section has been reached."""
- self.parser._state = self.parser._outside_test
- self.parser.current_test_description = None
- self._report_outcome()
- self.parser.client.stopTest(self.parser._current_test)
-
- def lineReceived(self, line):
- """a line has been received."""
- self.details_parser.lineReceived(line)
- self.parser.subunitLineReceived(line)
-
- def lostConnection(self):
- """Connection lost."""
- self.parser._lostConnectionInTest(_u('%s report of ') %
- self._outcome_label())
-
- def _outcome_label(self):
- """The label to describe this outcome."""
- raise NotImplementedError(self._outcome_label)
-
- def set_simple(self):
- """Start a simple details parser."""
- self.details_parser = details.SimpleDetailsParser(self)
-
- def set_multipart(self):
- """Start a multipart details parser."""
- self.details_parser = details.MultipartDetailsParser(self)
-
-
-class _ReadingFailureDetails(_ReadingDetails):
- """State for the subunit parser when reading failure details."""
-
- def _report_outcome(self):
- self.parser.client.addFailure(self.parser._current_test,
- details=self.details_parser.get_details())
-
- def _outcome_label(self):
- return "failure"
-
-
-class _ReadingErrorDetails(_ReadingDetails):
- """State for the subunit parser when reading error details."""
-
- def _report_outcome(self):
- self.parser.client.addError(self.parser._current_test,
- details=self.details_parser.get_details())
-
- def _outcome_label(self):
- return "error"
-
-
-class _ReadingExpectedFailureDetails(_ReadingDetails):
- """State for the subunit parser when reading xfail details."""
-
- def _report_outcome(self):
- self.parser.client.addExpectedFailure(self.parser._current_test,
- details=self.details_parser.get_details())
-
- def _outcome_label(self):
- return "xfail"
-
-
-class _ReadingUnexpectedSuccessDetails(_ReadingDetails):
- """State for the subunit parser when reading uxsuccess details."""
-
- def _report_outcome(self):
- self.parser.client.addUnexpectedSuccess(self.parser._current_test,
- details=self.details_parser.get_details())
-
- def _outcome_label(self):
- return "uxsuccess"
-
-
-class _ReadingSkipDetails(_ReadingDetails):
- """State for the subunit parser when reading skip details."""
-
- def _report_outcome(self):
- self.parser.client.addSkip(self.parser._current_test,
- details=self.details_parser.get_details("skip"))
-
- def _outcome_label(self):
- return "skip"
-
-
-class _ReadingSuccessDetails(_ReadingDetails):
- """State for the subunit parser when reading success details."""
-
- def _report_outcome(self):
- self.parser.client.addSuccess(self.parser._current_test,
- details=self.details_parser.get_details("success"))
-
- def _outcome_label(self):
- return "success"
-
-
-class TestProtocolServer(object):
- """A parser for subunit.
-
- :ivar tags: The current tags associated with the protocol stream.
- """
-
- def __init__(self, client, stream=None, forward_stream=None):
- """Create a TestProtocolServer instance.
-
- :param client: An object meeting the unittest.TestResult protocol.
- :param stream: The stream to which received lines that are not part of
- the subunit protocol are written. This allows custom handling of
- mixed protocols. By default, sys.stdout is used for convenience.
- Its write() method must accept bytes.
- :param forward_stream: A stream to forward subunit lines to. This
- allows a filter to forward the entire stream while still parsing
- and acting on it. By default forward_stream is set to
- DiscardStream() and no forwarding happens.
- """
- self.client = ExtendedToOriginalDecorator(client)
- if stream is None:
- stream = sys.stdout
- if sys.version_info > (3, 0):
- stream = stream.buffer
- self._stream = stream
- self._forward_stream = forward_stream or DiscardStream()
- # state objects we can switch to
- self._in_test = _InTest(self)
- self._outside_test = _OutSideTest(self)
- self._reading_error_details = _ReadingErrorDetails(self)
- self._reading_failure_details = _ReadingFailureDetails(self)
- self._reading_skip_details = _ReadingSkipDetails(self)
- self._reading_success_details = _ReadingSuccessDetails(self)
- self._reading_xfail_details = _ReadingExpectedFailureDetails(self)
- self._reading_uxsuccess_details = _ReadingUnexpectedSuccessDetails(self)
- # start with outside test.
- self._state = self._outside_test
- # Avoid casts on every call
- self._plusminus = _b('+-')
- self._push_sym = _b('push')
- self._pop_sym = _b('pop')
-
- def _handleProgress(self, offset, line):
- """Process a progress directive."""
- line = line[offset:].strip()
- if line[0] in self._plusminus:
- whence = PROGRESS_CUR
- delta = int(line)
- elif line == self._push_sym:
- whence = PROGRESS_PUSH
- delta = None
- elif line == self._pop_sym:
- whence = PROGRESS_POP
- delta = None
- else:
- whence = PROGRESS_SET
- delta = int(line)
- self.client.progress(delta, whence)
-
- def _handleTags(self, offset, line):
- """Process a tags command."""
- tags = line[offset:].decode('utf8').split()
- new_tags, gone_tags = tags_to_new_gone(tags)
- self.client.tags(new_tags, gone_tags)
-
- def _handleTime(self, offset, line):
- # Parse the time directive and forward it to the client.
- try:
- event_time = iso8601.parse_date(line[offset:-1])
- except TypeError:
- raise TypeError(_u("Failed to parse %r, got %r")
- % (line, sys.exc_info()[1]))
- self.client.time(event_time)
-
- def lineReceived(self, line):
- """Call the appropriate local method for the received line."""
- self._state.lineReceived(line)
-
- def _lostConnectionInTest(self, state_string):
- error_string = _u("lost connection during %stest '%s'") % (
- state_string, self.current_test_description)
- self.client.addError(self._current_test, RemoteError(error_string))
- self.client.stopTest(self._current_test)
-
- def lostConnection(self):
- """The input connection has finished."""
- self._state.lostConnection()
-
- def readFrom(self, pipe):
- """Blocking convenience API to parse an entire stream.
-
- :param pipe: A file-like object supporting readlines().
- :return: None.
- """
- for line in pipe.readlines():
- self.lineReceived(line)
- self.lostConnection()
-
- def _startTest(self, offset, line):
- """Internal call to change state machine. Override startTest()."""
- self._state.startTest(offset, line)
-
- def subunitLineReceived(self, line):
- self._forward_stream.write(line)
-
- def stdOutLineReceived(self, line):
- self._stream.write(line)
-
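
A minimal sketch of driving the ``TestProtocolServer`` parser above against a
plain ``unittest.TestResult``; the subunit lines are hand-written for
illustration and assume ``testtools`` is importable:

    import unittest
    from io import BytesIO
    from subunit import TestProtocolServer

    result = unittest.TestResult()
    parser = TestProtocolServer(result)
    # Replay a tiny subunit stream: one test that fails with a details block.
    parser.readFrom(BytesIO(
        b"test: sample\n"
        b"failure: sample [\n"
        b"boom\n"
        b"]\n"))
    print(result.testsRun, len(result.failures))   # expected: 1 1
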
-
-class TestProtocolClient(testresult.TestResult):
- """A TestResult which generates a subunit stream for a test run.
-
- # Get a TestSuite or TestCase to run
- suite = make_suite()
- # Create a stream (any object with a 'write' method). This should accept
- # bytes not strings: subunit is a byte orientated protocol.
- stream = open('tests.log', 'wb')
- # Create a subunit result object which will output to the stream
- result = subunit.TestProtocolClient(stream)
- # Optionally, to get timing data for performance analysis, wrap the
- # serialiser with a timing decorator
- result = subunit.test_results.AutoTimingTestResultDecorator(result)
- # Run the test suite reporting to the subunit result object
- suite.run(result)
- # Close the stream.
- stream.close()
- """
-
- def __init__(self, stream):
- testresult.TestResult.__init__(self)
- stream = _make_stream_binary(stream)
- self._stream = stream
- self._progress_fmt = _b("progress: ")
- self._bytes_eol = _b("\n")
- self._progress_plus = _b("+")
- self._progress_push = _b("push")
- self._progress_pop = _b("pop")
- self._empty_bytes = _b("")
- self._start_simple = _b(" [\n")
- self._end_simple = _b("]\n")
-
- def addError(self, test, error=None, details=None):
- """Report an error in test test.
-
- Only one of error and details should be provided: conceptually there
- are two separate methods:
- addError(self, test, error)
- addError(self, test, details)
-
- :param error: Standard unittest positional argument form - an
- exc_info tuple.
- :param details: New Testing-in-python drafted API; a dict from string
- to subunit.Content objects.
- """
- self._addOutcome("error", test, error=error, details=details)
- if self.failfast:
- self.stop()
-
- def addExpectedFailure(self, test, error=None, details=None):
- """Report an expected failure in test test.
-
- Only one of error and details should be provided: conceptually there
- are two separate methods:
- addExpectedFailure(self, test, error)
- addExpectedFailure(self, test, details)
-
- :param error: Standard unittest positional argument form - an
- exc_info tuple.
- :param details: New Testing-in-python drafted API; a dict from string
- to subunit.Content objects.
- """
- self._addOutcome("xfail", test, error=error, details=details)
-
- def addFailure(self, test, error=None, details=None):
- """Report a failure in test test.
-
- Only one of error and details should be provided: conceptually there
- are two separate methods:
- addFailure(self, test, error)
- addFailure(self, test, details)
-
- :param error: Standard unittest positional argument form - an
- exc_info tuple.
- :param details: New Testing-in-python drafted API; a dict from string
- to subunit.Content objects.
- """
- self._addOutcome("failure", test, error=error, details=details)
- if self.failfast:
- self.stop()
-
- def _addOutcome(self, outcome, test, error=None, details=None,
- error_permitted=True):
- """Report a failure in test test.
-
- Only one of error and details should be provided: conceptually there
- are two separate methods:
- addOutcome(self, test, error)
- addOutcome(self, test, details)
-
- :param outcome: A string describing the outcome - used as the
- event name in the subunit stream.
- :param error: Standard unittest positional argument form - an
- exc_info tuple.
- :param details: New Testing-in-python drafted API; a dict from string
- to subunit.Content objects.
- :param error_permitted: If True then one and only one of error or
- details must be supplied. If False then error must not be supplied
- and details is still optional. """
- self._stream.write(_b("%s: " % outcome) + self._test_id(test))
- if error_permitted:
- if error is None and details is None:
- raise ValueError
- else:
- if error is not None:
- raise ValueError
- if error is not None:
- self._stream.write(self._start_simple)
- tb_content = TracebackContent(error, test)
- for bytes in tb_content.iter_bytes():
- self._stream.write(bytes)
- elif details is not None:
- self._write_details(details)
- else:
- self._stream.write(_b("\n"))
- if details is not None or error is not None:
- self._stream.write(self._end_simple)
-
- def addSkip(self, test, reason=None, details=None):
- """Report a skipped test."""
- if reason is None:
- self._addOutcome("skip", test, error=None, details=details)
- else:
- self._stream.write(_b("skip: %s [\n" % test.id()))
- self._stream.write(_b("%s\n" % reason))
- self._stream.write(self._end_simple)
-
- def addSuccess(self, test, details=None):
- """Report a success in a test."""
- self._addOutcome("successful", test, details=details, error_permitted=False)
-
- def addUnexpectedSuccess(self, test, details=None):
- """Report an unexpected success in test test.
-
- Details can optionally be provided: conceptually there
- are two separate methods:
- addUnexpectedSuccess(self, test)
- addUnexpectedSuccess(self, test, details)
-
- :param details: New Testing-in-python drafted API; a dict from string
- to subunit.Content objects.
- """
- self._addOutcome("uxsuccess", test, details=details,
- error_permitted=False)
- if self.failfast:
- self.stop()
-
- def _test_id(self, test):
- result = test.id()
- if type(result) is not bytes:
- result = result.encode('utf8')
- return result
-
- def startTest(self, test):
- """Mark a test as starting its test run."""
- super(TestProtocolClient, self).startTest(test)
- self._stream.write(_b("test: ") + self._test_id(test) + _b("\n"))
- self._stream.flush()
-
- def stopTest(self, test):
- super(TestProtocolClient, self).stopTest(test)
- self._stream.flush()
-
- def progress(self, offset, whence):
- """Provide indication about the progress/length of the test run.
-
- :param offset: Information about the number of tests remaining. If
- whence is PROGRESS_CUR, then offset increases/decreases the
- remaining test count. If whence is PROGRESS_SET, then offset
- specifies exactly the remaining test count.
- :param whence: One of PROGRESS_CUR, PROGRESS_SET, PROGRESS_PUSH,
- PROGRESS_POP.
- """
- if whence == PROGRESS_CUR and offset > -1:
- prefix = self._progress_plus
- offset = _b(str(offset))
- elif whence == PROGRESS_PUSH:
- prefix = self._empty_bytes
- offset = self._progress_push
- elif whence == PROGRESS_POP:
- prefix = self._empty_bytes
- offset = self._progress_pop
- else:
- prefix = self._empty_bytes
- offset = _b(str(offset))
- self._stream.write(self._progress_fmt + prefix + offset +
- self._bytes_eol)
-
- def tags(self, new_tags, gone_tags):
- """Inform the client about tags added/removed from the stream."""
- if not new_tags and not gone_tags:
- return
- tags = set([tag.encode('utf8') for tag in new_tags])
- tags.update([_b("-") + tag.encode('utf8') for tag in gone_tags])
- tag_line = _b("tags: ") + _b(" ").join(tags) + _b("\n")
- self._stream.write(tag_line)
-
- def time(self, a_datetime):
- """Inform the client of the time.
-
- ":param datetime: A datetime.datetime object.
- """
- time = a_datetime.astimezone(iso8601.Utc())
- self._stream.write(_b("time: %04d-%02d-%02d %02d:%02d:%02d.%06dZ\n" % (
- time.year, time.month, time.day, time.hour, time.minute,
- time.second, time.microsecond)))
-
- def _write_details(self, details):
- """Output details to the stream.
-
- :param details: An extended details dict for a test outcome.
- """
- self._stream.write(_b(" [ multipart\n"))
- for name, content in sorted(details.items()):
- self._stream.write(_b("Content-Type: %s/%s" %
- (content.content_type.type, content.content_type.subtype)))
- parameters = content.content_type.parameters
- if parameters:
- self._stream.write(_b(";"))
- param_strs = []
- for param, value in parameters.items():
- param_strs.append("%s=%s" % (param, value))
- self._stream.write(_b(",".join(param_strs)))
- self._stream.write(_b("\n%s\n" % name))
- encoder = chunked.Encoder(self._stream)
- list(map(encoder.write, content.iter_bytes()))
- encoder.close()
-
- def done(self):
- """Obey the testtools result.done() interface."""
-
-
-def RemoteError(description=_u("")):
- return (_StringException, _StringException(description), None)
-
-
-class RemotedTestCase(unittest.TestCase):
- """A class to represent test cases run in child processes.
-
- Instances of this class are used to provide the Python test API a TestCase
- that can be printed to the screen, introspected for metadata and so on.
- However, as they are simply a memoisation of a test that was actually
- run in the past by a separate process, they cannot perform any interactive
- actions.
- """
-
- def __eq__ (self, other):
- try:
- return self.__description == other.__description
- except AttributeError:
- return False
-
- def __init__(self, description):
- """Create a psuedo test case with description description."""
- self.__description = description
-
- def error(self, label):
- raise NotImplementedError("%s on RemotedTestCases is not permitted." %
- label)
-
- def setUp(self):
- self.error("setUp")
-
- def tearDown(self):
- self.error("tearDown")
-
- def shortDescription(self):
- return self.__description
-
- def id(self):
- return "%s" % (self.__description,)
-
- def __str__(self):
- return "%s (%s)" % (self.__description, self._strclass())
-
- def __repr__(self):
- return "<%s description='%s'>" % \
- (self._strclass(), self.__description)
-
- def run(self, result=None):
- if result is None: result = self.defaultTestResult()
- result.startTest(self)
- result.addError(self, RemoteError(_u("Cannot run RemotedTestCases.\n")))
- result.stopTest(self)
-
- def _strclass(self):
- cls = self.__class__
- return "%s.%s" % (cls.__module__, cls.__name__)
-
-
-class ExecTestCase(unittest.TestCase):
- """A test case which runs external scripts for test fixtures."""
-
- def __init__(self, methodName='runTest'):
- """Create an instance of the class that will use the named test
- method when executed. Raises a ValueError if the instance does
- not have a method with the specified name.
- """
- unittest.TestCase.__init__(self, methodName)
- testMethod = getattr(self, methodName)
- self.script = join_dir(sys.modules[self.__class__.__module__].__file__,
- testMethod.__doc__)
-
- def countTestCases(self):
- return 1
-
- def run(self, result=None):
- if result is None: result = self.defaultTestResult()
- self._run(result)
-
- def debug(self):
- """Run the test without collecting errors in a TestResult"""
- self._run(testresult.TestResult())
-
- def _run(self, result):
- protocol = TestProtocolServer(result)
- process = subprocess.Popen(self.script, shell=True,
- stdout=subprocess.PIPE)
- _make_stream_binary(process.stdout)
- output = process.communicate()[0]
- protocol.readFrom(BytesIO(output))
-
-
-class IsolatedTestCase(unittest.TestCase):
- """A TestCase which executes in a forked process.
-
- Each test gets its own process, which has a performance overhead but will
- provide excellent isolation from global state (such as django configs,
- zope utilities and so on).
- """
-
- def run(self, result=None):
- if result is None: result = self.defaultTestResult()
- run_isolated(unittest.TestCase, self, result)
-
-
-class IsolatedTestSuite(unittest.TestSuite):
- """A TestSuite which runs its tests in a forked process.
-
- This suite will fork() before running its tests and report the
- results from the child process using a Subunit stream. This is useful for
- handling tests that mutate global state, or are testing C extensions that
- could crash the VM.
- """
-
- def run(self, result=None):
- if result is None: result = testresult.TestResult()
- run_isolated(unittest.TestSuite, self, result)
-
-
-def run_isolated(klass, self, result):
- """Run a test suite or case in a subprocess, using the run method on klass.
- """
- c2pread, c2pwrite = os.pipe()
- # fixme - error -> result
- # now fork
- pid = os.fork()
- if pid == 0:
- # Child
- # Close parent's pipe ends
- os.close(c2pread)
- # Dup fds for child
- os.dup2(c2pwrite, 1)
- # Close pipe fds.
- os.close(c2pwrite)
-
- # at this point, stdout is redirected to the pipe; ideally we would
- # also filter it to escape ]'s.
- ### XXX: test and write that bit.
- stream = os.fdopen(1, 'wb')
- result = TestProtocolClient(stream)
- klass.run(self, result)
- stream.flush()
- sys.stderr.flush()
- # exit HARD, exit NOW.
- os._exit(0)
- else:
- # Parent
- # Close child pipe ends
- os.close(c2pwrite)
- # hookup a protocol engine
- protocol = TestProtocolServer(result)
- fileobj = os.fdopen(c2pread, 'rb')
- protocol.readFrom(fileobj)
- os.waitpid(pid, 0)
- # TODO return code evaluation.
- return result
-
-
-def TAP2SubUnit(tap, subunit):
- """Filter a TAP pipe into a subunit pipe.
-
- :param tap: A tap pipe/stream/file object.
- :param subunit: A pipe/stream/file object to write subunit results to.
- :return: The exit code to exit with.
- """
- BEFORE_PLAN = 0
- AFTER_PLAN = 1
- SKIP_STREAM = 2
- state = BEFORE_PLAN
- plan_start = 1
- plan_stop = 0
- def _skipped_test(subunit, plan_start):
- # Some tests were skipped.
- subunit.write('test test %d\n' % plan_start)
- subunit.write('error test %d [\n' % plan_start)
- subunit.write('test missing from TAP output\n')
- subunit.write(']\n')
- return plan_start + 1
- # Test data for the next test to emit
- test_name = None
- log = []
- result = None
- def _emit_test():
- "write out a test"
- if test_name is None:
- return
- subunit.write("test %s\n" % test_name)
- if not log:
- subunit.write("%s %s\n" % (result, test_name))
- else:
- subunit.write("%s %s [\n" % (result, test_name))
- if log:
- for line in log:
- subunit.write("%s\n" % line)
- subunit.write("]\n")
- del log[:]
- for line in tap:
- if state == BEFORE_PLAN:
- match = re.match("(\d+)\.\.(\d+)\s*(?:\#\s+(.*))?\n", line)
- if match:
- state = AFTER_PLAN
- _, plan_stop, comment = match.groups()
- plan_stop = int(plan_stop)
- if plan_start > plan_stop and plan_stop == 0:
- # skipped file
- state = SKIP_STREAM
- subunit.write("test file skip\n")
- subunit.write("skip file skip [\n")
- subunit.write("%s\n" % comment)
- subunit.write("]\n")
- continue
- # not a plan line, or have seen one before
- match = re.match("(ok|not ok)(?:\s+(\d+)?)?(?:\s+([^#]*[^#\s]+)\s*)?(?:\s+#\s+(TODO|SKIP|skip|todo)(?:\s+(.*))?)?\n", line)
- if match:
- # new test, emit current one.
- _emit_test()
- status, number, description, directive, directive_comment = match.groups()
- if status == 'ok':
- result = 'success'
- else:
- result = "failure"
- if description is None:
- description = ''
- else:
- description = ' ' + description
- if directive is not None:
- if directive.upper() == 'TODO':
- result = 'xfail'
- elif directive.upper() == 'SKIP':
- result = 'skip'
- if directive_comment is not None:
- log.append(directive_comment)
- if number is not None:
- number = int(number)
- while plan_start < number:
- plan_start = _skipped_test(subunit, plan_start)
- test_name = "test %d%s" % (plan_start, description)
- plan_start += 1
- continue
- match = re.match("Bail out\!(?:\s*(.*))?\n", line)
- if match:
- reason, = match.groups()
- if reason is None:
- extra = ''
- else:
- extra = ' %s' % reason
- _emit_test()
- test_name = "Bail out!%s" % extra
- result = "error"
- state = SKIP_STREAM
- continue
- match = re.match("\#.*\n", line)
- if match:
- log.append(line[:-1])
- continue
- subunit.write(line)
- _emit_test()
- while plan_start <= plan_stop:
- # record missed tests
- plan_start = _skipped_test(subunit, plan_start)
- return 0
-
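
A rough sketch of the ``TAP2SubUnit`` filter above using in-memory streams;
the TAP input is invented for illustration:

    from io import StringIO
    from subunit import TAP2SubUnit

    tap = StringIO("1..2\n"
                   "ok 1 first thing\n"
                   "not ok 2 second thing\n")
    out = StringIO()
    TAP2SubUnit(tap, out)
    # out now holds subunit lines such as "test test 1 first thing",
    # "success test 1 first thing" and "failure test 2 second thing".
    print(out.getvalue())
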
-
-def tag_stream(original, filtered, tags):
- """Alter tags on a stream.
-
- :param original: The input stream.
- :param filtered: The output stream.
- :param tags: The tags to apply. As in a normal stream - a list of 'TAG' or
- '-TAG' commands.
-
- A 'TAG' command will add the tag to the output stream,
- and override any existing '-TAG' command in that stream.
- Specifically:
- * A global 'tags: TAG' will be added to the start of the stream.
- * Any tags commands with -TAG will have the -TAG removed.
-
- A '-TAG' command will remove the TAG command from the stream.
- Specifically:
- * A 'tags: -TAG' command will be added to the start of the stream.
- * Any 'tags: TAG' command will have 'TAG' removed from it.
- Additionally, any redundant tagging commands (adding a tag globally
- present, or removing a tag globally removed) are stripped as a
- by-product of the filtering.
- :return: 0
- """
- new_tags, gone_tags = tags_to_new_gone(tags)
- def write_tags(new_tags, gone_tags):
- if new_tags or gone_tags:
- filtered.write("tags: " + ' '.join(new_tags))
- if gone_tags:
- for tag in gone_tags:
- filtered.write("-" + tag)
- filtered.write("\n")
- write_tags(new_tags, gone_tags)
- # TODO: use the protocol parser and thus don't mangle test comments.
- for line in original:
- if line.startswith("tags:"):
- line_tags = line[5:].split()
- line_new, line_gone = tags_to_new_gone(line_tags)
- line_new = line_new - gone_tags
- line_gone = line_gone - new_tags
- write_tags(line_new, line_gone)
- else:
- filtered.write(line)
- return 0
-
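
A small sketch of ``tag_stream`` above, kept to purely additive tags; the
stream content is invented for illustration:

    from io import StringIO
    from subunit import tag_stream

    original = StringIO("test: one\n"
                        "tags: foo\n"
                        "success: one\n")
    filtered = StringIO()
    tag_stream(original, filtered, ["web"])
    # The output gains a global "tags: web" line and keeps the per-test tags.
    print(filtered.getvalue())
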
-
-class ProtocolTestCase(object):
- """Subunit wire protocol to unittest.TestCase adapter.
-
- ProtocolTestCase honours the core of ``unittest.TestCase`` protocol -
- calling a ProtocolTestCase or invoking the run() method will make a 'test
- run' happen. The 'test run' will simply be a replay of the test activity
- that has been encoded into the stream. The ``unittest.TestCase`` ``debug``
- and ``countTestCases`` methods are not supported because there isn't a
- sensible mapping for those methods.
-
- # Get a stream (any object with a readline() method), in this case the
- # stream output by the example from ``subunit.TestProtocolClient``.
- stream = open('tests.log', 'rb')
- # Create a parser which will read from the stream and emit
- # activity to a unittest.TestResult when run() is called.
- suite = subunit.ProtocolTestCase(stream)
- # Create a result object to accept the contents of that stream.
- result = unittest._TextTestResult(sys.stdout)
- # 'run' the tests - process the stream and feed its contents to result.
- suite.run(result)
- stream.close()
-
- :seealso: TestProtocolServer (the subunit wire protocol parser).
- """
-
- def __init__(self, stream, passthrough=None, forward=None):
- """Create a ProtocolTestCase reading from stream.
-
- :param stream: A filelike object which a subunit stream can be read
- from.
- :param passthrough: A stream pass non subunit input on to. If not
- supplied, the TestProtocolServer default is used.
- :param forward: A stream to pass subunit input on to. If not supplied
- subunit input is not forwarded.
- """
- stream = _make_stream_binary(stream)
- self._stream = stream
- self._passthrough = passthrough
- if forward is not None:
- forward = _make_stream_binary(forward)
- self._forward = forward
-
- def __call__(self, result=None):
- return self.run(result)
-
- def run(self, result=None):
- if result is None:
- result = self.defaultTestResult()
- protocol = TestProtocolServer(result, self._passthrough, self._forward)
- line = self._stream.readline()
- while line:
- protocol.lineReceived(line)
- line = self._stream.readline()
- protocol.lostConnection()
-
-
-class TestResultStats(testresult.TestResult):
- """A pyunit TestResult interface implementation for making statistics.
-
- :ivar total_tests: The total tests seen.
- :ivar passed_tests: The tests that passed.
- :ivar failed_tests: The tests that failed.
- :ivar seen_tags: The tags seen across all tests.
- """
-
- def __init__(self, stream):
- """Create a TestResultStats which outputs to stream."""
- testresult.TestResult.__init__(self)
- self._stream = stream
- self.failed_tests = 0
- self.skipped_tests = 0
- self.seen_tags = set()
-
- @property
- def total_tests(self):
- return self.testsRun
-
- def addError(self, test, err, details=None):
- self.failed_tests += 1
-
- def addFailure(self, test, err, details=None):
- self.failed_tests += 1
-
- def addSkip(self, test, reason, details=None):
- self.skipped_tests += 1
-
- def formatStats(self):
- self._stream.write("Total tests: %5d\n" % self.total_tests)
- self._stream.write("Passed tests: %5d\n" % self.passed_tests)
- self._stream.write("Failed tests: %5d\n" % self.failed_tests)
- self._stream.write("Skipped tests: %5d\n" % self.skipped_tests)
- tags = sorted(self.seen_tags)
- self._stream.write("Seen tags: %s\n" % (", ".join(tags)))
-
- @property
- def passed_tests(self):
- return self.total_tests - self.failed_tests - self.skipped_tests
-
- def tags(self, new_tags, gone_tags):
- """Accumulate the seen tags."""
- self.seen_tags.update(new_tags)
-
- def wasSuccessful(self):
- """Tells whether or not this result was a success"""
- return self.failed_tests == 0
-
-
-def get_default_formatter():
- """Obtain the default formatter to write to.
-
- :return: A file-like object.
- """
- formatter = os.getenv("SUBUNIT_FORMATTER")
- if formatter:
- return os.popen(formatter, "w")
- else:
- stream = sys.stdout
- if sys.version_info > (3, 0):
- stream = stream.buffer
- return stream
-
-
-def read_test_list(path):
- """Read a list of test ids from a file on disk.
-
- :param path: Path to the file
- :return: Sequence of test ids
- """
- f = open(path, 'rb')
- try:
- return [l.rstrip("\n") for l in f.readlines()]
- finally:
- f.close()
-
-
-def _make_stream_binary(stream):
- """Ensure that a stream will be binary safe. See _make_binary_on_windows.
-
- :return: A binary version of the same stream (some streams cannot be
- 'fixed' but can be unwrapped).
- """
- try:
- fileno = stream.fileno()
- except _UnsupportedOperation:
- pass
- else:
- _make_binary_on_windows(fileno)
- return _unwrap_text(stream)
-
-def _make_binary_on_windows(fileno):
- """Win32 mangles \r\n to \n and that breaks streams. See bug lp:505078."""
- if sys.platform == "win32":
- import msvcrt
- msvcrt.setmode(fileno, os.O_BINARY)
-
-
-def _unwrap_text(stream):
- """Unwrap stream if it is a text stream to get the original buffer."""
- if sys.version_info > (3, 0):
- try:
- # Read streams
- if type(stream.read(0)) is str:
- return stream.buffer
- except (_UnsupportedOperation, IOError):
- # Cannot read from the stream: try via writes
- try:
- stream.write(_b(''))
- except TypeError:
- return stream.buffer
- return stream
diff --git a/lib/subunit/python/subunit/chunked.py b/lib/subunit/python/subunit/chunked.py
deleted file mode 100644
index b9921291ea..0000000000
--- a/lib/subunit/python/subunit/chunked.py
+++ /dev/null
@@ -1,185 +0,0 @@
-#
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-# Copyright (C) 2011 Martin Pool <mbp@sourcefrog.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Encoder/decoder for http style chunked encoding."""
-
-from testtools.compat import _b
-
-empty = _b('')
-
-class Decoder(object):
- """Decode chunked content to a byte stream."""
-
- def __init__(self, output, strict=True):
- """Create a decoder decoding to output.
-
- :param output: A file-like object. Bytes written to the Decoder are
- decoded to strip off the chunking and written to the output.
- Up to a full write worth of data or a single control line may be
- buffered (whichever is larger). The close method should be called
- when no more data is available, to detect short streams; the
- write method will return non-None when the end of a stream is
- detected. The output object must accept bytes objects.
-
- :param strict: If True (the default), the decoder will not knowingly
- accept input that is not conformant to the HTTP specification.
- (This does not imply that it will catch every nonconformance.)
- If False, it will accept incorrect input that is still
- unambiguous.
- """
- self.output = output
- self.buffered_bytes = []
- self.state = self._read_length
- self.body_length = 0
- self.strict = strict
- self._match_chars = _b("0123456789abcdefABCDEF\r\n")
- self._slash_n = _b('\n')
- self._slash_r = _b('\r')
- self._slash_rn = _b('\r\n')
- self._slash_nr = _b('\n\r')
-
- def close(self):
- """Close the decoder.
-
- :raises ValueError: If the stream is incomplete ValueError is raised.
- """
- if self.state != self._finished:
- raise ValueError("incomplete stream")
-
- def _finished(self):
- """Finished reading, return any remaining bytes."""
- if self.buffered_bytes:
- buffered_bytes = self.buffered_bytes
- self.buffered_bytes = []
- return empty.join(buffered_bytes)
- else:
- raise ValueError("stream is finished")
-
- def _read_body(self):
- """Pass body bytes to the output."""
- while self.body_length and self.buffered_bytes:
- if self.body_length >= len(self.buffered_bytes[0]):
- self.output.write(self.buffered_bytes[0])
- self.body_length -= len(self.buffered_bytes[0])
- del self.buffered_bytes[0]
- # No more data available.
- if not self.body_length:
- self.state = self._read_length
- else:
- self.output.write(self.buffered_bytes[0][:self.body_length])
- self.buffered_bytes[0] = \
- self.buffered_bytes[0][self.body_length:]
- self.body_length = 0
- self.state = self._read_length
- return self.state()
-
- def _read_length(self):
- """Try to decode a length from the bytes."""
- count_chars = []
- for bytes in self.buffered_bytes:
- for pos in range(len(bytes)):
- byte = bytes[pos:pos+1]
- if byte not in self._match_chars:
- break
- count_chars.append(byte)
- if byte == self._slash_n:
- break
- if not count_chars:
- return
- if count_chars[-1] != self._slash_n:
- return
- count_str = empty.join(count_chars)
- if self.strict:
- if count_str[-2:] != self._slash_rn:
- raise ValueError("chunk header invalid: %r" % count_str)
- if self._slash_r in count_str[:-2]:
- raise ValueError("too many CRs in chunk header %r" % count_str)
- self.body_length = int(count_str.rstrip(self._slash_nr), 16)
- excess_bytes = len(count_str)
- while excess_bytes:
- if excess_bytes >= len(self.buffered_bytes[0]):
- excess_bytes -= len(self.buffered_bytes[0])
- del self.buffered_bytes[0]
- else:
- self.buffered_bytes[0] = self.buffered_bytes[0][excess_bytes:]
- excess_bytes = 0
- if not self.body_length:
- self.state = self._finished
- if not self.buffered_bytes:
- # May not call into self._finished with no buffered data.
- return empty
- else:
- self.state = self._read_body
- return self.state()
-
- def write(self, bytes):
- """Decode bytes to the output stream.
-
- :raises ValueError: If the stream has already seen the end of file
- marker.
- :returns: None, or the excess bytes beyond the end of file marker.
- """
- if bytes:
- self.buffered_bytes.append(bytes)
- return self.state()
-
-
-class Encoder(object):
- """Encode content to a stream using HTTP Chunked coding."""
-
- def __init__(self, output):
- """Create an encoder encoding to output.
-
- :param output: A file-like object. Bytes written to the Encoder
- will be encoded using HTTP chunking. Small writes may be buffered
- and the ``close`` method must be called to finish the stream.
- """
- self.output = output
- self.buffered_bytes = []
- self.buffer_size = 0
-
- def flush(self, extra_len=0):
- """Flush the encoder to the output stream.
-
- :param extra_len: Increase the size of the chunk by this many bytes
- to allow for a subsequent write.
- """
- if not self.buffer_size and not extra_len:
- return
- buffered_bytes = self.buffered_bytes
- buffer_size = self.buffer_size
- self.buffered_bytes = []
- self.buffer_size = 0
- self.output.write(_b("%X\r\n" % (buffer_size + extra_len)))
- if buffer_size:
- self.output.write(empty.join(buffered_bytes))
- return True
-
- def write(self, bytes):
- """Encode bytes to the output stream."""
- bytes_len = len(bytes)
- if self.buffer_size + bytes_len >= 65536:
- self.flush(bytes_len)
- self.output.write(bytes)
- else:
- self.buffered_bytes.append(bytes)
- self.buffer_size += bytes_len
-
- def close(self):
- """Finish the stream. This does not close the output stream."""
- self.flush()
- self.output.write(_b("0\r\n"))
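
A round-trip sketch of the ``Encoder``/``Decoder`` pair above, using the same
``testtools.compat._b`` helper the module relies on:

    from io import BytesIO
    from testtools.compat import _b
    from subunit import chunked

    encoded = BytesIO()
    encoder = chunked.Encoder(encoded)
    encoder.write(_b("hello"))
    encoder.close()                      # emits "5\r\nhello" then the "0\r\n" terminator

    decoded = BytesIO()
    decoder = chunked.Decoder(decoded)
    decoder.write(encoded.getvalue())    # returns b'' once the zero-length chunk is seen
    decoder.close()
    assert decoded.getvalue() == _b("hello")
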
diff --git a/lib/subunit/python/subunit/details.py b/lib/subunit/python/subunit/details.py
deleted file mode 100644
index 9e5e005864..0000000000
--- a/lib/subunit/python/subunit/details.py
+++ /dev/null
@@ -1,119 +0,0 @@
-#
-# subunit: extensions to Python unittest to get test results from subprocesses.
-# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Handlers for outcome details."""
-
-from testtools import content, content_type
-from testtools.compat import _b, BytesIO
-
-from subunit import chunked
-
-end_marker = _b("]\n")
-quoted_marker = _b(" ]")
-empty = _b('')
-
-
-class DetailsParser(object):
- """Base class/API reference for details parsing."""
-
-
-class SimpleDetailsParser(DetailsParser):
- """Parser for single-part [] delimited details."""
-
- def __init__(self, state):
- self._message = _b("")
- self._state = state
-
- def lineReceived(self, line):
- if line == end_marker:
- self._state.endDetails()
- return
- if line[0:2] == quoted_marker:
- # quoted ] start
- self._message += line[1:]
- else:
- self._message += line
-
- def get_details(self, style=None):
- result = {}
- if not style:
- # We know that subunit/testtools serialise [] formatted
- # tracebacks as utf8, but perhaps we need a ReplacingContent
- # or something like that.
- result['traceback'] = content.Content(
- content_type.ContentType("text", "x-traceback",
- {"charset": "utf8"}),
- lambda:[self._message])
- else:
- if style == 'skip':
- name = 'reason'
- else:
- name = 'message'
- result[name] = content.Content(
- content_type.ContentType("text", "plain"),
- lambda:[self._message])
- return result
-
- def get_message(self):
- return self._message
-
-
-class MultipartDetailsParser(DetailsParser):
- """Parser for multi-part [] surrounded MIME typed chunked details."""
-
- def __init__(self, state):
- self._state = state
- self._details = {}
- self._parse_state = self._look_for_content
-
- def _look_for_content(self, line):
- if line == end_marker:
- self._state.endDetails()
- return
- # TODO error handling
- field, value = line[:-1].decode('utf8').split(' ', 1)
- try:
- main, sub = value.split('/')
- except ValueError:
- raise ValueError("Invalid MIME type %r" % value)
- self._content_type = content_type.ContentType(main, sub)
- self._parse_state = self._get_name
-
- def _get_name(self, line):
- self._name = line[:-1].decode('utf8')
- self._body = BytesIO()
- self._chunk_parser = chunked.Decoder(self._body)
- self._parse_state = self._feed_chunks
-
- def _feed_chunks(self, line):
- residue = self._chunk_parser.write(line)
- if residue is not None:
- # Line based use always ends on no residue.
- assert residue == empty, 'residue: %r' % (residue,)
- body = self._body
- self._details[self._name] = content.Content(
- self._content_type, lambda:[body.getvalue()])
- self._chunk_parser.close()
- self._parse_state = self._look_for_content
-
- def get_details(self, for_skip=False):
- return self._details
-
- def get_message(self):
- return None
-
- def lineReceived(self, line):
- self._parse_state(line)
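
A minimal sketch of feeding ``SimpleDetailsParser`` above; the state object is
a stand-in for the parser states defined in ``subunit/__init__.py``:

    from testtools.compat import _b
    from subunit.details import SimpleDetailsParser

    class FakeState(object):
        """Stand-in state; only endDetails() is needed for this sketch."""
        def endDetails(self):
            pass

    parser = SimpleDetailsParser(FakeState())
    parser.lineReceived(_b("something went wrong\n"))
    parser.lineReceived(_b("]\n"))       # the end marker closes the block
    print(parser.get_message())          # expected: b'something went wrong\n'
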
diff --git a/lib/subunit/python/subunit/filters.py b/lib/subunit/python/subunit/filters.py
deleted file mode 100644
index dc3fd8aedb..0000000000
--- a/lib/subunit/python/subunit/filters.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-
-from optparse import OptionParser
-import sys
-
-from subunit import DiscardStream, ProtocolTestCase
-
-
-def make_options(description):
- parser = OptionParser(description=description)
- parser.add_option(
- "--no-passthrough", action="store_true",
- help="Hide all non subunit input.", default=False,
- dest="no_passthrough")
- parser.add_option(
- "-o", "--output-to",
- help="Send the output to this path rather than stdout.")
- parser.add_option(
- "-f", "--forward", action="store_true", default=False,
- help="Forward subunit stream on stdout.")
- return parser
-
-
-def run_tests_from_stream(input_stream, result, passthrough_stream=None,
- forward_stream=None):
- """Run tests from a subunit input stream through 'result'.
-
- :param input_stream: A stream containing subunit input.
- :param result: A TestResult that will receive the test events.
- :param passthrough_stream: All non-subunit input received will be
- sent to this stream. If not provided, uses the ``TestProtocolServer``
- default, which is ``sys.stdout``.
- :param forward_stream: All subunit input received will be forwarded
- to this stream. If not provided, uses the ``TestProtocolServer``
- default, which is to not forward any input.
- """
- test = ProtocolTestCase(
- input_stream, passthrough=passthrough_stream,
- forward=forward_stream)
- result.startTestRun()
- test.run(result)
- result.stopTestRun()
-
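
A short sketch of ``run_tests_from_stream`` above with an in-memory stream;
the subunit lines are invented for illustration:

    import unittest
    from io import BytesIO
    from subunit.filters import run_tests_from_stream

    stream = BytesIO(b"test: one\n"
                     b"success: one\n")
    result = unittest.TestResult()
    run_tests_from_stream(stream, result)
    print(result.testsRun, result.wasSuccessful())   # expected: 1 True
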
-
-def filter_by_result(result_factory, output_path, passthrough, forward,
- input_stream=sys.stdin):
- """Filter an input stream using a test result.
-
- :param result_factory: A callable that when passed an output stream
- returns a TestResult. It is expected that this result will output
- to the given stream.
- :param output_path: A path to send output to. If None, output will go
- to ``sys.stdout``.
- :param passthrough: If True, all non-subunit input will be sent to
- ``sys.stdout``. If False, that input will be discarded.
- :param forward: If True, all subunit input will be forwarded directly to
- ``sys.stdout`` as well as to the ``TestResult``.
- :param input_stream: The source of subunit input. Defaults to
- ``sys.stdin``.
- :return: A test result with the results of the run.
- """
- if passthrough:
- passthrough_stream = sys.stdout
- else:
- passthrough_stream = DiscardStream()
-
- if forward:
- forward_stream = sys.stdout
- else:
- forward_stream = DiscardStream()
-
- if output_path is None:
- output_to = sys.stdout
- else:
- output_to = open(output_path, 'wb')
-
- try:
- result = result_factory(output_to)
- run_tests_from_stream(
- input_stream, result, passthrough_stream, forward_stream)
- finally:
- if output_path:
- output_to.close()
- return result
-
-
-def run_filter_script(result_factory, description, post_run_hook=None):
- """Main function for simple subunit filter scripts.
-
- Many subunit filter scripts take a stream of subunit input and use a
- TestResult to handle the events generated by that stream. This function
- wraps a lot of the boiler-plate around that by making a script with
- options for handling passthrough information and stream forwarding, and
- that will exit with a successful return code (i.e. 0) if the input stream
- represents a successful test run.
-
- :param result_factory: A callable that takes an output stream and returns
- a test result that outputs to that stream.
- :param description: A description of the filter script.
- """
- parser = make_options(description)
- (options, args) = parser.parse_args()
- result = filter_by_result(
- result_factory, options.output_to, not options.no_passthrough,
- options.forward)
- if post_run_hook:
- post_run_hook(result)
- if result.wasSuccessful():
- sys.exit(0)
- else:
- sys.exit(1)
diff --git a/lib/subunit/python/subunit/iso8601.py b/lib/subunit/python/subunit/iso8601.py
deleted file mode 100644
index 07855d0975..0000000000
--- a/lib/subunit/python/subunit/iso8601.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright (c) 2007 Michael Twomey
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-"""ISO 8601 date time string parsing
-
-Basic usage:
->>> import iso8601
->>> iso8601.parse_date("2007-01-25T12:00:00Z")
-datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
->>>
-
-"""
-
-from datetime import datetime, timedelta, tzinfo
-import re
-import sys
-
-__all__ = ["parse_date", "ParseError"]
-
-# Adapted from http://delete.me.uk/2005/03/iso8601.html
-ISO8601_REGEX_PATTERN = (r"(?P<year>[0-9]{4})(-(?P<month>[0-9]{1,2})(-(?P<day>[0-9]{1,2})"
- r"((?P<separator>.)(?P<hour>[0-9]{2}):(?P<minute>[0-9]{2})(:(?P<second>[0-9]{2})(\.(?P<fraction>[0-9]+))?)?"
- r"(?P<timezone>Z|(([-+])([0-9]{2}):([0-9]{2})))?)?)?)?"
-)
-TIMEZONE_REGEX_PATTERN = "(?P<prefix>[+-])(?P<hours>[0-9]{2}).(?P<minutes>[0-9]{2})"
-ISO8601_REGEX = re.compile(ISO8601_REGEX_PATTERN.encode('utf8'))
-TIMEZONE_REGEX = re.compile(TIMEZONE_REGEX_PATTERN.encode('utf8'))
-
-zulu = "Z".encode('latin-1')
-minus = "-".encode('latin-1')
-
-if sys.version_info < (3, 0):
- bytes = str
-
-
-class ParseError(Exception):
- """Raised when there is a problem parsing a date string"""
-
-# Yoinked from python docs
-ZERO = timedelta(0)
-class Utc(tzinfo):
- """UTC
-
- """
- def utcoffset(self, dt):
- return ZERO
-
- def tzname(self, dt):
- return "UTC"
-
- def dst(self, dt):
- return ZERO
-UTC = Utc()
-
-class FixedOffset(tzinfo):
- """Fixed offset in hours and minutes from UTC
-
- """
- def __init__(self, offset_hours, offset_minutes, name):
- self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
- self.__name = name
-
- def utcoffset(self, dt):
- return self.__offset
-
- def tzname(self, dt):
- return self.__name
-
- def dst(self, dt):
- return ZERO
-
- def __repr__(self):
- return "<FixedOffset %r>" % self.__name
-
-def parse_timezone(tzstring, default_timezone=UTC):
- """Parses ISO 8601 time zone specs into tzinfo offsets
-
- """
- if tzstring == zulu:
- return default_timezone
- # This isn't strictly correct, but it's common to encounter dates without
- # timezones so I'll assume the default (which defaults to UTC).
- # Addresses issue 4.
- if tzstring is None:
- return default_timezone
- m = TIMEZONE_REGEX.match(tzstring)
- prefix, hours, minutes = m.groups()
- hours, minutes = int(hours), int(minutes)
- if prefix == minus:
- hours = -hours
- minutes = -minutes
- return FixedOffset(hours, minutes, tzstring)
-
-def parse_date(datestring, default_timezone=UTC):
- """Parses ISO 8601 dates into datetime objects
-
- The timezone is parsed from the date string. However it is quite common to
- have dates without a timezone (not strictly correct). In this case the
- default timezone specified in default_timezone is used. This is UTC by
- default.
- """
- if not isinstance(datestring, bytes):
- raise ParseError("Expecting bytes %r" % datestring)
- m = ISO8601_REGEX.match(datestring)
- if not m:
- raise ParseError("Unable to parse date string %r" % datestring)
- groups = m.groupdict()
- tz = parse_timezone(groups["timezone"], default_timezone=default_timezone)
- if groups["fraction"] is None:
- groups["fraction"] = 0
- else:
- groups["fraction"] = int(float("0.%s" % groups["fraction"].decode()) * 1e6)
- return datetime(int(groups["year"]), int(groups["month"]), int(groups["day"]),
- int(groups["hour"]), int(groups["minute"]), int(groups["second"]),
- int(groups["fraction"]), tz)
diff --git a/lib/subunit/python/subunit/progress_model.py b/lib/subunit/python/subunit/progress_model.py
deleted file mode 100644
index 3a6af89a33..0000000000
--- a/lib/subunit/python/subunit/progress_model.py
+++ /dev/null
@@ -1,106 +0,0 @@
-#
-# subunit: extensions to Python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Support for dealing with progress state."""
-
-class ProgressModel(object):
- """A model of progress indicators as subunit defines it.
-
- Instances of this class represent a single logical operation that is
- progressing. The operation may have many steps, and some of those steps may
- supply their own progress information. ProgressModel uses a nested concept
- where the overall state can be pushed, creating new starting state, and
- later pushed to return to the prior state. Many user interfaces will want
- to display an overall summary though, and accordingly the pos() and width()
- methods return overall summary information rather than information on the
- current subtask.
-
- The default state is 0/0 - indicating that the overall progress is unknown.
- Anytime the denominator of pos/width is 0, rendering of a ProgressModel
- should take this into consideration.
-
- :ivar _tasks: This private attribute stores the subtasks. Each is a list:
- pos, width, overall_numerator, overall_denominator. The overall fields
- store the calculated overall numerator and denominator for the state
- that was pushed.
- """
-
- def __init__(self):
- """Create a ProgressModel.
-
- The new model has no progress data at all - it will claim a summary
- width of zero and position of 0.
- """
- self._tasks = []
- self.push()
-
- def adjust_width(self, offset):
- """Adjust the with of the current subtask."""
- self._tasks[-1][1] += offset
-
- def advance(self):
- """Advance the current subtask."""
- self._tasks[-1][0] += 1
-
- def pop(self):
- """Pop a subtask off the ProgressModel.
-
- See push for a description of how push and pop work.
- """
- self._tasks.pop()
-
- def pos(self):
- """Return how far through the operation has progressed."""
- if not self._tasks:
- return 0
- task = self._tasks[-1]
- if len(self._tasks) > 1:
- # scale up the overall pos by the current task or preserve it if
- # no current width is known.
- offset = task[2] * (task[1] or 1)
- else:
- offset = 0
- return offset + task[0]
-
- def push(self):
- """Push a new subtask.
-
- After pushing a new subtask, the overall progress hasn't changed. Calls
- to adjust_width, advance and set_width will only affect the progress
- within the range that a single call to 'advance' would have covered
- before the push - the subtask represents progressing one step in the
- earlier task.
-
- Call pop() to restore the progress model to the state before push was
- called.
- """
- self._tasks.append([0, 0, self.pos(), self.width()])
-
- def set_width(self, width):
- """Set the width of the current subtask."""
- self._tasks[-1][1] = width
-
- def width(self):
- """Return the total width of the operation."""
- if not self._tasks:
- return 0
- task = self._tasks[-1]
- if len(self._tasks) > 1:
- # scale up the overall width by the current task or preserve it if
- # no current width is known.
- return task[3] * (task[1] or 1)
- else:
- return task[1]
-
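
To make the push/pop nesting concrete, a short illustrative walk-through (the step counts are invented; the totals follow from the scaling rules in pos() and width()):

    from subunit.progress_model import ProgressModel

    progress = ProgressModel()
    progress.set_width(3)      # three top-level steps
    progress.advance()         # first step done -> 1/3
    progress.push()            # the second step reports its own progress
    progress.set_width(2)
    progress.advance()         # half way through the second step
    print(progress.pos(), progress.width())   # 3 6
    progress.pop()             # back to the outer scale
    print(progress.pos(), progress.width())   # 1 3
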
diff --git a/lib/subunit/python/subunit/run.py b/lib/subunit/python/subunit/run.py
deleted file mode 100755
index b5ccea449d..0000000000
--- a/lib/subunit/python/subunit/run.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/python
-#
-# Simple subunit testrunner for python
-# Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Run a unittest testcase reporting results as Subunit.
-
- $ python -m subunit.run mylib.tests.test_suite
-"""
-
-import sys
-
-from subunit import TestProtocolClient, get_default_formatter
-from subunit.test_results import AutoTimingTestResultDecorator
-from testtools.run import (
- BUFFEROUTPUT,
- CATCHBREAK,
- FAILFAST,
- TestProgram,
- USAGE_AS_MAIN,
- )
-
-
-class SubunitTestRunner(object):
- def __init__(self, verbosity=None, failfast=None, buffer=None, stream=None):
- """Create a TestToolsTestRunner.
-
- :param verbosity: Ignored.
- :param failfast: Stop running tests at the first failure.
- :param buffer: Ignored.
- """
- self.failfast = failfast
- self.stream = stream or sys.stdout
-
- def run(self, test):
- "Run the given test case or test suite."
- result = TestProtocolClient(self.stream)
- result = AutoTimingTestResultDecorator(result)
- if self.failfast is not None:
- result.failfast = self.failfast
- test(result)
- return result
-
-
-class SubunitTestProgram(TestProgram):
-
- USAGE = USAGE_AS_MAIN
-
- def usageExit(self, msg=None):
- if msg:
- print (msg)
- usage = {'progName': self.progName, 'catchbreak': '', 'failfast': '',
- 'buffer': ''}
- if self.failfast != False:
- usage['failfast'] = FAILFAST
- if self.catchbreak != False:
- usage['catchbreak'] = CATCHBREAK
- if self.buffer != False:
- usage['buffer'] = BUFFEROUTPUT
- usage_text = self.USAGE % usage
- usage_lines = usage_text.split('\n')
- usage_lines.insert(2, "Run a test suite with a subunit reporter.")
- usage_lines.insert(3, "")
- print('\n'.join(usage_lines))
- sys.exit(2)
-
-
-if __name__ == '__main__':
- stream = get_default_formatter()
- runner = SubunitTestRunner
- SubunitTestProgram(module=None, argv=sys.argv, testRunner=runner,
- stdout=sys.stdout)
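
Besides the python -m subunit.run entry point shown in the module docstring, the runner can be driven in-process; this sketch mirrors what tests/test_run.py (later in this diff) does, with an arbitrary test id:

    from io import BytesIO

    from testtools import PlaceHolder
    from subunit.run import SubunitTestRunner

    out = BytesIO()
    runner = SubunitTestRunner(stream=out)
    # PlaceHolder is a trivial always-successful test shipped with testtools.
    runner.run(PlaceHolder('mylib.tests.test_something'))
    # out now holds raw subunit v1 bytes: time:, test: and successful: lines.
    print(out.getvalue())
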
diff --git a/lib/subunit/python/subunit/test_results.py b/lib/subunit/python/subunit/test_results.py
deleted file mode 100644
index 91c9bbdc1e..0000000000
--- a/lib/subunit/python/subunit/test_results.py
+++ /dev/null
@@ -1,678 +0,0 @@
-#
-# subunit: extensions to Python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""TestResult helper classes used to by subunit."""
-
-import csv
-import datetime
-
-import testtools
-from testtools.compat import all
-from testtools.content import (
- text_content,
- TracebackContent,
- )
-
-from subunit import iso8601
-
-
-# NOT a TestResult, because we are implementing the interface, not inheriting
-# it.
-class TestResultDecorator(object):
- """General pass-through decorator.
-
- This provides a base that other TestResults can inherit from to
- gain basic forwarding functionality. It also takes care of
- handling the case where the target doesn't support newer methods
- or features by degrading them.
- """
-
- # XXX: Since lp:testtools r250, this is in testtools. Once it's released,
- # we should gut this and just use that.
-
- def __init__(self, decorated):
- """Create a TestResultDecorator forwarding to decorated."""
- # Make every decorator degrade gracefully.
- self.decorated = testtools.ExtendedToOriginalDecorator(decorated)
-
- def startTest(self, test):
- return self.decorated.startTest(test)
-
- def startTestRun(self):
- return self.decorated.startTestRun()
-
- def stopTest(self, test):
- return self.decorated.stopTest(test)
-
- def stopTestRun(self):
- return self.decorated.stopTestRun()
-
- def addError(self, test, err=None, details=None):
- return self.decorated.addError(test, err, details=details)
-
- def addFailure(self, test, err=None, details=None):
- return self.decorated.addFailure(test, err, details=details)
-
- def addSuccess(self, test, details=None):
- return self.decorated.addSuccess(test, details=details)
-
- def addSkip(self, test, reason=None, details=None):
- return self.decorated.addSkip(test, reason, details=details)
-
- def addExpectedFailure(self, test, err=None, details=None):
- return self.decorated.addExpectedFailure(test, err, details=details)
-
- def addUnexpectedSuccess(self, test, details=None):
- return self.decorated.addUnexpectedSuccess(test, details=details)
-
- def _get_failfast(self):
- return getattr(self.decorated, 'failfast', False)
-
- def _set_failfast(self, value):
- self.decorated.failfast = value
- failfast = property(_get_failfast, _set_failfast)
-
- def progress(self, offset, whence):
- return self.decorated.progress(offset, whence)
-
- def wasSuccessful(self):
- return self.decorated.wasSuccessful()
-
- @property
- def shouldStop(self):
- return self.decorated.shouldStop
-
- def stop(self):
- return self.decorated.stop()
-
- @property
- def testsRun(self):
- return self.decorated.testsRun
-
- def tags(self, new_tags, gone_tags):
- return self.decorated.tags(new_tags, gone_tags)
-
- def time(self, a_datetime):
- return self.decorated.time(a_datetime)
-
-
-class HookedTestResultDecorator(TestResultDecorator):
- """A TestResult which calls a hook on every event."""
-
- def __init__(self, decorated):
- self.super = super(HookedTestResultDecorator, self)
- self.super.__init__(decorated)
-
- def startTest(self, test):
- self._before_event()
- return self.super.startTest(test)
-
- def startTestRun(self):
- self._before_event()
- return self.super.startTestRun()
-
- def stopTest(self, test):
- self._before_event()
- return self.super.stopTest(test)
-
- def stopTestRun(self):
- self._before_event()
- return self.super.stopTestRun()
-
- def addError(self, test, err=None, details=None):
- self._before_event()
- return self.super.addError(test, err, details=details)
-
- def addFailure(self, test, err=None, details=None):
- self._before_event()
- return self.super.addFailure(test, err, details=details)
-
- def addSuccess(self, test, details=None):
- self._before_event()
- return self.super.addSuccess(test, details=details)
-
- def addSkip(self, test, reason=None, details=None):
- self._before_event()
- return self.super.addSkip(test, reason, details=details)
-
- def addExpectedFailure(self, test, err=None, details=None):
- self._before_event()
- return self.super.addExpectedFailure(test, err, details=details)
-
- def addUnexpectedSuccess(self, test, details=None):
- self._before_event()
- return self.super.addUnexpectedSuccess(test, details=details)
-
- def progress(self, offset, whence):
- self._before_event()
- return self.super.progress(offset, whence)
-
- def wasSuccessful(self):
- self._before_event()
- return self.super.wasSuccessful()
-
- @property
- def shouldStop(self):
- self._before_event()
- return self.super.shouldStop
-
- def stop(self):
- self._before_event()
- return self.super.stop()
-
- def time(self, a_datetime):
- self._before_event()
- return self.super.time(a_datetime)
-
-
-class AutoTimingTestResultDecorator(HookedTestResultDecorator):
- """Decorate a TestResult to add time events to a test run.
-
- By default this will cause a time event before every test event,
- but if explicit time data is being provided by the test run, then
- this decorator will turn itself off to prevent causing confusion.
- """
-
- def __init__(self, decorated):
- self._time = None
- super(AutoTimingTestResultDecorator, self).__init__(decorated)
-
- def _before_event(self):
- time = self._time
- if time is not None:
- return
- time = datetime.datetime.utcnow().replace(tzinfo=iso8601.Utc())
- self.decorated.time(time)
-
- def progress(self, offset, whence):
- return self.decorated.progress(offset, whence)
-
- @property
- def shouldStop(self):
- return self.decorated.shouldStop
-
- def time(self, a_datetime):
- """Provide a timestamp for the current test activity.
-
- :param a_datetime: If None, automatically add timestamps before every
- event (this is the default behaviour if time() is not called at
- all). If not None, pass the provided time onto the decorated
- result object and disable automatic timestamps.
- """
- self._time = a_datetime
- return self.decorated.time(a_datetime)
-
-
-class TagsMixin(object):
-
- def __init__(self):
- self._clear_tags()
-
- def _clear_tags(self):
- self._global_tags = set(), set()
- self._test_tags = None
-
- def _get_active_tags(self):
- global_new, global_gone = self._global_tags
- if self._test_tags is None:
- return set(global_new)
- test_new, test_gone = self._test_tags
- return global_new.difference(test_gone).union(test_new)
-
- def _get_current_scope(self):
- if self._test_tags:
- return self._test_tags
- return self._global_tags
-
- def _flush_current_scope(self, tag_receiver):
- new_tags, gone_tags = self._get_current_scope()
- if new_tags or gone_tags:
- tag_receiver.tags(new_tags, gone_tags)
- if self._test_tags:
- self._test_tags = set(), set()
- else:
- self._global_tags = set(), set()
-
- def startTestRun(self):
- self._clear_tags()
-
- def startTest(self, test):
- self._test_tags = set(), set()
-
- def stopTest(self, test):
- self._test_tags = None
-
- def tags(self, new_tags, gone_tags):
- """Handle tag instructions.
-
- Adds and removes tags as appropriate. If a test is currently running,
- the changes apply only to that test and do not affect subsequent tests.
-
- :param new_tags: Tags to add.
- :param gone_tags: Tags to remove.
- """
- current_new_tags, current_gone_tags = self._get_current_scope()
- current_new_tags.update(new_tags)
- current_new_tags.difference_update(gone_tags)
- current_gone_tags.update(gone_tags)
- current_gone_tags.difference_update(new_tags)
-
-
-class TagCollapsingDecorator(HookedTestResultDecorator, TagsMixin):
- """Collapses many 'tags' calls into one where possible."""
-
- def __init__(self, result):
- super(TagCollapsingDecorator, self).__init__(result)
- self._clear_tags()
-
- def _before_event(self):
- self._flush_current_scope(self.decorated)
-
- def tags(self, new_tags, gone_tags):
- TagsMixin.tags(self, new_tags, gone_tags)
-
-
-class TimeCollapsingDecorator(HookedTestResultDecorator):
- """Only pass on the first and last of a consecutive sequence of times."""
-
- def __init__(self, decorated):
- super(TimeCollapsingDecorator, self).__init__(decorated)
- self._last_received_time = None
- self._last_sent_time = None
-
- def _before_event(self):
- if self._last_received_time is None:
- return
- if self._last_received_time != self._last_sent_time:
- self.decorated.time(self._last_received_time)
- self._last_sent_time = self._last_received_time
- self._last_received_time = None
-
- def time(self, a_time):
- # Don't upcall, because we don't want to call _before_event, it's only
- # for non-time events.
- if self._last_received_time is None:
- self.decorated.time(a_time)
- self._last_sent_time = a_time
- self._last_received_time = a_time
-
-
-def and_predicates(predicates):
- """Return a predicate that is true iff all predicates are true."""
- # XXX: Should probably be in testtools to be better used by matchers. jml
- return lambda *args, **kwargs: all(p(*args, **kwargs) for p in predicates)
-
-
-def make_tag_filter(with_tags, without_tags):
- """Make a callback that checks tests against tags."""
-
- with_tags = with_tags and set(with_tags) or None
- without_tags = without_tags and set(without_tags) or None
-
- def check_tags(test, outcome, err, details, tags):
- if with_tags and not with_tags <= tags:
- return False
- if without_tags and bool(without_tags & tags):
- return False
- return True
-
- return check_tags
-
-
-class _PredicateFilter(TestResultDecorator, TagsMixin):
-
- def __init__(self, result, predicate):
- super(_PredicateFilter, self).__init__(result)
- self._clear_tags()
- self.decorated = TimeCollapsingDecorator(
- TagCollapsingDecorator(self.decorated))
- self._predicate = predicate
- # The current test (for filtering tags)
- self._current_test = None
- # Has the current test been filtered (for outputting test tags)
- self._current_test_filtered = None
- # Calls to this result that we don't know whether to forward on yet.
- self._buffered_calls = []
-
- def filter_predicate(self, test, outcome, error, details):
- return self._predicate(
- test, outcome, error, details, self._get_active_tags())
-
- def addError(self, test, err=None, details=None):
- if (self.filter_predicate(test, 'error', err, details)):
- self._buffered_calls.append(
- ('addError', [test, err], {'details': details}))
- else:
- self._filtered()
-
- def addFailure(self, test, err=None, details=None):
- if (self.filter_predicate(test, 'failure', err, details)):
- self._buffered_calls.append(
- ('addFailure', [test, err], {'details': details}))
- else:
- self._filtered()
-
- def addSkip(self, test, reason=None, details=None):
- if (self.filter_predicate(test, 'skip', reason, details)):
- self._buffered_calls.append(
- ('addSkip', [test, reason], {'details': details}))
- else:
- self._filtered()
-
- def addExpectedFailure(self, test, err=None, details=None):
- if self.filter_predicate(test, 'expectedfailure', err, details):
- self._buffered_calls.append(
- ('addExpectedFailure', [test, err], {'details': details}))
- else:
- self._filtered()
-
- def addUnexpectedSuccess(self, test, details=None):
- self._buffered_calls.append(
- ('addUnexpectedSuccess', [test], {'details': details}))
-
- def addSuccess(self, test, details=None):
- if (self.filter_predicate(test, 'success', None, details)):
- self._buffered_calls.append(
- ('addSuccess', [test], {'details': details}))
- else:
- self._filtered()
-
- def _filtered(self):
- self._current_test_filtered = True
-
- def startTest(self, test):
- """Start a test.
-
- Not passed directly to the client, but used to handle tags correctly.
- """
- TagsMixin.startTest(self, test)
- self._current_test = test
- self._current_test_filtered = False
- self._buffered_calls.append(('startTest', [test], {}))
-
- def stopTest(self, test):
- """Stop a test.
-
- Not passed directly to the client, but used to handle tags correctly.
- """
- if not self._current_test_filtered:
- for method, args, kwargs in self._buffered_calls:
- getattr(self.decorated, method)(*args, **kwargs)
- self.decorated.stopTest(test)
- self._current_test = None
- self._current_test_filtered = None
- self._buffered_calls = []
- TagsMixin.stopTest(self, test)
-
- def tags(self, new_tags, gone_tags):
- TagsMixin.tags(self, new_tags, gone_tags)
- if self._current_test is not None:
- self._buffered_calls.append(('tags', [new_tags, gone_tags], {}))
- else:
- return super(_PredicateFilter, self).tags(new_tags, gone_tags)
-
- def time(self, a_time):
- return self.decorated.time(a_time)
-
- def id_to_orig_id(self, id):
- if id.startswith("subunit.RemotedTestCase."):
- return id[len("subunit.RemotedTestCase."):]
- return id
-
-
-class TestResultFilter(TestResultDecorator):
- """A pyunit TestResult interface implementation which filters tests.
-
- Tests that pass the filter are handed on to another TestResult instance
- for further processing/reporting. To obtain the filtered results,
- the other instance must be interrogated.
-
- :ivar result: The result that tests are passed to after filtering.
- :ivar filter_predicate: The callback run to decide whether to pass
- a result.
- """
-
- def __init__(self, result, filter_error=False, filter_failure=False,
- filter_success=True, filter_skip=False, filter_xfail=False,
- filter_predicate=None, fixup_expected_failures=None):
- """Create a FilterResult object filtering to result.
-
- :param filter_error: Filter out errors.
- :param filter_failure: Filter out failures.
- :param filter_success: Filter out successful tests.
- :param filter_skip: Filter out skipped tests.
- :param filter_xfail: Filter out expected failure tests.
- :param filter_predicate: A callable taking (test, outcome, err,
- details, tags) and returning True if the result should be passed
- through. err and details may be None if no error or extra
- metadata is available. outcome is the name of the outcome such
- as 'success' or 'failure'. tags is new in 0.0.8; 0.0.7 filters
- are still supported but should be updated to accept the tags
- parameter for efficiency.
- :param fixup_expected_failures: Set of test ids to consider known
- failing.
- """
- predicates = []
- if filter_error:
- predicates.append(
- lambda t, outcome, e, d, tags: outcome != 'error')
- if filter_failure:
- predicates.append(
- lambda t, outcome, e, d, tags: outcome != 'failure')
- if filter_success:
- predicates.append(
- lambda t, outcome, e, d, tags: outcome != 'success')
- if filter_skip:
- predicates.append(
- lambda t, outcome, e, d, tags: outcome != 'skip')
- if filter_xfail:
- predicates.append(
- lambda t, outcome, e, d, tags: outcome != 'expectedfailure')
- if filter_predicate is not None:
- def compat(test, outcome, error, details, tags):
- # 0.0.7 and earlier did not support the 'tags' parameter.
- try:
- return filter_predicate(
- test, outcome, error, details, tags)
- except TypeError:
- return filter_predicate(test, outcome, error, details)
- predicates.append(compat)
- predicate = and_predicates(predicates)
- super(TestResultFilter, self).__init__(
- _PredicateFilter(result, predicate))
- if fixup_expected_failures is None:
- self._fixup_expected_failures = frozenset()
- else:
- self._fixup_expected_failures = fixup_expected_failures
-
- def addError(self, test, err=None, details=None):
- if self._failure_expected(test):
- self.addExpectedFailure(test, err=err, details=details)
- else:
- super(TestResultFilter, self).addError(
- test, err=err, details=details)
-
- def addFailure(self, test, err=None, details=None):
- if self._failure_expected(test):
- self.addExpectedFailure(test, err=err, details=details)
- else:
- super(TestResultFilter, self).addFailure(
- test, err=err, details=details)
-
- def addSuccess(self, test, details=None):
- if self._failure_expected(test):
- self.addUnexpectedSuccess(test, details=details)
- else:
- super(TestResultFilter, self).addSuccess(test, details=details)
-
- def _failure_expected(self, test):
- return (test.id() in self._fixup_expected_failures)
-
-
-class TestIdPrintingResult(testtools.TestResult):
-
- def __init__(self, stream, show_times=False):
- """Create a FilterResult object outputting to stream."""
- super(TestIdPrintingResult, self).__init__()
- self._stream = stream
- self.failed_tests = 0
- self.__time = None
- self.show_times = show_times
- self._test = None
- self._test_duration = 0
-
- def addError(self, test, err):
- self.failed_tests += 1
- self._test = test
-
- def addFailure(self, test, err):
- self.failed_tests += 1
- self._test = test
-
- def addSuccess(self, test):
- self._test = test
-
- def addSkip(self, test, reason=None, details=None):
- self._test = test
-
- def addUnexpectedSuccess(self, test, details=None):
- self.failed_tests += 1
- self._test = test
-
- def addExpectedFailure(self, test, err=None, details=None):
- self._test = test
-
- def reportTest(self, test, duration):
- if self.show_times:
- seconds = duration.seconds
- seconds += duration.days * 3600 * 24
- seconds += duration.microseconds / 1000000.0
- self._stream.write(test.id() + ' %0.3f\n' % seconds)
- else:
- self._stream.write(test.id() + '\n')
-
- def startTest(self, test):
- self._start_time = self._time()
-
- def stopTest(self, test):
- test_duration = self._time() - self._start_time
- self.reportTest(self._test, test_duration)
-
- def time(self, time):
- self.__time = time
-
- def _time(self):
- return self.__time
-
- def wasSuccessful(self):
- "Tells whether or not this result was a success"
- return self.failed_tests == 0
-
-
-class TestByTestResult(testtools.TestResult):
- """Call something every time a test completes."""
-
-# XXX: In testtools since lp:testtools r249. Once that's released, just
-# import that.
-
- def __init__(self, on_test):
- """Construct a ``TestByTestResult``.
-
- :param on_test: A callable that takes a test case, a status (one of
- "success", "failure", "error", "skip", or "xfail"), a start time
- (a ``datetime`` with timezone), a stop time, an iterable of tags,
- and a details dict. Is called at the end of each test (i.e. on
- ``stopTest``) with the accumulated values for that test.
- """
- super(TestByTestResult, self).__init__()
- self._on_test = on_test
-
- def startTest(self, test):
- super(TestByTestResult, self).startTest(test)
- self._start_time = self._now()
- # There's no supported (i.e. tested) behaviour that relies on these
- # being set, but it makes me more comfortable all the same. -- jml
- self._status = None
- self._details = None
- self._stop_time = None
-
- def stopTest(self, test):
- self._stop_time = self._now()
- super(TestByTestResult, self).stopTest(test)
- self._on_test(
- test=test,
- status=self._status,
- start_time=self._start_time,
- stop_time=self._stop_time,
- # current_tags is new in testtools 0.9.13.
- tags=getattr(self, 'current_tags', None),
- details=self._details)
-
- def _err_to_details(self, test, err, details):
- if details:
- return details
- return {'traceback': TracebackContent(err, test)}
-
- def addSuccess(self, test, details=None):
- super(TestByTestResult, self).addSuccess(test)
- self._status = 'success'
- self._details = details
-
- def addFailure(self, test, err=None, details=None):
- super(TestByTestResult, self).addFailure(test, err, details)
- self._status = 'failure'
- self._details = self._err_to_details(test, err, details)
-
- def addError(self, test, err=None, details=None):
- super(TestByTestResult, self).addError(test, err, details)
- self._status = 'error'
- self._details = self._err_to_details(test, err, details)
-
- def addSkip(self, test, reason=None, details=None):
- super(TestByTestResult, self).addSkip(test, reason, details)
- self._status = 'skip'
- if details is None:
- details = {'reason': text_content(reason)}
- elif reason:
- # XXX: What if details already has 'reason' key?
- details['reason'] = text_content(reason)
- self._details = details
-
- def addExpectedFailure(self, test, err=None, details=None):
- super(TestByTestResult, self).addExpectedFailure(test, err, details)
- self._status = 'xfail'
- self._details = self._err_to_details(test, err, details)
-
- def addUnexpectedSuccess(self, test, details=None):
- super(TestByTestResult, self).addUnexpectedSuccess(test, details)
- self._status = 'success'
- self._details = details
-
-
-class CsvResult(TestByTestResult):
-
- def __init__(self, stream):
- super(CsvResult, self).__init__(self._on_test)
- self._write_row = csv.writer(stream).writerow
-
- def _on_test(self, test, status, start_time, stop_time, tags, details):
- self._write_row([test.id(), status, start_time, stop_time])
-
- def startTestRun(self):
- super(CsvResult, self).startTestRun()
- self._write_row(['test', 'status', 'start_time', 'stop_time'])
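
These decorators are typically combined by feeding a parsed subunit stream into a TestResultFilter wrapped around an ordinary TestResult. A rough sketch; the inline stream and the 'quick' tag are invented, and the expected counts follow from the default filter_success=True plus the tag predicate:

    import unittest
    from io import BytesIO

    from subunit import ProtocolTestCase
    from subunit.test_results import TestResultFilter, make_tag_filter

    stream = BytesIO(b"test: one\n"
                     b"tags: quick\n"
                     b"failure: one\n"
                     b"test: two\n"
                     b"successful: two\n")
    downstream = unittest.TestResult()
    # Keep only events from tests tagged 'quick'; successes are dropped by
    # default.
    filtered = TestResultFilter(
        downstream, filter_predicate=make_tag_filter(['quick'], []))
    ProtocolTestCase(stream).run(filtered)
    print(downstream.testsRun, len(downstream.failures))   # 1 1
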
diff --git a/lib/subunit/python/subunit/tests/TestUtil.py b/lib/subunit/python/subunit/tests/TestUtil.py
deleted file mode 100644
index 39d901e0a9..0000000000
--- a/lib/subunit/python/subunit/tests/TestUtil.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright (c) 2004 Canonical Limited
-# Author: Robert Collins <robert.collins@canonical.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-import sys
-import logging
-import unittest
-
-
-class LogCollector(logging.Handler):
- def __init__(self):
- logging.Handler.__init__(self)
- self.records=[]
- def emit(self, record):
- self.records.append(record.getMessage())
-
-
-def makeCollectingLogger():
- """I make a logger instance that collects its logs for programmatic analysis
- -> (logger, collector)"""
- logger=logging.Logger("collector")
- handler=LogCollector()
- handler.setFormatter(logging.Formatter("%(levelname)s: %(message)s"))
- logger.addHandler(handler)
- return logger, handler
-
-
-def visitTests(suite, visitor):
- """A foreign method for visiting the tests in a test suite."""
- for test in suite._tests:
- #Abusing types to avoid monkey patching unittest.TestCase.
- # Maybe that would be better?
- try:
- test.visit(visitor)
- except AttributeError:
- if isinstance(test, unittest.TestCase):
- visitor.visitCase(test)
- elif isinstance(test, unittest.TestSuite):
- visitor.visitSuite(test)
- visitTests(test, visitor)
- else:
- print ("unvisitable non-unittest.TestCase element %r (%r)" % (test, test.__class__))
-
-
-class TestSuite(unittest.TestSuite):
- """I am an extended TestSuite with a visitor interface.
- This is primarily to allow filtering of tests - and suites or
- more in the future. An iterator of just tests wouldn't scale..."""
-
- def visit(self, visitor):
- """visit the composite. Visiting is depth-first.
- current callbacks are visitSuite and visitCase."""
- visitor.visitSuite(self)
- visitTests(self, visitor)
-
-
-class TestLoader(unittest.TestLoader):
- """Custome TestLoader to set the right TestSuite class."""
- suiteClass = TestSuite
-
-class TestVisitor(object):
- """A visitor for Tests"""
- def visitSuite(self, aTestSuite):
- pass
- def visitCase(self, aTestCase):
- pass
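
A minimal illustrative use of the visitor interface above (Sample and CountingVisitor are invented for the example); the point is that a suite can be inspected without running any tests:

    import unittest

    from subunit.tests.TestUtil import TestLoader, TestVisitor

    class Sample(unittest.TestCase):
        def test_one(self):
            pass
        def test_two(self):
            pass

    class CountingVisitor(TestVisitor):
        # Record test ids instead of running the tests.
        def __init__(self):
            self.ids = []
        def visitCase(self, case):
            self.ids.append(case.id())

    suite = TestLoader().loadTestsFromTestCase(Sample)  # a visitable TestSuite
    visitor = CountingVisitor()
    suite.visit(visitor)
    print(visitor.ids)   # both Sample test ids; nothing was executed
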
diff --git a/lib/subunit/python/subunit/tests/__init__.py b/lib/subunit/python/subunit/tests/__init__.py
deleted file mode 100644
index e0e1eb1b04..0000000000
--- a/lib/subunit/python/subunit/tests/__init__.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-from subunit.tests import (
- TestUtil,
- test_chunked,
- test_details,
- test_progress_model,
- test_run,
- test_subunit_filter,
- test_subunit_stats,
- test_subunit_tags,
- test_tap2subunit,
- test_test_protocol,
- test_test_results,
- )
-
-def test_suite():
- result = TestUtil.TestSuite()
- result.addTest(test_chunked.test_suite())
- result.addTest(test_details.test_suite())
- result.addTest(test_progress_model.test_suite())
- result.addTest(test_test_results.test_suite())
- result.addTest(test_test_protocol.test_suite())
- result.addTest(test_tap2subunit.test_suite())
- result.addTest(test_subunit_filter.test_suite())
- result.addTest(test_subunit_tags.test_suite())
- result.addTest(test_subunit_stats.test_suite())
- result.addTest(test_run.test_suite())
- return result
diff --git a/lib/subunit/python/subunit/tests/sample-script.py b/lib/subunit/python/subunit/tests/sample-script.py
deleted file mode 100755
index 91838f6d6f..0000000000
--- a/lib/subunit/python/subunit/tests/sample-script.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python
-import sys
-if sys.platform == "win32":
- import msvcrt, os
- msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
-if len(sys.argv) == 2:
- # subunit.tests.test_test_protocol.TestExecTestCase.test_sample_method_args
- # uses this code path to be sure that the arguments were passed to
- # sample-script.py
- print("test fail")
- print("error fail")
- sys.exit(0)
-print("test old mcdonald")
-print("success old mcdonald")
-print("test bing crosby")
-print("failure bing crosby [")
-print("foo.c:53:ERROR invalid state")
-print("]")
-print("test an error")
-print("error an error")
-sys.exit(0)
diff --git a/lib/subunit/python/subunit/tests/sample-two-script.py b/lib/subunit/python/subunit/tests/sample-two-script.py
deleted file mode 100755
index fc73dfc409..0000000000
--- a/lib/subunit/python/subunit/tests/sample-two-script.py
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/usr/bin/env python
-import sys
-print("test old mcdonald")
-print("success old mcdonald")
-print("test bing crosby")
-print("success bing crosby")
-sys.exit(0)
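
The two sample scripts above emit the textual subunit v1 protocol that the test suite consumes. As a sketch of the consuming side (using TestProtocolServer the same way tests/test_run.py does), the inline bytes simply repeat sample-two-script.py's output:

    import unittest
    from io import BytesIO

    from subunit import TestProtocolServer

    stream = BytesIO(b"test old mcdonald\n"
                     b"success old mcdonald\n"
                     b"test bing crosby\n"
                     b"success bing crosby\n")
    result = unittest.TestResult()
    TestProtocolServer(result).readFrom(stream)
    print(result.testsRun, result.wasSuccessful())   # 2 True
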
diff --git a/lib/subunit/python/subunit/tests/test_chunked.py b/lib/subunit/python/subunit/tests/test_chunked.py
deleted file mode 100644
index e0742f1af3..0000000000
--- a/lib/subunit/python/subunit/tests/test_chunked.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-# Copyright (C) 2011 Martin Pool <mbp@sourcefrog.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-import unittest
-
-from testtools.compat import _b, BytesIO
-
-import subunit.chunked
-
-
-def test_suite():
- loader = subunit.tests.TestUtil.TestLoader()
- result = loader.loadTestsFromName(__name__)
- return result
-
-
-class TestDecode(unittest.TestCase):
-
- def setUp(self):
- unittest.TestCase.setUp(self)
- self.output = BytesIO()
- self.decoder = subunit.chunked.Decoder(self.output)
-
- def test_close_read_length_short_errors(self):
- self.assertRaises(ValueError, self.decoder.close)
-
- def test_close_body_short_errors(self):
- self.assertEqual(None, self.decoder.write(_b('2\r\na')))
- self.assertRaises(ValueError, self.decoder.close)
-
- def test_close_body_buffered_data_errors(self):
- self.assertEqual(None, self.decoder.write(_b('2\r')))
- self.assertRaises(ValueError, self.decoder.close)
-
- def test_close_after_finished_stream_safe(self):
- self.assertEqual(None, self.decoder.write(_b('2\r\nab')))
- self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
- self.decoder.close()
-
- def test_decode_nothing(self):
- self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
- self.assertEqual(_b(''), self.output.getvalue())
-
- def test_decode_serialised_form(self):
- self.assertEqual(None, self.decoder.write(_b("F\r\n")))
- self.assertEqual(None, self.decoder.write(_b("serialised\n")))
- self.assertEqual(_b(''), self.decoder.write(_b("form0\r\n")))
-
- def test_decode_short(self):
- self.assertEqual(_b(''), self.decoder.write(_b('3\r\nabc0\r\n')))
- self.assertEqual(_b('abc'), self.output.getvalue())
-
- def test_decode_combines_short(self):
- self.assertEqual(_b(''), self.decoder.write(_b('6\r\nabcdef0\r\n')))
- self.assertEqual(_b('abcdef'), self.output.getvalue())
-
- def test_decode_excess_bytes_from_write(self):
- self.assertEqual(_b('1234'), self.decoder.write(_b('3\r\nabc0\r\n1234')))
- self.assertEqual(_b('abc'), self.output.getvalue())
-
- def test_decode_write_after_finished_errors(self):
- self.assertEqual(_b('1234'), self.decoder.write(_b('3\r\nabc0\r\n1234')))
- self.assertRaises(ValueError, self.decoder.write, _b(''))
-
- def test_decode_hex(self):
- self.assertEqual(_b(''), self.decoder.write(_b('A\r\n12345678900\r\n')))
- self.assertEqual(_b('1234567890'), self.output.getvalue())
-
- def test_decode_long_ranges(self):
- self.assertEqual(None, self.decoder.write(_b('10000\r\n')))
- self.assertEqual(None, self.decoder.write(_b('1' * 65536)))
- self.assertEqual(None, self.decoder.write(_b('10000\r\n')))
- self.assertEqual(None, self.decoder.write(_b('2' * 65536)))
- self.assertEqual(_b(''), self.decoder.write(_b('0\r\n')))
- self.assertEqual(_b('1' * 65536 + '2' * 65536), self.output.getvalue())
-
- def test_decode_newline_nonstrict(self):
- """Tolerate chunk markers with no CR character."""
- # From <http://pad.lv/505078>
- self.decoder = subunit.chunked.Decoder(self.output, strict=False)
- self.assertEqual(None, self.decoder.write(_b('a\n')))
- self.assertEqual(None, self.decoder.write(_b('abcdeabcde')))
- self.assertEqual(_b(''), self.decoder.write(_b('0\n')))
- self.assertEqual(_b('abcdeabcde'), self.output.getvalue())
-
- def test_decode_strict_newline_only(self):
- """Reject chunk markers with no CR character in strict mode."""
- # From <http://pad.lv/505078>
- self.assertRaises(ValueError,
- self.decoder.write, _b('a\n'))
-
- def test_decode_strict_multiple_crs(self):
- self.assertRaises(ValueError,
- self.decoder.write, _b('a\r\r\n'))
-
- def test_decode_short_header(self):
- self.assertRaises(ValueError,
- self.decoder.write, _b('\n'))
-
-
-class TestEncode(unittest.TestCase):
-
- def setUp(self):
- unittest.TestCase.setUp(self)
- self.output = BytesIO()
- self.encoder = subunit.chunked.Encoder(self.output)
-
- def test_encode_nothing(self):
- self.encoder.close()
- self.assertEqual(_b('0\r\n'), self.output.getvalue())
-
- def test_encode_empty(self):
- self.encoder.write(_b(''))
- self.encoder.close()
- self.assertEqual(_b('0\r\n'), self.output.getvalue())
-
- def test_encode_short(self):
- self.encoder.write(_b('abc'))
- self.encoder.close()
- self.assertEqual(_b('3\r\nabc0\r\n'), self.output.getvalue())
-
- def test_encode_combines_short(self):
- self.encoder.write(_b('abc'))
- self.encoder.write(_b('def'))
- self.encoder.close()
- self.assertEqual(_b('6\r\nabcdef0\r\n'), self.output.getvalue())
-
- def test_encode_over_9_is_in_hex(self):
- self.encoder.write(_b('1234567890'))
- self.encoder.close()
- self.assertEqual(_b('A\r\n12345678900\r\n'), self.output.getvalue())
-
- def test_encode_long_ranges_not_combined(self):
- self.encoder.write(_b('1' * 65536))
- self.encoder.write(_b('2' * 65536))
- self.encoder.close()
- self.assertEqual(_b('10000\r\n' + '1' * 65536 + '10000\r\n' +
- '2' * 65536 + '0\r\n'), self.output.getvalue())
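
The chunked module frames byte payloads with hex length prefixes, much as HTTP chunked encoding does; the tests above pin the exact wire format. An illustrative round trip:

    from io import BytesIO

    import subunit.chunked

    # Encoding: consecutive writes are combined into one length-prefixed
    # chunk, and close() emits the terminating zero-length chunk.
    raw = BytesIO()
    encoder = subunit.chunked.Encoder(raw)
    encoder.write(b'abc')
    encoder.write(b'def')
    encoder.close()
    print(raw.getvalue())      # b'6\r\nabcdef0\r\n'

    # Decoding: the payload goes to the output stream, and write() returns
    # whatever bytes follow the terminating chunk.
    payload = BytesIO()
    decoder = subunit.chunked.Decoder(payload)
    leftover = decoder.write(raw.getvalue())
    print(payload.getvalue(), leftover)   # b'abcdef' b''
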
diff --git a/lib/subunit/python/subunit/tests/test_details.py b/lib/subunit/python/subunit/tests/test_details.py
deleted file mode 100644
index 746aa041e5..0000000000
--- a/lib/subunit/python/subunit/tests/test_details.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-import unittest
-
-from testtools.compat import _b, StringIO
-
-import subunit.tests
-from subunit import content, content_type, details
-
-
-def test_suite():
- loader = subunit.tests.TestUtil.TestLoader()
- result = loader.loadTestsFromName(__name__)
- return result
-
-
-class TestSimpleDetails(unittest.TestCase):
-
- def test_lineReceived(self):
- parser = details.SimpleDetailsParser(None)
- parser.lineReceived(_b("foo\n"))
- parser.lineReceived(_b("bar\n"))
- self.assertEqual(_b("foo\nbar\n"), parser._message)
-
- def test_lineReceived_escaped_bracket(self):
- parser = details.SimpleDetailsParser(None)
- parser.lineReceived(_b("foo\n"))
- parser.lineReceived(_b(" ]are\n"))
- parser.lineReceived(_b("bar\n"))
- self.assertEqual(_b("foo\n]are\nbar\n"), parser._message)
-
- def test_get_message(self):
- parser = details.SimpleDetailsParser(None)
- self.assertEqual(_b(""), parser.get_message())
-
- def test_get_details(self):
- parser = details.SimpleDetailsParser(None)
- traceback = ""
- expected = {}
- expected['traceback'] = content.Content(
- content_type.ContentType("text", "x-traceback",
- {'charset': 'utf8'}),
- lambda:[_b("")])
- found = parser.get_details()
- self.assertEqual(expected.keys(), found.keys())
- self.assertEqual(expected['traceback'].content_type,
- found['traceback'].content_type)
- self.assertEqual(_b('').join(expected['traceback'].iter_bytes()),
- _b('').join(found['traceback'].iter_bytes()))
-
- def test_get_details_skip(self):
- parser = details.SimpleDetailsParser(None)
- traceback = ""
- expected = {}
- expected['reason'] = content.Content(
- content_type.ContentType("text", "plain"),
- lambda:[_b("")])
- found = parser.get_details("skip")
- self.assertEqual(expected, found)
-
- def test_get_details_success(self):
- parser = details.SimpleDetailsParser(None)
- traceback = ""
- expected = {}
- expected['message'] = content.Content(
- content_type.ContentType("text", "plain"),
- lambda:[_b("")])
- found = parser.get_details("success")
- self.assertEqual(expected, found)
-
-
-class TestMultipartDetails(unittest.TestCase):
-
- def test_get_message_is_None(self):
- parser = details.MultipartDetailsParser(None)
- self.assertEqual(None, parser.get_message())
-
- def test_get_details(self):
- parser = details.MultipartDetailsParser(None)
- self.assertEqual({}, parser.get_details())
-
- def test_parts(self):
- parser = details.MultipartDetailsParser(None)
- parser.lineReceived(_b("Content-Type: text/plain\n"))
- parser.lineReceived(_b("something\n"))
- parser.lineReceived(_b("F\r\n"))
- parser.lineReceived(_b("serialised\n"))
- parser.lineReceived(_b("form0\r\n"))
- expected = {}
- expected['something'] = content.Content(
- content_type.ContentType("text", "plain"),
- lambda:[_b("serialised\nform")])
- found = parser.get_details()
- self.assertEqual(expected.keys(), found.keys())
- self.assertEqual(expected['something'].content_type,
- found['something'].content_type)
- self.assertEqual(_b('').join(expected['something'].iter_bytes()),
- _b('').join(found['something'].iter_bytes()))
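
For orientation, a small sketch of the simple (non-multipart) details parser exercised above; the input lines are invented, and the escaped-bracket rule matches test_lineReceived_escaped_bracket:

    from subunit import details

    parser = details.SimpleDetailsParser(None)
    parser.lineReceived(b"foo\n")
    parser.lineReceived(b" ]escaped\n")   # a leading space escapes a literal ']'
    parser.lineReceived(b"bar\n")
    print(parser.get_message())           # b'foo\n]escaped\nbar\n'
    # get_details() wraps the same bytes as a text/x-traceback content object.
    print(list(parser.get_details().keys()))   # ['traceback']
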
diff --git a/lib/subunit/python/subunit/tests/test_progress_model.py b/lib/subunit/python/subunit/tests/test_progress_model.py
deleted file mode 100644
index 76200c6107..0000000000
--- a/lib/subunit/python/subunit/tests/test_progress_model.py
+++ /dev/null
@@ -1,118 +0,0 @@
-#
-# subunit: extensions to Python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-import unittest
-
-import subunit
-from subunit.progress_model import ProgressModel
-
-
-class TestProgressModel(unittest.TestCase):
-
- def assertProgressSummary(self, pos, total, progress):
- """Assert that a progress model has reached a particular point."""
- self.assertEqual(pos, progress.pos())
- self.assertEqual(total, progress.width())
-
- def test_new_progress_0_0(self):
- progress = ProgressModel()
- self.assertProgressSummary(0, 0, progress)
-
- def test_advance_0_0(self):
- progress = ProgressModel()
- progress.advance()
- self.assertProgressSummary(1, 0, progress)
-
- def test_advance_1_0(self):
- progress = ProgressModel()
- progress.advance()
- self.assertProgressSummary(1, 0, progress)
-
- def test_set_width_absolute(self):
- progress = ProgressModel()
- progress.set_width(10)
- self.assertProgressSummary(0, 10, progress)
-
- def test_set_width_absolute_preserves_pos(self):
- progress = ProgressModel()
- progress.advance()
- progress.set_width(2)
- self.assertProgressSummary(1, 2, progress)
-
- def test_adjust_width(self):
- progress = ProgressModel()
- progress.adjust_width(10)
- self.assertProgressSummary(0, 10, progress)
- progress.adjust_width(-10)
- self.assertProgressSummary(0, 0, progress)
-
- def test_adjust_width_preserves_pos(self):
- progress = ProgressModel()
- progress.advance()
- progress.adjust_width(10)
- self.assertProgressSummary(1, 10, progress)
- progress.adjust_width(-10)
- self.assertProgressSummary(1, 0, progress)
-
- def test_push_preserves_progress(self):
- progress = ProgressModel()
- progress.adjust_width(3)
- progress.advance()
- progress.push()
- self.assertProgressSummary(1, 3, progress)
-
- def test_advance_advances_substack(self):
- progress = ProgressModel()
- progress.adjust_width(3)
- progress.advance()
- progress.push()
- progress.adjust_width(1)
- progress.advance()
- self.assertProgressSummary(2, 3, progress)
-
- def test_adjust_width_adjusts_substack(self):
- progress = ProgressModel()
- progress.adjust_width(3)
- progress.advance()
- progress.push()
- progress.adjust_width(2)
- progress.advance()
- self.assertProgressSummary(3, 6, progress)
-
- def test_set_width_adjusts_substack(self):
- progress = ProgressModel()
- progress.adjust_width(3)
- progress.advance()
- progress.push()
- progress.set_width(2)
- progress.advance()
- self.assertProgressSummary(3, 6, progress)
-
- def test_pop_restores_progress(self):
- progress = ProgressModel()
- progress.adjust_width(3)
- progress.advance()
- progress.push()
- progress.adjust_width(1)
- progress.advance()
- progress.pop()
- self.assertProgressSummary(1, 3, progress)
-
-
-def test_suite():
- loader = subunit.tests.TestUtil.TestLoader()
- result = loader.loadTestsFromName(__name__)
- return result
diff --git a/lib/subunit/python/subunit/tests/test_run.py b/lib/subunit/python/subunit/tests/test_run.py
deleted file mode 100644
index 10519ed086..0000000000
--- a/lib/subunit/python/subunit/tests/test_run.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2011 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-from testtools.compat import BytesIO
-import unittest
-
-from testtools import PlaceHolder
-
-import subunit
-from subunit.run import SubunitTestRunner
-
-
-def test_suite():
- loader = subunit.tests.TestUtil.TestLoader()
- result = loader.loadTestsFromName(__name__)
- return result
-
-
-class TimeCollectingTestResult(unittest.TestResult):
-
- def __init__(self, *args, **kwargs):
- super(TimeCollectingTestResult, self).__init__(*args, **kwargs)
- self.time_called = []
-
- def time(self, a_time):
- self.time_called.append(a_time)
-
-
-class TestSubunitTestRunner(unittest.TestCase):
-
- def test_includes_timing_output(self):
- io = BytesIO()
- runner = SubunitTestRunner(stream=io)
- test = PlaceHolder('name')
- runner.run(test)
- client = TimeCollectingTestResult()
- io.seek(0)
- subunit.TestProtocolServer(client).readFrom(io)
- self.assertTrue(len(client.time_called) > 0)
diff --git a/lib/subunit/python/subunit/tests/test_subunit_filter.py b/lib/subunit/python/subunit/tests/test_subunit_filter.py
deleted file mode 100644
index 33b924824d..0000000000
--- a/lib/subunit/python/subunit/tests/test_subunit_filter.py
+++ /dev/null
@@ -1,370 +0,0 @@
-#
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Tests for subunit.TestResultFilter."""
-
-from datetime import datetime
-import os
-import subprocess
-import sys
-from subunit import iso8601
-import unittest
-
-from testtools import TestCase
-from testtools.compat import _b, BytesIO
-from testtools.testresult.doubles import ExtendedTestResult
-
-import subunit
-from subunit.test_results import make_tag_filter, TestResultFilter
-
-
-class TestTestResultFilter(TestCase):
- """Test for TestResultFilter, a TestResult object which filters tests."""
-
- # While TestResultFilter works on python objects, using a subunit stream
- # is an easy, pithy way of getting a series of test objects to call into
- # the TestResult; and as TestResultFilter is intended for use with subunit,
- # it also has the benefit of detecting any interface skew issues.
- example_subunit_stream = _b("""\
-tags: global
-test passed
-success passed
-test failed
-tags: local
-failure failed
-test error
-error error [
-error details
-]
-test skipped
-skip skipped
-test todo
-xfail todo
-""")
-
- def run_tests(self, result_filter, input_stream=None):
- """Run tests through the given filter.
-
- :param result_filter: A filtering TestResult object.
- :param input_stream: Bytes of subunit stream data. If not provided,
- uses TestTestResultFilter.example_subunit_stream.
- """
- if input_stream is None:
- input_stream = self.example_subunit_stream
- test = subunit.ProtocolTestCase(BytesIO(input_stream))
- test.run(result_filter)
-
- def test_default(self):
- """The default is to exclude success and include everything else."""
- filtered_result = unittest.TestResult()
- result_filter = TestResultFilter(filtered_result)
- self.run_tests(result_filter)
- # skips are seen as success by default python TestResult.
- self.assertEqual(['error'],
- [error[0].id() for error in filtered_result.errors])
- self.assertEqual(['failed'],
- [failure[0].id() for failure in
- filtered_result.failures])
- self.assertEqual(4, filtered_result.testsRun)
-
- def test_tag_filter(self):
- tag_filter = make_tag_filter(['global'], ['local'])
- result = ExtendedTestResult()
- result_filter = TestResultFilter(
- result, filter_success=False, filter_predicate=tag_filter)
- self.run_tests(result_filter)
- tests_included = [
- event[1] for event in result._events if event[0] == 'startTest']
- tests_expected = list(map(
- subunit.RemotedTestCase,
- ['passed', 'error', 'skipped', 'todo']))
- self.assertEquals(tests_expected, tests_included)
-
- def test_tags_tracked_correctly(self):
- tag_filter = make_tag_filter(['a'], [])
- result = ExtendedTestResult()
- result_filter = TestResultFilter(
- result, filter_success=False, filter_predicate=tag_filter)
- input_stream = _b(
- "test: foo\n"
- "tags: a\n"
- "successful: foo\n"
- "test: bar\n"
- "successful: bar\n")
- self.run_tests(result_filter, input_stream)
- foo = subunit.RemotedTestCase('foo')
- self.assertEquals(
- [('startTest', foo),
- ('tags', set(['a']), set()),
- ('addSuccess', foo),
- ('stopTest', foo),
- ],
- result._events)
-
- def test_exclude_errors(self):
- filtered_result = unittest.TestResult()
- result_filter = TestResultFilter(filtered_result, filter_error=True)
- self.run_tests(result_filter)
- # Skips are seen as errors by the default Python TestResult.
- self.assertEqual([], filtered_result.errors)
- self.assertEqual(['failed'],
- [failure[0].id() for failure in
- filtered_result.failures])
- self.assertEqual(3, filtered_result.testsRun)
-
- def test_fixup_expected_failures(self):
- filtered_result = unittest.TestResult()
- result_filter = TestResultFilter(filtered_result,
- fixup_expected_failures=set(["failed"]))
- self.run_tests(result_filter)
- self.assertEqual(['failed', 'todo'],
- [failure[0].id() for failure in filtered_result.expectedFailures])
- self.assertEqual([], filtered_result.failures)
- self.assertEqual(4, filtered_result.testsRun)
-
- def test_fixup_expected_errors(self):
- filtered_result = unittest.TestResult()
- result_filter = TestResultFilter(filtered_result,
- fixup_expected_failures=set(["error"]))
- self.run_tests(result_filter)
- self.assertEqual(['error', 'todo'],
- [failure[0].id() for failure in filtered_result.expectedFailures])
- self.assertEqual([], filtered_result.errors)
- self.assertEqual(4, filtered_result.testsRun)
-
- def test_fixup_unexpected_success(self):
- filtered_result = unittest.TestResult()
- result_filter = TestResultFilter(filtered_result, filter_success=False,
- fixup_expected_failures=set(["passed"]))
- self.run_tests(result_filter)
- self.assertEqual(['passed'],
- [passed.id() for passed in filtered_result.unexpectedSuccesses])
- self.assertEqual(5, filtered_result.testsRun)
-
- def test_exclude_failure(self):
- filtered_result = unittest.TestResult()
- result_filter = TestResultFilter(filtered_result, filter_failure=True)
- self.run_tests(result_filter)
- self.assertEqual(['error'],
- [error[0].id() for error in filtered_result.errors])
- self.assertEqual([],
- [failure[0].id() for failure in
- filtered_result.failures])
- self.assertEqual(3, filtered_result.testsRun)
-
- def test_exclude_skips(self):
- filtered_result = subunit.TestResultStats(None)
- result_filter = TestResultFilter(filtered_result, filter_skip=True)
- self.run_tests(result_filter)
- self.assertEqual(0, filtered_result.skipped_tests)
- self.assertEqual(2, filtered_result.failed_tests)
- self.assertEqual(3, filtered_result.testsRun)
-
- def test_include_success(self):
- """Successes can be included if requested."""
- filtered_result = unittest.TestResult()
- result_filter = TestResultFilter(filtered_result,
- filter_success=False)
- self.run_tests(result_filter)
- self.assertEqual(['error'],
- [error[0].id() for error in filtered_result.errors])
- self.assertEqual(['failed'],
- [failure[0].id() for failure in
- filtered_result.failures])
- self.assertEqual(5, filtered_result.testsRun)
-
- def test_filter_predicate(self):
- """You can filter by predicate callbacks"""
- # 0.0.7 and earlier did not support the 'tags' parameter, so we need
- # to test that we still support behaviour without it.
- filtered_result = unittest.TestResult()
- def filter_cb(test, outcome, err, details):
- return outcome == 'success'
- result_filter = TestResultFilter(filtered_result,
- filter_predicate=filter_cb,
- filter_success=False)
- self.run_tests(result_filter)
- # Only success should pass
- self.assertEqual(1, filtered_result.testsRun)
-
- def test_filter_predicate_with_tags(self):
- """You can filter by predicate callbacks that accept tags"""
- filtered_result = unittest.TestResult()
- def filter_cb(test, outcome, err, details, tags):
- return outcome == 'success'
- result_filter = TestResultFilter(filtered_result,
- filter_predicate=filter_cb,
- filter_success=False)
- self.run_tests(result_filter)
- # Only success should pass
- self.assertEqual(1, filtered_result.testsRun)
-
- def test_time_ordering_preserved(self):
- # Passing a subunit stream through TestResultFilter preserves the
- # relative ordering of 'time' directives and any other subunit
- # directives that are still included.
- date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
- date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
- date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
- subunit_stream = _b('\n'.join([
- "time: %s",
- "test: foo",
- "time: %s",
- "error: foo",
- "time: %s",
- ""]) % (date_a, date_b, date_c))
- result = ExtendedTestResult()
- result_filter = TestResultFilter(result)
- self.run_tests(result_filter, subunit_stream)
- foo = subunit.RemotedTestCase('foo')
- self.maxDiff = None
- self.assertEqual(
- [('time', date_a),
- ('time', date_b),
- ('startTest', foo),
- ('addError', foo, {}),
- ('stopTest', foo),
- ('time', date_c)], result._events)
-
- def test_time_passes_through_filtered_tests(self):
- # Passing a subunit stream through TestResultFilter preserves 'time'
- # directives even if a specific test is filtered out.
- date_a = datetime(year=2000, month=1, day=1, tzinfo=iso8601.UTC)
- date_b = datetime(year=2000, month=1, day=2, tzinfo=iso8601.UTC)
- date_c = datetime(year=2000, month=1, day=3, tzinfo=iso8601.UTC)
- subunit_stream = _b('\n'.join([
- "time: %s",
- "test: foo",
- "time: %s",
- "success: foo",
- "time: %s",
- ""]) % (date_a, date_b, date_c))
- result = ExtendedTestResult()
- result_filter = TestResultFilter(result)
- result_filter.startTestRun()
- self.run_tests(result_filter, subunit_stream)
- result_filter.stopTestRun()
- foo = subunit.RemotedTestCase('foo')
- self.maxDiff = None
- self.assertEqual(
- [('startTestRun',),
- ('time', date_a),
- ('time', date_c),
- ('stopTestRun',),], result._events)
-
- def test_skip_preserved(self):
- subunit_stream = _b('\n'.join([
- "test: foo",
- "skip: foo",
- ""]))
- result = ExtendedTestResult()
- result_filter = TestResultFilter(result)
- self.run_tests(result_filter, subunit_stream)
- foo = subunit.RemotedTestCase('foo')
- self.assertEquals(
- [('startTest', foo),
- ('addSkip', foo, {}),
- ('stopTest', foo), ], result._events)
-
- if sys.version_info < (2, 7):
- # These tests require Python >=2.7.
- del test_fixup_expected_failures, test_fixup_expected_errors, test_fixup_unexpected_success
-
-
-class TestFilterCommand(TestCase):
-
- example_subunit_stream = _b("""\
-tags: global
-test passed
-success passed
-test failed
-tags: local
-failure failed
-test error
-error error [
-error details
-]
-test skipped
-skip skipped
-test todo
-xfail todo
-""")
-
- def run_command(self, args, stream):
- root = os.path.dirname(
- os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
- script_path = os.path.join(root, 'filters', 'subunit-filter')
- command = [sys.executable, script_path] + list(args)
- ps = subprocess.Popen(
- command, stdin=subprocess.PIPE, stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- out, err = ps.communicate(stream)
- if ps.returncode != 0:
- raise RuntimeError("%s failed: %s" % (command, err))
- return out
-
- def to_events(self, stream):
- test = subunit.ProtocolTestCase(BytesIO(stream))
- result = ExtendedTestResult()
- test.run(result)
- return result._events
-
- def test_default(self):
- output = self.run_command([], _b(
- "test: foo\n"
- "skip: foo\n"
- ))
- events = self.to_events(output)
- foo = subunit.RemotedTestCase('foo')
- self.assertEqual(
- [('startTest', foo),
- ('addSkip', foo, {}),
- ('stopTest', foo)],
- events)
-
- def test_tags(self):
- output = self.run_command(['-s', '--with-tag', 'a'], _b(
- "tags: a\n"
- "test: foo\n"
- "success: foo\n"
- "tags: -a\n"
- "test: bar\n"
- "success: bar\n"
- "test: baz\n"
- "tags: a\n"
- "success: baz\n"
- ))
- events = self.to_events(output)
- foo = subunit.RemotedTestCase('foo')
- baz = subunit.RemotedTestCase('baz')
- self.assertEqual(
- [('tags', set(['a']), set()),
- ('startTest', foo),
- ('addSuccess', foo),
- ('stopTest', foo),
- ('tags', set(), set(['a'])),
- ('startTest', baz),
- ('tags', set(['a']), set()),
- ('addSuccess', baz),
- ('stopTest', baz),
- ],
- events)
-
-
-def test_suite():
- loader = subunit.tests.TestUtil.TestLoader()
- result = loader.loadTestsFromName(__name__)
- return result
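
The filter tests above all drive TestResultFilter the same way: wrap a backend TestResult, then replay a subunit byte stream into it through ProtocolTestCase. A minimal standalone sketch of that pattern, assuming io.BytesIO as a stand-in for the testtools.compat helper used in the tests:

import unittest
from io import BytesIO  # assumed stand-in for testtools.compat.BytesIO

import subunit
from subunit.test_results import TestResultFilter

# A tiny subunit v1 stream: one passing test and one failing test.
stream = BytesIO(b"test: passes\n"
                 b"success: passes\n"
                 b"test: breaks\n"
                 b"failure: breaks\n")

backend = unittest.TestResult()
# Default behaviour, as test_default above asserts: successes are dropped,
# everything else is forwarded to the wrapped result.
result_filter = TestResultFilter(backend)
subunit.ProtocolTestCase(stream).run(result_filter)

print(backend.testsRun)                              # 1
print([case.id() for case, _ in backend.failures])   # ['breaks']
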
diff --git a/lib/subunit/python/subunit/tests/test_subunit_stats.py b/lib/subunit/python/subunit/tests/test_subunit_stats.py
deleted file mode 100644
index 6fd3301060..0000000000
--- a/lib/subunit/python/subunit/tests/test_subunit_stats.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Tests for subunit.TestResultStats."""
-
-import unittest
-
-from testtools.compat import _b, BytesIO, StringIO
-
-import subunit
-
-
-class TestTestResultStats(unittest.TestCase):
- """Test for TestResultStats, a TestResult object that generates stats."""
-
- def setUp(self):
- self.output = StringIO()
- self.result = subunit.TestResultStats(self.output)
- self.input_stream = BytesIO()
- self.test = subunit.ProtocolTestCase(self.input_stream)
-
- def test_stats_empty(self):
- self.test.run(self.result)
- self.assertEqual(0, self.result.total_tests)
- self.assertEqual(0, self.result.passed_tests)
- self.assertEqual(0, self.result.failed_tests)
- self.assertEqual(set(), self.result.seen_tags)
-
- def setUpUsedStream(self):
- self.input_stream.write(_b("""tags: global
-test passed
-success passed
-test failed
-tags: local
-failure failed
-test error
-error error
-test skipped
-skip skipped
-test todo
-xfail todo
-"""))
- self.input_stream.seek(0)
- self.test.run(self.result)
-
- def test_stats_smoke_everything(self):
- # Statistics are calculated usefully.
- self.setUpUsedStream()
- self.assertEqual(5, self.result.total_tests)
- self.assertEqual(2, self.result.passed_tests)
- self.assertEqual(2, self.result.failed_tests)
- self.assertEqual(1, self.result.skipped_tests)
- self.assertEqual(set(["global", "local"]), self.result.seen_tags)
-
- def test_stat_formatting(self):
- expected = ("""
-Total tests: 5
-Passed tests: 2
-Failed tests: 2
-Skipped tests: 1
-Seen tags: global, local
-""")[1:]
- self.setUpUsedStream()
- self.result.formatStats()
- self.assertEqual(expected, self.output.getvalue())
-
-
-def test_suite():
- loader = subunit.tests.TestUtil.TestLoader()
- result = loader.loadTestsFromName(__name__)
- return result
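
The same replay pattern applies to the statistics collector tested above; a hedged sketch, again assuming the stdlib io module in place of testtools.compat:

from io import BytesIO, StringIO  # assumed stand-ins for testtools.compat

import subunit

output = StringIO()
result = subunit.TestResultStats(output)
stream = BytesIO(b"test one\n"
                 b"success one\n"
                 b"test two\n"
                 b"failure two\n")
subunit.ProtocolTestCase(stream).run(result)

result.formatStats()   # writes the "Total tests: ..." block checked above
print(output.getvalue())
print(result.total_tests, result.passed_tests, result.failed_tests)  # 2 1 1
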
diff --git a/lib/subunit/python/subunit/tests/test_subunit_tags.py b/lib/subunit/python/subunit/tests/test_subunit_tags.py
deleted file mode 100644
index c98506a737..0000000000
--- a/lib/subunit/python/subunit/tests/test_subunit_tags.py
+++ /dev/null
@@ -1,69 +0,0 @@
-#
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Tests for subunit.tag_stream."""
-
-import unittest
-
-from testtools.compat import StringIO
-
-import subunit
-import subunit.test_results
-
-
-class TestSubUnitTags(unittest.TestCase):
-
- def setUp(self):
- self.original = StringIO()
- self.filtered = StringIO()
-
- def test_add_tag(self):
- self.original.write("tags: foo\n")
- self.original.write("test: test\n")
- self.original.write("tags: bar -quux\n")
- self.original.write("success: test\n")
- self.original.seek(0)
- result = subunit.tag_stream(self.original, self.filtered, ["quux"])
- self.assertEqual([
- "tags: quux",
- "tags: foo",
- "test: test",
- "tags: bar",
- "success: test",
- ],
- self.filtered.getvalue().splitlines())
-
- def test_remove_tag(self):
- self.original.write("tags: foo\n")
- self.original.write("test: test\n")
- self.original.write("tags: bar -quux\n")
- self.original.write("success: test\n")
- self.original.seek(0)
- result = subunit.tag_stream(self.original, self.filtered, ["-bar"])
- self.assertEqual([
- "tags: -bar",
- "tags: foo",
- "test: test",
- "tags: -quux",
- "success: test",
- ],
- self.filtered.getvalue().splitlines())
-
-
-def test_suite():
- loader = subunit.tests.TestUtil.TestLoader()
- result = loader.loadTestsFromName(__name__)
- return result
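
subunit.tag_stream, as exercised above, rewrites a textual subunit stream line by line; a standalone sketch, assuming io.StringIO in place of testtools.compat.StringIO:

from io import StringIO  # assumed stand-in for testtools.compat.StringIO

import subunit

source = StringIO("tags: foo\n"
                  "test: demo\n"
                  "success: demo\n")
rewritten = StringIO()
# Add the 'quux' tag to the whole stream; passing "-quux" instead would
# strip it, as test_remove_tag shows above.
subunit.tag_stream(source, rewritten, ["quux"])
print(rewritten.getvalue())
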
diff --git a/lib/subunit/python/subunit/tests/test_tap2subunit.py b/lib/subunit/python/subunit/tests/test_tap2subunit.py
deleted file mode 100644
index 11bc1916b3..0000000000
--- a/lib/subunit/python/subunit/tests/test_tap2subunit.py
+++ /dev/null
@@ -1,445 +0,0 @@
-#
-# subunit: extensions to python unittest to get test results from subprocesses.
-# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-"""Tests for TAP2SubUnit."""
-
-import unittest
-
-from testtools.compat import StringIO
-
-import subunit
-
-
-class TestTAP2SubUnit(unittest.TestCase):
- """Tests for TAP2SubUnit.
-
- These tests test TAP string data in, and subunit string data out.
- This is ok because the subunit protocol is intended to be stable,
- but it might be easier/pithier to write tests against TAP string in,
- parsed subunit objects out (by hooking the subunit stream to a subunit
- protocol server).
- """
-
- def setUp(self):
- self.tap = StringIO()
- self.subunit = StringIO()
-
- def test_skip_entire_file(self):
- # A file
- # 1..0 # Skipped: comment
- # results in a single skipped test.
- self.tap.write("1..0 # Skipped: entire file skipped\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test file skip",
- "skip file skip [",
- "Skipped: entire file skipped",
- "]",
- ],
- self.subunit.getvalue().splitlines())
-
- def test_ok_test_pass(self):
- # A file
- # ok
- # results in a passed test with name 'test 1' (a synthetic name as tap
- # does not require named fixtures - it is the first test in the tap
- # stream).
- self.tap.write("ok\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test test 1",
- "success test 1",
- ],
- self.subunit.getvalue().splitlines())
-
- def test_ok_test_number_pass(self):
- # A file
- # ok 1
- # results in a passed test with name 'test 1'
- self.tap.write("ok 1\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test test 1",
- "success test 1",
- ],
- self.subunit.getvalue().splitlines())
-
- def test_ok_test_number_description_pass(self):
- # A file
- # ok 1 - There is a description
- # results in a passed test with name 'test 1 - There is a description'
- self.tap.write("ok 1 - There is a description\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test test 1 - There is a description",
- "success test 1 - There is a description",
- ],
- self.subunit.getvalue().splitlines())
-
- def test_ok_test_description_pass(self):
- # A file
- # ok There is a description
- # results in a passed test with name 'test 1 There is a description'
- self.tap.write("ok There is a description\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test test 1 There is a description",
- "success test 1 There is a description",
- ],
- self.subunit.getvalue().splitlines())
-
- def test_ok_SKIP_skip(self):
- # A file
- # ok # SKIP
- # results in a skip test with name 'test 1'
- self.tap.write("ok # SKIP\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test test 1",
- "skip test 1",
- ],
- self.subunit.getvalue().splitlines())
-
- def test_ok_skip_number_comment_lowercase(self):
- self.tap.write("ok 1 # skip no samba environment available, skipping compilation\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test test 1",
- "skip test 1 [",
- "no samba environment available, skipping compilation",
- "]"
- ],
- self.subunit.getvalue().splitlines())
-
- def test_ok_number_description_SKIP_skip_comment(self):
- # A file
- # ok 1 foo # SKIP Not done yet
- # results in a skip test with name 'test 1 foo' and a log of
- # Not done yet
- self.tap.write("ok 1 foo # SKIP Not done yet\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test test 1 foo",
- "skip test 1 foo [",
- "Not done yet",
- "]",
- ],
- self.subunit.getvalue().splitlines())
-
- def test_ok_SKIP_skip_comment(self):
- # A file
- # ok # SKIP Not done yet
- # results in a skip test with name 'test 1' and a log of Not done yet
- self.tap.write("ok # SKIP Not done yet\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test test 1",
- "skip test 1 [",
- "Not done yet",
- "]",
- ],
- self.subunit.getvalue().splitlines())
-
- def test_ok_TODO_xfail(self):
- # A file
- # ok # TODO
- # results in a xfail test with name 'test 1'
- self.tap.write("ok # TODO\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test test 1",
- "xfail test 1",
- ],
- self.subunit.getvalue().splitlines())
-
- def test_ok_TODO_xfail_comment(self):
- # A file
- # ok # TODO Not done yet
- # results in a xfail test with name 'test 1' and a log of Not done yet
- self.tap.write("ok # TODO Not done yet\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test test 1",
- "xfail test 1 [",
- "Not done yet",
- "]",
- ],
- self.subunit.getvalue().splitlines())
-
- def test_bail_out_errors(self):
- # A file with line in it
- # Bail out! COMMENT
- # is treated as an error
- self.tap.write("ok 1 foo\n")
- self.tap.write("Bail out! Lifejacket engaged\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- "test test 1 foo",
- "success test 1 foo",
- "test Bail out! Lifejacket engaged",
- "error Bail out! Lifejacket engaged",
- ],
- self.subunit.getvalue().splitlines())
-
- def test_missing_test_at_end_with_plan_adds_error(self):
- # A file
- # 1..3
- # ok first test
- # not ok second test
- # results in three tests, with the third being created
- self.tap.write('1..3\n')
- self.tap.write('ok first test\n')
- self.tap.write('not ok second test\n')
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- 'test test 1 first test',
- 'success test 1 first test',
- 'test test 2 second test',
- 'failure test 2 second test',
- 'test test 3',
- 'error test 3 [',
- 'test missing from TAP output',
- ']',
- ],
- self.subunit.getvalue().splitlines())
-
- def test_missing_test_with_plan_adds_error(self):
- # A file
- # 1..3
- # ok first test
- # not ok 3 third test
- # results in three tests, with the second being created
- self.tap.write('1..3\n')
- self.tap.write('ok first test\n')
- self.tap.write('not ok 3 third test\n')
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- 'test test 1 first test',
- 'success test 1 first test',
- 'test test 2',
- 'error test 2 [',
- 'test missing from TAP output',
- ']',
- 'test test 3 third test',
- 'failure test 3 third test',
- ],
- self.subunit.getvalue().splitlines())
-
- def test_missing_test_no_plan_adds_error(self):
- # A file
- # ok first test
- # not ok 3 third test
- # results in three tests, with the second being created
- self.tap.write('ok first test\n')
- self.tap.write('not ok 3 third test\n')
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- 'test test 1 first test',
- 'success test 1 first test',
- 'test test 2',
- 'error test 2 [',
- 'test missing from TAP output',
- ']',
- 'test test 3 third test',
- 'failure test 3 third test',
- ],
- self.subunit.getvalue().splitlines())
-
- def test_four_tests_in_a_row_trailing_plan(self):
- # A file
- # ok 1 - first test in a script with trailing plan
- # not ok 2 - second
- # ok 3 - third
- # not ok 4 - fourth
- # 1..4
- # results in four tests numbered and named
- self.tap.write('ok 1 - first test in a script with trailing plan\n')
- self.tap.write('not ok 2 - second\n')
- self.tap.write('ok 3 - third\n')
- self.tap.write('not ok 4 - fourth\n')
- self.tap.write('1..4\n')
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- 'test test 1 - first test in a script with trailing plan',
- 'success test 1 - first test in a script with trailing plan',
- 'test test 2 - second',
- 'failure test 2 - second',
- 'test test 3 - third',
- 'success test 3 - third',
- 'test test 4 - fourth',
- 'failure test 4 - fourth'
- ],
- self.subunit.getvalue().splitlines())
-
- def test_four_tests_in_a_row_with_plan(self):
- # A file
- # 1..4
- # ok 1 - first test in a script with a plan
- # not ok 2 - second
- # ok 3 - third
- # not ok 4 - fourth
- # results in four tests numbered and named
- self.tap.write('1..4\n')
- self.tap.write('ok 1 - first test in a script with a plan\n')
- self.tap.write('not ok 2 - second\n')
- self.tap.write('ok 3 - third\n')
- self.tap.write('not ok 4 - fourth\n')
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- 'test test 1 - first test in a script with a plan',
- 'success test 1 - first test in a script with a plan',
- 'test test 2 - second',
- 'failure test 2 - second',
- 'test test 3 - third',
- 'success test 3 - third',
- 'test test 4 - fourth',
- 'failure test 4 - fourth'
- ],
- self.subunit.getvalue().splitlines())
-
- def test_four_tests_in_a_row_no_plan(self):
- # A file
- # ok 1 - first test in a script with no plan at all
- # not ok 2 - second
- # ok 3 - third
- # not ok 4 - fourth
- # results in four tests numbered and named
- self.tap.write('ok 1 - first test in a script with no plan at all\n')
- self.tap.write('not ok 2 - second\n')
- self.tap.write('ok 3 - third\n')
- self.tap.write('not ok 4 - fourth\n')
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- 'test test 1 - first test in a script with no plan at all',
- 'success test 1 - first test in a script with no plan at all',
- 'test test 2 - second',
- 'failure test 2 - second',
- 'test test 3 - third',
- 'success test 3 - third',
- 'test test 4 - fourth',
- 'failure test 4 - fourth'
- ],
- self.subunit.getvalue().splitlines())
-
- def test_todo_and_skip(self):
- # A file
- # not ok 1 - a fail but # TODO but is TODO
- # not ok 2 - another fail # SKIP instead
- # results in two tests, numbered and commented.
- self.tap.write("not ok 1 - a fail but # TODO but is TODO\n")
- self.tap.write("not ok 2 - another fail # SKIP instead\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- 'test test 1 - a fail but',
- 'xfail test 1 - a fail but [',
- 'but is TODO',
- ']',
- 'test test 2 - another fail',
- 'skip test 2 - another fail [',
- 'instead',
- ']',
- ],
- self.subunit.getvalue().splitlines())
-
- def test_leading_comments_add_to_next_test_log(self):
- # A file
- # # comment
- # ok
- # ok
- # results in two tests, with the comment included
- # in the first test and not the second.
- self.tap.write("# comment\n")
- self.tap.write("ok\n")
- self.tap.write("ok\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- 'test test 1',
- 'success test 1 [',
- '# comment',
- ']',
- 'test test 2',
- 'success test 2',
- ],
- self.subunit.getvalue().splitlines())
-
- def test_trailing_comments_are_included_in_last_test_log(self):
- # A file
- # ok
- # ok
- # # comment
- # results in two tests, with the second having the comment
- # attached to its log.
- self.tap.write("ok\n")
- self.tap.write("ok\n")
- self.tap.write("# comment\n")
- self.tap.seek(0)
- result = subunit.TAP2SubUnit(self.tap, self.subunit)
- self.assertEqual(0, result)
- self.assertEqual([
- 'test test 1',
- 'success test 1',
- 'test test 2',
- 'success test 2 [',
- '# comment',
- ']',
- ],
- self.subunit.getvalue().splitlines())
-
-
-def test_suite():
- loader = subunit.tests.TestUtil.TestLoader()
- result = loader.loadTestsFromName(__name__)
- return result
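
Every conversion above goes through the same entry point; a minimal sketch of calling it directly, assuming io.StringIO in place of testtools.compat.StringIO:

from io import StringIO  # assumed stand-in for testtools.compat.StringIO

import subunit

tap = StringIO("1..2\n"
               "ok 1 - addition\n"
               "not ok 2 - subtraction\n")
out = StringIO()
exit_code = subunit.TAP2SubUnit(tap, out)  # the tests above expect a 0 return
print(exit_code)
print(out.getvalue())  # "test test 1 - addition", "success test 1 - addition", ...
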
diff --git a/lib/subunit/python/subunit/tests/test_test_protocol.py b/lib/subunit/python/subunit/tests/test_test_protocol.py
deleted file mode 100644
index 7831ba16cd..0000000000
--- a/lib/subunit/python/subunit/tests/test_test_protocol.py
+++ /dev/null
@@ -1,1337 +0,0 @@
-#
-# subunit: extensions to Python unittest to get test results from subprocesses.
-# Copyright (C) 2005 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-import datetime
-import unittest
-import os
-
-from testtools import PlaceHolder, skipIf, TestCase, TestResult
-from testtools.compat import _b, _u, BytesIO
-from testtools.content import Content, TracebackContent, text_content
-from testtools.content_type import ContentType
-try:
- from testtools.testresult.doubles import (
- Python26TestResult,
- Python27TestResult,
- ExtendedTestResult,
- )
-except ImportError:
- from testtools.tests.helpers import (
- Python26TestResult,
- Python27TestResult,
- ExtendedTestResult,
- )
-
-import subunit
-from subunit import _remote_exception_str, _remote_exception_str_chunked
-import subunit.iso8601 as iso8601
-
-
-def details_to_str(details):
- return TestResult()._err_details_to_string(None, details=details)
-
-
-class TestTestImports(unittest.TestCase):
-
- def test_imports(self):
- from subunit import DiscardStream
- from subunit import TestProtocolServer
- from subunit import RemotedTestCase
- from subunit import RemoteError
- from subunit import ExecTestCase
- from subunit import IsolatedTestCase
- from subunit import TestProtocolClient
- from subunit import ProtocolTestCase
-
-
-class TestDiscardStream(unittest.TestCase):
-
- def test_write(self):
- subunit.DiscardStream().write("content")
-
-
-class TestProtocolServerForward(unittest.TestCase):
-
- def test_story(self):
- client = unittest.TestResult()
- out = BytesIO()
- protocol = subunit.TestProtocolServer(client, forward_stream=out)
- pipe = BytesIO(_b("test old mcdonald\n"
- "success old mcdonald\n"))
- protocol.readFrom(pipe)
- self.assertEqual(client.testsRun, 1)
- self.assertEqual(pipe.getvalue(), out.getvalue())
-
- def test_not_command(self):
- client = unittest.TestResult()
- out = BytesIO()
- protocol = subunit.TestProtocolServer(client,
- stream=subunit.DiscardStream(), forward_stream=out)
- pipe = BytesIO(_b("success old mcdonald\n"))
- protocol.readFrom(pipe)
- self.assertEqual(client.testsRun, 0)
- self.assertEqual(_b(""), out.getvalue())
-
-
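
# A hedged sketch of the forwarding behaviour that TestProtocolServerForward
# checks above: TestProtocolServer turns protocol bytes into TestResult calls
# and can mirror the raw bytes to forward_stream. io.BytesIO is an assumed
# stand-in for testtools.compat.BytesIO.
import unittest
from io import BytesIO

import subunit

client = unittest.TestResult()
copy = BytesIO()
server = subunit.TestProtocolServer(client, forward_stream=copy)
server.readFrom(BytesIO(b"test old mcdonald\n"
                        b"success old mcdonald\n"))
print(client.testsRun)   # 1
print(copy.getvalue())   # identical bytes to the input stream
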
-class TestTestProtocolServerPipe(unittest.TestCase):
-
- def test_story(self):
- client = unittest.TestResult()
- protocol = subunit.TestProtocolServer(client)
- traceback = "foo.c:53:ERROR invalid state\n"
- pipe = BytesIO(_b("test old mcdonald\n"
- "success old mcdonald\n"
- "test bing crosby\n"
- "failure bing crosby [\n"
- + traceback +
- "]\n"
- "test an error\n"
- "error an error\n"))
- protocol.readFrom(pipe)
- bing = subunit.RemotedTestCase("bing crosby")
- an_error = subunit.RemotedTestCase("an error")
- self.assertEqual(client.errors,
- [(an_error, _remote_exception_str + '\n')])
- self.assertEqual(
- client.failures,
- [(bing, _remote_exception_str + ": "
- + details_to_str({'traceback': text_content(traceback)}) + "\n")])
- self.assertEqual(client.testsRun, 3)
-
- def test_non_test_characters_forwarded_immediately(self):
- pass
-
-
-class TestTestProtocolServerStartTest(unittest.TestCase):
-
- def setUp(self):
- self.client = Python26TestResult()
- self.stream = BytesIO()
- self.protocol = subunit.TestProtocolServer(self.client, self.stream)
-
- def test_start_test(self):
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.assertEqual(self.client._events,
- [('startTest', subunit.RemotedTestCase("old mcdonald"))])
-
- def test_start_testing(self):
- self.protocol.lineReceived(_b("testing old mcdonald\n"))
- self.assertEqual(self.client._events,
- [('startTest', subunit.RemotedTestCase("old mcdonald"))])
-
- def test_start_test_colon(self):
- self.protocol.lineReceived(_b("test: old mcdonald\n"))
- self.assertEqual(self.client._events,
- [('startTest', subunit.RemotedTestCase("old mcdonald"))])
-
- def test_indented_test_colon_ignored(self):
- ignored_line = _b(" test: old mcdonald\n")
- self.protocol.lineReceived(ignored_line)
- self.assertEqual([], self.client._events)
- self.assertEqual(self.stream.getvalue(), ignored_line)
-
- def test_start_testing_colon(self):
- self.protocol.lineReceived(_b("testing: old mcdonald\n"))
- self.assertEqual(self.client._events,
- [('startTest', subunit.RemotedTestCase("old mcdonald"))])
-
-
-class TestTestProtocolServerPassThrough(unittest.TestCase):
-
- def setUp(self):
- self.stdout = BytesIO()
- self.test = subunit.RemotedTestCase("old mcdonald")
- self.client = ExtendedTestResult()
- self.protocol = subunit.TestProtocolServer(self.client, self.stdout)
-
- def keywords_before_test(self):
- self.protocol.lineReceived(_b("failure a\n"))
- self.protocol.lineReceived(_b("failure: a\n"))
- self.protocol.lineReceived(_b("error a\n"))
- self.protocol.lineReceived(_b("error: a\n"))
- self.protocol.lineReceived(_b("success a\n"))
- self.protocol.lineReceived(_b("success: a\n"))
- self.protocol.lineReceived(_b("successful a\n"))
- self.protocol.lineReceived(_b("successful: a\n"))
- self.protocol.lineReceived(_b("]\n"))
- self.assertEqual(self.stdout.getvalue(), _b("failure a\n"
- "failure: a\n"
- "error a\n"
- "error: a\n"
- "success a\n"
- "success: a\n"
- "successful a\n"
- "successful: a\n"
- "]\n"))
-
- def test_keywords_before_test(self):
- self.keywords_before_test()
- self.assertEqual(self.client._events, [])
-
- def test_keywords_after_error(self):
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lineReceived(_b("error old mcdonald\n"))
- self.keywords_before_test()
- self.assertEqual([
- ('startTest', self.test),
- ('addError', self.test, {}),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_keywords_after_failure(self):
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lineReceived(_b("failure old mcdonald\n"))
- self.keywords_before_test()
- self.assertEqual(self.client._events, [
- ('startTest', self.test),
- ('addFailure', self.test, {}),
- ('stopTest', self.test),
- ])
-
- def test_keywords_after_success(self):
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lineReceived(_b("success old mcdonald\n"))
- self.keywords_before_test()
- self.assertEqual([
- ('startTest', self.test),
- ('addSuccess', self.test),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_keywords_after_test(self):
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lineReceived(_b("failure a\n"))
- self.protocol.lineReceived(_b("failure: a\n"))
- self.protocol.lineReceived(_b("error a\n"))
- self.protocol.lineReceived(_b("error: a\n"))
- self.protocol.lineReceived(_b("success a\n"))
- self.protocol.lineReceived(_b("success: a\n"))
- self.protocol.lineReceived(_b("successful a\n"))
- self.protocol.lineReceived(_b("successful: a\n"))
- self.protocol.lineReceived(_b("]\n"))
- self.protocol.lineReceived(_b("failure old mcdonald\n"))
- self.assertEqual(self.stdout.getvalue(), _b("test old mcdonald\n"
- "failure a\n"
- "failure: a\n"
- "error a\n"
- "error: a\n"
- "success a\n"
- "success: a\n"
- "successful a\n"
- "successful: a\n"
- "]\n"))
- self.assertEqual(self.client._events, [
- ('startTest', self.test),
- ('addFailure', self.test, {}),
- ('stopTest', self.test),
- ])
-
- def test_keywords_during_failure(self):
- # A smoke test to make sure that the details parsers take control
- # at the appropriate points.
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lineReceived(_b("failure: old mcdonald [\n"))
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lineReceived(_b("failure a\n"))
- self.protocol.lineReceived(_b("failure: a\n"))
- self.protocol.lineReceived(_b("error a\n"))
- self.protocol.lineReceived(_b("error: a\n"))
- self.protocol.lineReceived(_b("success a\n"))
- self.protocol.lineReceived(_b("success: a\n"))
- self.protocol.lineReceived(_b("successful a\n"))
- self.protocol.lineReceived(_b("successful: a\n"))
- self.protocol.lineReceived(_b(" ]\n"))
- self.protocol.lineReceived(_b("]\n"))
- self.assertEqual(self.stdout.getvalue(), _b(""))
- details = {}
- details['traceback'] = Content(ContentType("text", "x-traceback",
- {'charset': 'utf8'}),
- lambda:[_b(
- "test old mcdonald\n"
- "failure a\n"
- "failure: a\n"
- "error a\n"
- "error: a\n"
- "success a\n"
- "success: a\n"
- "successful a\n"
- "successful: a\n"
- "]\n")])
- self.assertEqual(self.client._events, [
- ('startTest', self.test),
- ('addFailure', self.test, details),
- ('stopTest', self.test),
- ])
-
- def test_stdout_passthrough(self):
- """Lines received which cannot be interpreted as any protocol action
- should be passed through to sys.stdout.
- """
- bytes = _b("randombytes\n")
- self.protocol.lineReceived(bytes)
- self.assertEqual(self.stdout.getvalue(), bytes)
-
-
-class TestTestProtocolServerLostConnection(unittest.TestCase):
-
- def setUp(self):
- self.client = Python26TestResult()
- self.protocol = subunit.TestProtocolServer(self.client)
- self.test = subunit.RemotedTestCase("old mcdonald")
-
- def test_lost_connection_no_input(self):
- self.protocol.lostConnection()
- self.assertEqual([], self.client._events)
-
- def test_lost_connection_after_start(self):
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lostConnection()
- failure = subunit.RemoteError(
- _u("lost connection during test 'old mcdonald'"))
- self.assertEqual([
- ('startTest', self.test),
- ('addError', self.test, failure),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_lost_connected_after_error(self):
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lineReceived(_b("error old mcdonald\n"))
- self.protocol.lostConnection()
- self.assertEqual([
- ('startTest', self.test),
- ('addError', self.test, subunit.RemoteError(_u(""))),
- ('stopTest', self.test),
- ], self.client._events)
-
- def do_connection_lost(self, outcome, opening):
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lineReceived(_b("%s old mcdonald %s" % (outcome, opening)))
- self.protocol.lostConnection()
- failure = subunit.RemoteError(
- _u("lost connection during %s report of test 'old mcdonald'") %
- outcome)
- self.assertEqual([
- ('startTest', self.test),
- ('addError', self.test, failure),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_lost_connection_during_error(self):
- self.do_connection_lost("error", "[\n")
-
- def test_lost_connection_during_error_details(self):
- self.do_connection_lost("error", "[ multipart\n")
-
- def test_lost_connected_after_failure(self):
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lineReceived(_b("failure old mcdonald\n"))
- self.protocol.lostConnection()
- self.assertEqual([
- ('startTest', self.test),
- ('addFailure', self.test, subunit.RemoteError(_u(""))),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_lost_connection_during_failure(self):
- self.do_connection_lost("failure", "[\n")
-
- def test_lost_connection_during_failure_details(self):
- self.do_connection_lost("failure", "[ multipart\n")
-
- def test_lost_connection_after_success(self):
- self.protocol.lineReceived(_b("test old mcdonald\n"))
- self.protocol.lineReceived(_b("success old mcdonald\n"))
- self.protocol.lostConnection()
- self.assertEqual([
- ('startTest', self.test),
- ('addSuccess', self.test),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_lost_connection_during_success(self):
- self.do_connection_lost("success", "[\n")
-
- def test_lost_connection_during_success_details(self):
- self.do_connection_lost("success", "[ multipart\n")
-
- def test_lost_connection_during_skip(self):
- self.do_connection_lost("skip", "[\n")
-
- def test_lost_connection_during_skip_details(self):
- self.do_connection_lost("skip", "[ multipart\n")
-
- def test_lost_connection_during_xfail(self):
- self.do_connection_lost("xfail", "[\n")
-
- def test_lost_connection_during_xfail_details(self):
- self.do_connection_lost("xfail", "[ multipart\n")
-
- def test_lost_connection_during_uxsuccess(self):
- self.do_connection_lost("uxsuccess", "[\n")
-
- def test_lost_connection_during_uxsuccess_details(self):
- self.do_connection_lost("uxsuccess", "[ multipart\n")
-
-
-class TestInTestMultipart(unittest.TestCase):
-
- def setUp(self):
- self.client = ExtendedTestResult()
- self.protocol = subunit.TestProtocolServer(self.client)
- self.protocol.lineReceived(_b("test mcdonalds farm\n"))
- self.test = subunit.RemotedTestCase(_u("mcdonalds farm"))
-
- def test__outcome_sets_details_parser(self):
- self.protocol._reading_success_details.details_parser = None
- self.protocol._state._outcome(0, _b("mcdonalds farm [ multipart\n"),
- None, self.protocol._reading_success_details)
- parser = self.protocol._reading_success_details.details_parser
- self.assertNotEqual(None, parser)
- self.assertTrue(isinstance(parser,
- subunit.details.MultipartDetailsParser))
-
-
-class TestTestProtocolServerAddError(unittest.TestCase):
-
- def setUp(self):
- self.client = ExtendedTestResult()
- self.protocol = subunit.TestProtocolServer(self.client)
- self.protocol.lineReceived(_b("test mcdonalds farm\n"))
- self.test = subunit.RemotedTestCase("mcdonalds farm")
-
- def simple_error_keyword(self, keyword):
- self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
- details = {}
- self.assertEqual([
- ('startTest', self.test),
- ('addError', self.test, details),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_simple_error(self):
- self.simple_error_keyword("error")
-
- def test_simple_error_colon(self):
- self.simple_error_keyword("error:")
-
- def test_error_empty_message(self):
- self.protocol.lineReceived(_b("error mcdonalds farm [\n"))
- self.protocol.lineReceived(_b("]\n"))
- details = {}
- details['traceback'] = Content(ContentType("text", "x-traceback",
- {'charset': 'utf8'}), lambda:[_b("")])
- self.assertEqual([
- ('startTest', self.test),
- ('addError', self.test, details),
- ('stopTest', self.test),
- ], self.client._events)
-
- def error_quoted_bracket(self, keyword):
- self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
- self.protocol.lineReceived(_b(" ]\n"))
- self.protocol.lineReceived(_b("]\n"))
- details = {}
- details['traceback'] = Content(ContentType("text", "x-traceback",
- {'charset': 'utf8'}), lambda:[_b("]\n")])
- self.assertEqual([
- ('startTest', self.test),
- ('addError', self.test, details),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_error_quoted_bracket(self):
- self.error_quoted_bracket("error")
-
- def test_error_colon_quoted_bracket(self):
- self.error_quoted_bracket("error:")
-
-
-class TestTestProtocolServerAddFailure(unittest.TestCase):
-
- def setUp(self):
- self.client = ExtendedTestResult()
- self.protocol = subunit.TestProtocolServer(self.client)
- self.protocol.lineReceived(_b("test mcdonalds farm\n"))
- self.test = subunit.RemotedTestCase("mcdonalds farm")
-
- def assertFailure(self, details):
- self.assertEqual([
- ('startTest', self.test),
- ('addFailure', self.test, details),
- ('stopTest', self.test),
- ], self.client._events)
-
- def simple_failure_keyword(self, keyword):
- self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
- details = {}
- self.assertFailure(details)
-
- def test_simple_failure(self):
- self.simple_failure_keyword("failure")
-
- def test_simple_failure_colon(self):
- self.simple_failure_keyword("failure:")
-
- def test_failure_empty_message(self):
- self.protocol.lineReceived(_b("failure mcdonalds farm [\n"))
- self.protocol.lineReceived(_b("]\n"))
- details = {}
- details['traceback'] = Content(ContentType("text", "x-traceback",
- {'charset': 'utf8'}), lambda:[_b("")])
- self.assertFailure(details)
-
- def failure_quoted_bracket(self, keyword):
- self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
- self.protocol.lineReceived(_b(" ]\n"))
- self.protocol.lineReceived(_b("]\n"))
- details = {}
- details['traceback'] = Content(ContentType("text", "x-traceback",
- {'charset': 'utf8'}), lambda:[_b("]\n")])
- self.assertFailure(details)
-
- def test_failure_quoted_bracket(self):
- self.failure_quoted_bracket("failure")
-
- def test_failure_colon_quoted_bracket(self):
- self.failure_quoted_bracket("failure:")
-
-
-class TestTestProtocolServerAddxFail(unittest.TestCase):
- """Tests for the xfail keyword.
-
- In Python this can thunk through to Success due to stdlib limitations (see
- README).
- """
-
- def capture_expected_failure(self, test, err):
- self._events.append((test, err))
-
- def setup_python26(self):
- """Setup a test object ready to be xfailed and thunk to success."""
- self.client = Python26TestResult()
- self.setup_protocol()
-
- def setup_python27(self):
- """Setup a test object ready to be xfailed."""
- self.client = Python27TestResult()
- self.setup_protocol()
-
- def setup_python_ex(self):
- """Setup a test object ready to be xfailed with details."""
- self.client = ExtendedTestResult()
- self.setup_protocol()
-
- def setup_protocol(self):
- """Setup the protocol based on self.client."""
- self.protocol = subunit.TestProtocolServer(self.client)
- self.protocol.lineReceived(_b("test mcdonalds farm\n"))
- self.test = self.client._events[-1][-1]
-
- def simple_xfail_keyword(self, keyword, as_success):
- self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
- self.check_success_or_xfail(as_success)
-
- def check_success_or_xfail(self, as_success, error_message=None):
- if as_success:
- self.assertEqual([
- ('startTest', self.test),
- ('addSuccess', self.test),
- ('stopTest', self.test),
- ], self.client._events)
- else:
- details = {}
- if error_message is not None:
- details['traceback'] = Content(
- ContentType("text", "x-traceback", {'charset': 'utf8'}),
- lambda:[_b(error_message)])
- if isinstance(self.client, ExtendedTestResult):
- value = details
- else:
- if error_message is not None:
- value = subunit.RemoteError(details_to_str(details))
- else:
- value = subunit.RemoteError()
- self.assertEqual([
- ('startTest', self.test),
- ('addExpectedFailure', self.test, value),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_simple_xfail(self):
- self.setup_python26()
- self.simple_xfail_keyword("xfail", True)
- self.setup_python27()
- self.simple_xfail_keyword("xfail", False)
- self.setup_python_ex()
- self.simple_xfail_keyword("xfail", False)
-
- def test_simple_xfail_colon(self):
- self.setup_python26()
- self.simple_xfail_keyword("xfail:", True)
- self.setup_python27()
- self.simple_xfail_keyword("xfail:", False)
- self.setup_python_ex()
- self.simple_xfail_keyword("xfail:", False)
-
- def test_xfail_empty_message(self):
- self.setup_python26()
- self.empty_message(True)
- self.setup_python27()
- self.empty_message(False)
- self.setup_python_ex()
- self.empty_message(False, error_message="")
-
- def empty_message(self, as_success, error_message="\n"):
- self.protocol.lineReceived(_b("xfail mcdonalds farm [\n"))
- self.protocol.lineReceived(_b("]\n"))
- self.check_success_or_xfail(as_success, error_message)
-
- def xfail_quoted_bracket(self, keyword, as_success):
- # This tests that it is accepted, but cannot test that it is used today,
- # because there is no way to expose it in Python so far.
- self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
- self.protocol.lineReceived(_b(" ]\n"))
- self.protocol.lineReceived(_b("]\n"))
- self.check_success_or_xfail(as_success, "]\n")
-
- def test_xfail_quoted_bracket(self):
- self.setup_python26()
- self.xfail_quoted_bracket("xfail", True)
- self.setup_python27()
- self.xfail_quoted_bracket("xfail", False)
- self.setup_python_ex()
- self.xfail_quoted_bracket("xfail", False)
-
- def test_xfail_colon_quoted_bracket(self):
- self.setup_python26()
- self.xfail_quoted_bracket("xfail:", True)
- self.setup_python27()
- self.xfail_quoted_bracket("xfail:", False)
- self.setup_python_ex()
- self.xfail_quoted_bracket("xfail:", False)
-
-
-class TestTestProtocolServerAddunexpectedSuccess(TestCase):
- """Tests for the uxsuccess keyword."""
-
- def capture_expected_failure(self, test, err):
- self._events.append((test, err))
-
- def setup_python26(self):
- """Setup a test object ready to be xfailed and thunk to success."""
- self.client = Python26TestResult()
- self.setup_protocol()
-
- def setup_python27(self):
- """Setup a test object ready to be xfailed."""
- self.client = Python27TestResult()
- self.setup_protocol()
-
- def setup_python_ex(self):
- """Setup a test object ready to be xfailed with details."""
- self.client = ExtendedTestResult()
- self.setup_protocol()
-
- def setup_protocol(self):
- """Setup the protocol based on self.client."""
- self.protocol = subunit.TestProtocolServer(self.client)
- self.protocol.lineReceived(_b("test mcdonalds farm\n"))
- self.test = self.client._events[-1][-1]
-
- def simple_uxsuccess_keyword(self, keyword, as_fail):
- self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
- self.check_fail_or_uxsuccess(as_fail)
-
- def check_fail_or_uxsuccess(self, as_fail, error_message=None):
- details = {}
- if error_message is not None:
- details['traceback'] = Content(
- ContentType("text", "x-traceback", {'charset': 'utf8'}),
- lambda:[_b(error_message)])
- if isinstance(self.client, ExtendedTestResult):
- value = details
- else:
- value = None
- if as_fail:
- self.client._events[1] = self.client._events[1][:2]
- # The value is generated within the extended-to-original decorator;
- # TODO: use the testtools matcher to check on this.
- self.assertEqual([
- ('startTest', self.test),
- ('addFailure', self.test),
- ('stopTest', self.test),
- ], self.client._events)
- elif value:
- self.assertEqual([
- ('startTest', self.test),
- ('addUnexpectedSuccess', self.test, value),
- ('stopTest', self.test),
- ], self.client._events)
- else:
- self.assertEqual([
- ('startTest', self.test),
- ('addUnexpectedSuccess', self.test),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_simple_uxsuccess(self):
- self.setup_python26()
- self.simple_uxsuccess_keyword("uxsuccess", True)
- self.setup_python27()
- self.simple_uxsuccess_keyword("uxsuccess", False)
- self.setup_python_ex()
- self.simple_uxsuccess_keyword("uxsuccess", False)
-
- def test_simple_uxsuccess_colon(self):
- self.setup_python26()
- self.simple_uxsuccess_keyword("uxsuccess:", True)
- self.setup_python27()
- self.simple_uxsuccess_keyword("uxsuccess:", False)
- self.setup_python_ex()
- self.simple_uxsuccess_keyword("uxsuccess:", False)
-
- def test_uxsuccess_empty_message(self):
- self.setup_python26()
- self.empty_message(True)
- self.setup_python27()
- self.empty_message(False)
- self.setup_python_ex()
- self.empty_message(False, error_message="")
-
- def empty_message(self, as_fail, error_message="\n"):
- self.protocol.lineReceived(_b("uxsuccess mcdonalds farm [\n"))
- self.protocol.lineReceived(_b("]\n"))
- self.check_fail_or_uxsuccess(as_fail, error_message)
-
- def uxsuccess_quoted_bracket(self, keyword, as_fail):
- self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
- self.protocol.lineReceived(_b(" ]\n"))
- self.protocol.lineReceived(_b("]\n"))
- self.check_fail_or_uxsuccess(as_fail, "]\n")
-
- def test_uxsuccess_quoted_bracket(self):
- self.setup_python26()
- self.uxsuccess_quoted_bracket("uxsuccess", True)
- self.setup_python27()
- self.uxsuccess_quoted_bracket("uxsuccess", False)
- self.setup_python_ex()
- self.uxsuccess_quoted_bracket("uxsuccess", False)
-
- def test_uxsuccess_colon_quoted_bracket(self):
- self.setup_python26()
- self.uxsuccess_quoted_bracket("uxsuccess:", True)
- self.setup_python27()
- self.uxsuccess_quoted_bracket("uxsuccess:", False)
- self.setup_python_ex()
- self.uxsuccess_quoted_bracket("uxsuccess:", False)
-
-
-class TestTestProtocolServerAddSkip(unittest.TestCase):
- """Tests for the skip keyword.
-
- In Python this meets the testtools extended TestResult contract.
- (See https://launchpad.net/testtools).
- """
-
- def setUp(self):
- """Setup a test object ready to be skipped."""
- self.client = ExtendedTestResult()
- self.protocol = subunit.TestProtocolServer(self.client)
- self.protocol.lineReceived(_b("test mcdonalds farm\n"))
- self.test = self.client._events[-1][-1]
-
- def assertSkip(self, reason):
- details = {}
- if reason is not None:
- details['reason'] = Content(
- ContentType("text", "plain"), lambda:[reason])
- self.assertEqual([
- ('startTest', self.test),
- ('addSkip', self.test, details),
- ('stopTest', self.test),
- ], self.client._events)
-
- def simple_skip_keyword(self, keyword):
- self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
- self.assertSkip(None)
-
- def test_simple_skip(self):
- self.simple_skip_keyword("skip")
-
- def test_simple_skip_colon(self):
- self.simple_skip_keyword("skip:")
-
- def test_skip_empty_message(self):
- self.protocol.lineReceived(_b("skip mcdonalds farm [\n"))
- self.protocol.lineReceived(_b("]\n"))
- self.assertSkip(_b(""))
-
- def skip_quoted_bracket(self, keyword):
- # This tests that it is accepted, but cannot test that it is used today,
- # because there is no way to expose it in Python so far.
- self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
- self.protocol.lineReceived(_b(" ]\n"))
- self.protocol.lineReceived(_b("]\n"))
- self.assertSkip(_b("]\n"))
-
- def test_skip_quoted_bracket(self):
- self.skip_quoted_bracket("skip")
-
- def test_skip_colon_quoted_bracket(self):
- self.skip_quoted_bracket("skip:")
-
-
-class TestTestProtocolServerAddSuccess(unittest.TestCase):
-
- def setUp(self):
- self.client = ExtendedTestResult()
- self.protocol = subunit.TestProtocolServer(self.client)
- self.protocol.lineReceived(_b("test mcdonalds farm\n"))
- self.test = subunit.RemotedTestCase("mcdonalds farm")
-
- def simple_success_keyword(self, keyword):
- self.protocol.lineReceived(_b("%s mcdonalds farm\n" % keyword))
- self.assertEqual([
- ('startTest', self.test),
- ('addSuccess', self.test),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_simple_success(self):
- self.simple_success_keyword("successful")
-
- def test_simple_success_colon(self):
- self.simple_success_keyword("successful:")
-
- def assertSuccess(self, details):
- self.assertEqual([
- ('startTest', self.test),
- ('addSuccess', self.test, details),
- ('stopTest', self.test),
- ], self.client._events)
-
- def test_success_empty_message(self):
- self.protocol.lineReceived(_b("success mcdonalds farm [\n"))
- self.protocol.lineReceived(_b("]\n"))
- details = {}
- details['message'] = Content(ContentType("text", "plain"),
- lambda:[_b("")])
- self.assertSuccess(details)
-
- def success_quoted_bracket(self, keyword):
- # This tests that it is accepted, but cannot test that it is used today,
- # because there is no way to expose it in Python so far.
- self.protocol.lineReceived(_b("%s mcdonalds farm [\n" % keyword))
- self.protocol.lineReceived(_b(" ]\n"))
- self.protocol.lineReceived(_b("]\n"))
- details = {}
- details['message'] = Content(ContentType("text", "plain"),
- lambda:[_b("]\n")])
- self.assertSuccess(details)
-
- def test_success_quoted_bracket(self):
- self.success_quoted_bracket("success")
-
- def test_success_colon_quoted_bracket(self):
- self.success_quoted_bracket("success:")
-
-
-class TestTestProtocolServerProgress(unittest.TestCase):
- """Test receipt of progress: directives."""
-
- def test_progress_accepted_stdlib(self):
- self.result = Python26TestResult()
- self.stream = BytesIO()
- self.protocol = subunit.TestProtocolServer(self.result,
- stream=self.stream)
- self.protocol.lineReceived(_b("progress: 23"))
- self.protocol.lineReceived(_b("progress: -2"))
- self.protocol.lineReceived(_b("progress: +4"))
- self.assertEqual(_b(""), self.stream.getvalue())
-
- def test_progress_accepted_extended(self):
- # With a progress capable TestResult, progress events are emitted.
- self.result = ExtendedTestResult()
- self.stream = BytesIO()
- self.protocol = subunit.TestProtocolServer(self.result,
- stream=self.stream)
- self.protocol.lineReceived(_b("progress: 23"))
- self.protocol.lineReceived(_b("progress: push"))
- self.protocol.lineReceived(_b("progress: -2"))
- self.protocol.lineReceived(_b("progress: pop"))
- self.protocol.lineReceived(_b("progress: +4"))
- self.assertEqual(_b(""), self.stream.getvalue())
- self.assertEqual([
- ('progress', 23, subunit.PROGRESS_SET),
- ('progress', None, subunit.PROGRESS_PUSH),
- ('progress', -2, subunit.PROGRESS_CUR),
- ('progress', None, subunit.PROGRESS_POP),
- ('progress', 4, subunit.PROGRESS_CUR),
- ], self.result._events)
-
-
-class TestTestProtocolServerStreamTags(unittest.TestCase):
- """Test managing tags on the protocol level."""
-
- def setUp(self):
- self.client = ExtendedTestResult()
- self.protocol = subunit.TestProtocolServer(self.client)
-
- def test_initial_tags(self):
- self.protocol.lineReceived(_b("tags: foo bar:baz quux\n"))
- self.assertEqual([
- ('tags', set(["foo", "bar:baz", "quux"]), set()),
- ], self.client._events)
-
- def test_minus_removes_tags(self):
- self.protocol.lineReceived(_b("tags: -bar quux\n"))
- self.assertEqual([
- ('tags', set(["quux"]), set(["bar"])),
- ], self.client._events)
-
- def test_tags_do_not_get_set_on_test(self):
- self.protocol.lineReceived(_b("test mcdonalds farm\n"))
- test = self.client._events[0][-1]
- self.assertEqual(None, getattr(test, 'tags', None))
-
- def test_tags_do_not_get_set_on_global_tags(self):
- self.protocol.lineReceived(_b("tags: foo bar\n"))
- self.protocol.lineReceived(_b("test mcdonalds farm\n"))
- test = self.client._events[-1][-1]
- self.assertEqual(None, getattr(test, 'tags', None))
-
- def test_tags_get_set_on_test_tags(self):
- self.protocol.lineReceived(_b("test mcdonalds farm\n"))
- test = self.client._events[-1][-1]
- self.protocol.lineReceived(_b("tags: foo bar\n"))
- self.protocol.lineReceived(_b("success mcdonalds farm\n"))
- self.assertEqual(None, getattr(test, 'tags', None))
-
-
-class TestTestProtocolServerStreamTime(unittest.TestCase):
- """Test managing time information at the protocol level."""
-
- def test_time_accepted_stdlib(self):
- self.result = Python26TestResult()
- self.stream = BytesIO()
- self.protocol = subunit.TestProtocolServer(self.result,
- stream=self.stream)
- self.protocol.lineReceived(_b("time: 2001-12-12 12:59:59Z\n"))
- self.assertEqual(_b(""), self.stream.getvalue())
-
- def test_time_accepted_extended(self):
- self.result = ExtendedTestResult()
- self.stream = BytesIO()
- self.protocol = subunit.TestProtocolServer(self.result,
- stream=self.stream)
- self.protocol.lineReceived(_b("time: 2001-12-12 12:59:59Z\n"))
- self.assertEqual(_b(""), self.stream.getvalue())
- self.assertEqual([
- ('time', datetime.datetime(2001, 12, 12, 12, 59, 59, 0,
- iso8601.Utc()))
- ], self.result._events)
-
-
-class TestRemotedTestCase(unittest.TestCase):
-
- def test_simple(self):
- test = subunit.RemotedTestCase("A test description")
- self.assertRaises(NotImplementedError, test.setUp)
- self.assertRaises(NotImplementedError, test.tearDown)
- self.assertEqual("A test description",
- test.shortDescription())
- self.assertEqual("A test description",
- test.id())
- self.assertEqual("A test description (subunit.RemotedTestCase)", "%s" % test)
- self.assertEqual("<subunit.RemotedTestCase description="
- "'A test description'>", "%r" % test)
- result = unittest.TestResult()
- test.run(result)
- self.assertEqual([(test, _remote_exception_str + ": "
- "Cannot run RemotedTestCases.\n\n")],
- result.errors)
- self.assertEqual(1, result.testsRun)
- another_test = subunit.RemotedTestCase("A test description")
- self.assertEqual(test, another_test)
- different_test = subunit.RemotedTestCase("ofo")
- self.assertNotEqual(test, different_test)
- self.assertNotEqual(another_test, different_test)
-
-
-class TestRemoteError(unittest.TestCase):
-
- def test_eq(self):
- error = subunit.RemoteError(_u("Something went wrong"))
- another_error = subunit.RemoteError(_u("Something went wrong"))
- different_error = subunit.RemoteError(_u("boo!"))
- self.assertEqual(error, another_error)
- self.assertNotEqual(error, different_error)
- self.assertNotEqual(different_error, another_error)
-
- def test_empty_constructor(self):
- self.assertEqual(subunit.RemoteError(), subunit.RemoteError(_u("")))
-
-
-class TestExecTestCase(unittest.TestCase):
-
- class SampleExecTestCase(subunit.ExecTestCase):
-
- def test_sample_method(self):
- """sample-script.py"""
- # the sample script runs three tests, one each
- # that fails, errors and succeeds
-
- def test_sample_method_args(self):
- """sample-script.py foo"""
- # sample that will run just one test.
-
- def test_construct(self):
- test = self.SampleExecTestCase("test_sample_method")
- self.assertEqual(test.script,
- subunit.join_dir(__file__, 'sample-script.py'))
-
- def test_args(self):
- result = unittest.TestResult()
- test = self.SampleExecTestCase("test_sample_method_args")
- test.run(result)
- self.assertEqual(1, result.testsRun)
-
- def test_run(self):
- result = ExtendedTestResult()
- test = self.SampleExecTestCase("test_sample_method")
- test.run(result)
- mcdonald = subunit.RemotedTestCase("old mcdonald")
- bing = subunit.RemotedTestCase("bing crosby")
- bing_details = {}
- bing_details['traceback'] = Content(ContentType("text", "x-traceback",
- {'charset': 'utf8'}), lambda:[_b("foo.c:53:ERROR invalid state\n")])
- an_error = subunit.RemotedTestCase("an error")
- error_details = {}
- self.assertEqual([
- ('startTest', mcdonald),
- ('addSuccess', mcdonald),
- ('stopTest', mcdonald),
- ('startTest', bing),
- ('addFailure', bing, bing_details),
- ('stopTest', bing),
- ('startTest', an_error),
- ('addError', an_error, error_details),
- ('stopTest', an_error),
- ], result._events)
-
- def test_debug(self):
- test = self.SampleExecTestCase("test_sample_method")
- test.debug()
-
- def test_count_test_cases(self):
- """TODO run the child process and count responses to determine the count."""
-
- def test_join_dir(self):
- sibling = subunit.join_dir(__file__, 'foo')
- filedir = os.path.abspath(os.path.dirname(__file__))
- expected = os.path.join(filedir, 'foo')
- self.assertEqual(sibling, expected)
-
-
-class DoExecTestCase(subunit.ExecTestCase):
-
- def test_working_script(self):
- """sample-two-script.py"""
-
-
-class TestIsolatedTestCase(TestCase):
-
- class SampleIsolatedTestCase(subunit.IsolatedTestCase):
-
- SETUP = False
- TEARDOWN = False
- TEST = False
-
- def setUp(self):
- TestIsolatedTestCase.SampleIsolatedTestCase.SETUP = True
-
- def tearDown(self):
- TestIsolatedTestCase.SampleIsolatedTestCase.TEARDOWN = True
-
- def test_sets_global_state(self):
- TestIsolatedTestCase.SampleIsolatedTestCase.TEST = True
-
-
- def test_construct(self):
- self.SampleIsolatedTestCase("test_sets_global_state")
-
- @skipIf(os.name != "posix", "Need a posix system for forking tests")
- def test_run(self):
- result = unittest.TestResult()
- test = self.SampleIsolatedTestCase("test_sets_global_state")
- test.run(result)
- self.assertEqual(result.testsRun, 1)
- self.assertEqual(self.SampleIsolatedTestCase.SETUP, False)
- self.assertEqual(self.SampleIsolatedTestCase.TEARDOWN, False)
- self.assertEqual(self.SampleIsolatedTestCase.TEST, False)
-
- def test_debug(self):
- pass
- #test = self.SampleExecTestCase("test_sample_method")
- #test.debug()
-
-
-class TestIsolatedTestSuite(TestCase):
-
- class SampleTestToIsolate(unittest.TestCase):
-
- SETUP = False
- TEARDOWN = False
- TEST = False
-
- def setUp(self):
- TestIsolatedTestSuite.SampleTestToIsolate.SETUP = True
-
- def tearDown(self):
- TestIsolatedTestSuite.SampleTestToIsolate.TEARDOWN = True
-
- def test_sets_global_state(self):
- TestIsolatedTestSuite.SampleTestToIsolate.TEST = True
-
-
- def test_construct(self):
- subunit.IsolatedTestSuite()
-
- @skipIf(os.name != "posix", "Need a posix system for forking tests")
- def test_run(self):
- result = unittest.TestResult()
- suite = subunit.IsolatedTestSuite()
- sub_suite = unittest.TestSuite()
- sub_suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
- sub_suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
- suite.addTest(sub_suite)
- suite.addTest(self.SampleTestToIsolate("test_sets_global_state"))
- suite.run(result)
- self.assertEqual(result.testsRun, 3)
- self.assertEqual(self.SampleTestToIsolate.SETUP, False)
- self.assertEqual(self.SampleTestToIsolate.TEARDOWN, False)
- self.assertEqual(self.SampleTestToIsolate.TEST, False)
-
-
-class TestTestProtocolClient(unittest.TestCase):
-
- def setUp(self):
- self.io = BytesIO()
- self.protocol = subunit.TestProtocolClient(self.io)
- self.unicode_test = PlaceHolder(_u('\u2603'))
- self.test = TestTestProtocolClient("test_start_test")
- self.sample_details = {'something':Content(
- ContentType('text', 'plain'), lambda:[_b('serialised\nform')])}
- self.sample_tb_details = dict(self.sample_details)
- self.sample_tb_details['traceback'] = TracebackContent(
- subunit.RemoteError(_u("boo qux")), self.test)
-
- def test_start_test(self):
- """Test startTest on a TestProtocolClient."""
- self.protocol.startTest(self.test)
- self.assertEqual(self.io.getvalue(), _b("test: %s\n" % self.test.id()))
-
- def test_start_test_unicode_id(self):
- """Test startTest on a TestProtocolClient."""
- self.protocol.startTest(self.unicode_test)
- expected = _b("test: ") + _u('\u2603').encode('utf8') + _b("\n")
- self.assertEqual(expected, self.io.getvalue())
-
- def test_stop_test(self):
- # stopTest doesn't output anything.
- self.protocol.stopTest(self.test)
- self.assertEqual(self.io.getvalue(), _b(""))
-
- def test_add_success(self):
- """Test addSuccess on a TestProtocolClient."""
- self.protocol.addSuccess(self.test)
- self.assertEqual(
- self.io.getvalue(), _b("successful: %s\n" % self.test.id()))
-
- def test_add_outcome_unicode_id(self):
- """Test addSuccess on a TestProtocolClient."""
- self.protocol.addSuccess(self.unicode_test)
- expected = _b("successful: ") + _u('\u2603').encode('utf8') + _b("\n")
- self.assertEqual(expected, self.io.getvalue())
-
- def test_add_success_details(self):
- """Test addSuccess on a TestProtocolClient with details."""
- self.protocol.addSuccess(self.test, details=self.sample_details)
- self.assertEqual(
- self.io.getvalue(), _b("successful: %s [ multipart\n"
- "Content-Type: text/plain\n"
- "something\n"
- "F\r\nserialised\nform0\r\n]\n" % self.test.id()))
-
- def test_add_failure(self):
- """Test addFailure on a TestProtocolClient."""
- self.protocol.addFailure(
- self.test, subunit.RemoteError(_u("boo qux")))
- self.assertEqual(
- self.io.getvalue(),
- _b(('failure: %s [\n' + _remote_exception_str + ': boo qux\n]\n')
- % self.test.id()))
-
- def test_add_failure_details(self):
- """Test addFailure on a TestProtocolClient with details."""
- self.protocol.addFailure(
- self.test, details=self.sample_tb_details)
- self.assertEqual(
- self.io.getvalue(),
- _b(("failure: %s [ multipart\n"
- "Content-Type: text/plain\n"
- "something\n"
- "F\r\nserialised\nform0\r\n"
- "Content-Type: text/x-traceback;charset=utf8,language=python\n"
- "traceback\n" + _remote_exception_str_chunked + ": boo qux\n0\r\n"
- "]\n") % self.test.id()))
-
- def test_add_error(self):
- """Test stopTest on a TestProtocolClient."""
- self.protocol.addError(
- self.test, subunit.RemoteError(_u("phwoar crikey")))
- self.assertEqual(
- self.io.getvalue(),
- _b(('error: %s [\n' +
- _remote_exception_str + ": phwoar crikey\n"
- "]\n") % self.test.id()))
-
- def test_add_error_details(self):
- """Test stopTest on a TestProtocolClient with details."""
- self.protocol.addError(
- self.test, details=self.sample_tb_details)
- self.assertEqual(
- self.io.getvalue(),
- _b(("error: %s [ multipart\n"
- "Content-Type: text/plain\n"
- "something\n"
- "F\r\nserialised\nform0\r\n"
- "Content-Type: text/x-traceback;charset=utf8,language=python\n"
- "traceback\n" + _remote_exception_str_chunked + ": boo qux\n0\r\n"
- "]\n") % self.test.id()))
-
- def test_add_expected_failure(self):
- """Test addExpectedFailure on a TestProtocolClient."""
- self.protocol.addExpectedFailure(
- self.test, subunit.RemoteError(_u("phwoar crikey")))
- self.assertEqual(
- self.io.getvalue(),
- _b(('xfail: %s [\n' +
- _remote_exception_str + ": phwoar crikey\n"
- "]\n") % self.test.id()))
-
- def test_add_expected_failure_details(self):
- """Test addExpectedFailure on a TestProtocolClient with details."""
- self.protocol.addExpectedFailure(
- self.test, details=self.sample_tb_details)
- self.assertEqual(
- self.io.getvalue(),
- _b(("xfail: %s [ multipart\n"
- "Content-Type: text/plain\n"
- "something\n"
- "F\r\nserialised\nform0\r\n"
- "Content-Type: text/x-traceback;charset=utf8,language=python\n"
- "traceback\n" + _remote_exception_str_chunked + ": boo qux\n0\r\n"
- "]\n") % self.test.id()))
-
-
- def test_add_skip(self):
- """Test addSkip on a TestProtocolClient."""
- self.protocol.addSkip(
- self.test, "Has it really?")
- self.assertEqual(
- self.io.getvalue(),
- _b('skip: %s [\nHas it really?\n]\n' % self.test.id()))
-
- def test_add_skip_details(self):
- """Test addSkip on a TestProtocolClient with details."""
- details = {'reason':Content(
- ContentType('text', 'plain'), lambda:[_b('Has it really?')])}
- self.protocol.addSkip(self.test, details=details)
- self.assertEqual(
- self.io.getvalue(),
- _b("skip: %s [ multipart\n"
- "Content-Type: text/plain\n"
- "reason\n"
- "E\r\nHas it really?0\r\n"
- "]\n" % self.test.id()))
-
- def test_progress_set(self):
- self.protocol.progress(23, subunit.PROGRESS_SET)
- self.assertEqual(self.io.getvalue(), _b('progress: 23\n'))
-
- def test_progress_neg_cur(self):
- self.protocol.progress(-23, subunit.PROGRESS_CUR)
- self.assertEqual(self.io.getvalue(), _b('progress: -23\n'))
-
- def test_progress_pos_cur(self):
- self.protocol.progress(23, subunit.PROGRESS_CUR)
- self.assertEqual(self.io.getvalue(), _b('progress: +23\n'))
-
- def test_progress_pop(self):
- self.protocol.progress(1234, subunit.PROGRESS_POP)
- self.assertEqual(self.io.getvalue(), _b('progress: pop\n'))
-
- def test_progress_push(self):
- self.protocol.progress(1234, subunit.PROGRESS_PUSH)
- self.assertEqual(self.io.getvalue(), _b('progress: push\n'))
-
- def test_time(self):
- # Calling time() outputs a time signal immediately.
- self.protocol.time(
- datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc()))
- self.assertEqual(
- _b("time: 2009-10-11 12:13:14.000015Z\n"),
- self.io.getvalue())
-
- def test_add_unexpected_success(self):
- """Test addUnexpectedSuccess on a TestProtocolClient."""
- self.protocol.addUnexpectedSuccess(self.test)
- self.assertEqual(
- self.io.getvalue(), _b("uxsuccess: %s\n" % self.test.id()))
-
- def test_add_unexpected_success_details(self):
- """Test addUnexpectedSuccess on a TestProtocolClient with details."""
- self.protocol.addUnexpectedSuccess(self.test, details=self.sample_details)
- self.assertEqual(
- self.io.getvalue(), _b("uxsuccess: %s [ multipart\n"
- "Content-Type: text/plain\n"
- "something\n"
- "F\r\nserialised\nform0\r\n]\n" % self.test.id()))
-
- def test_tags_empty(self):
- self.protocol.tags(set(), set())
- self.assertEqual(_b(""), self.io.getvalue())
-
- def test_tags_add(self):
- self.protocol.tags(set(['foo']), set())
- self.assertEqual(_b("tags: foo\n"), self.io.getvalue())
-
- def test_tags_both(self):
- self.protocol.tags(set(['quux']), set(['bar']))
- self.assertEqual(_b("tags: quux -bar\n"), self.io.getvalue())
-
- def test_tags_gone(self):
- self.protocol.tags(set(), set(['bar']))
- self.assertEqual(_b("tags: -bar\n"), self.io.getvalue())
-
-
-def test_suite():
- loader = subunit.tests.TestUtil.TestLoader()
- result = loader.loadTestsFromName(__name__)
- return result
diff --git a/lib/subunit/python/subunit/tests/test_test_results.py b/lib/subunit/python/subunit/tests/test_test_results.py
deleted file mode 100644
index ff74b9a818..0000000000
--- a/lib/subunit/python/subunit/tests/test_test_results.py
+++ /dev/null
@@ -1,572 +0,0 @@
-#
-# subunit: extensions to Python unittest to get test results from subprocesses.
-# Copyright (C) 2009 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-import csv
-import datetime
-import sys
-import unittest
-
-from testtools import TestCase
-from testtools.compat import StringIO
-from testtools.content import (
- text_content,
- TracebackContent,
- )
-from testtools.testresult.doubles import ExtendedTestResult
-
-import subunit
-import subunit.iso8601 as iso8601
-import subunit.test_results
-
-import testtools
-
-
-class LoggingDecorator(subunit.test_results.HookedTestResultDecorator):
-
- def __init__(self, decorated):
- self._calls = 0
- super(LoggingDecorator, self).__init__(decorated)
-
- def _before_event(self):
- self._calls += 1
-
-
-class AssertBeforeTestResult(LoggingDecorator):
- """A TestResult for checking preconditions."""
-
- def __init__(self, decorated, test):
- self.test = test
- super(AssertBeforeTestResult, self).__init__(decorated)
-
- def _before_event(self):
- self.test.assertEqual(1, self.earlier._calls)
- super(AssertBeforeTestResult, self)._before_event()
-
-
-class TimeCapturingResult(unittest.TestResult):
-
- def __init__(self):
- super(TimeCapturingResult, self).__init__()
- self._calls = []
- self.failfast = False
-
- def time(self, a_datetime):
- self._calls.append(a_datetime)
-
-
-class TestHookedTestResultDecorator(unittest.TestCase):
-
- def setUp(self):
- # An end to the chain
- terminal = unittest.TestResult()
- # Asserts that the call was made to self.result before asserter was
- # called.
- asserter = AssertBeforeTestResult(terminal, self)
-        # The result object we call, which must increase its call count.
- self.result = LoggingDecorator(asserter)
- asserter.earlier = self.result
- self.decorated = asserter
-
- def tearDown(self):
- # The hook in self.result must have been called
- self.assertEqual(1, self.result._calls)
- # The hook in asserter must have been called too, otherwise the
- # assertion about ordering won't have completed.
- self.assertEqual(1, self.decorated._calls)
-
- def test_startTest(self):
- self.result.startTest(self)
-
- def test_startTestRun(self):
- self.result.startTestRun()
-
- def test_stopTest(self):
- self.result.stopTest(self)
-
- def test_stopTestRun(self):
- self.result.stopTestRun()
-
- def test_addError(self):
- self.result.addError(self, subunit.RemoteError())
-
- def test_addError_details(self):
- self.result.addError(self, details={})
-
- def test_addFailure(self):
- self.result.addFailure(self, subunit.RemoteError())
-
- def test_addFailure_details(self):
- self.result.addFailure(self, details={})
-
- def test_addSuccess(self):
- self.result.addSuccess(self)
-
- def test_addSuccess_details(self):
- self.result.addSuccess(self, details={})
-
- def test_addSkip(self):
- self.result.addSkip(self, "foo")
-
- def test_addSkip_details(self):
- self.result.addSkip(self, details={})
-
- def test_addExpectedFailure(self):
- self.result.addExpectedFailure(self, subunit.RemoteError())
-
- def test_addExpectedFailure_details(self):
- self.result.addExpectedFailure(self, details={})
-
- def test_addUnexpectedSuccess(self):
- self.result.addUnexpectedSuccess(self)
-
- def test_addUnexpectedSuccess_details(self):
- self.result.addUnexpectedSuccess(self, details={})
-
- def test_progress(self):
- self.result.progress(1, subunit.PROGRESS_SET)
-
- def test_wasSuccessful(self):
- self.result.wasSuccessful()
-
- def test_shouldStop(self):
- self.result.shouldStop
-
- def test_stop(self):
- self.result.stop()
-
- def test_time(self):
- self.result.time(None)
-
-
-class TestAutoTimingTestResultDecorator(unittest.TestCase):
-
- def setUp(self):
-        # An end to the chain which captures time events.
- terminal = TimeCapturingResult()
- # The result object under test.
- self.result = subunit.test_results.AutoTimingTestResultDecorator(
- terminal)
- self.decorated = terminal
-
- def test_without_time_calls_time_is_called_and_not_None(self):
- self.result.startTest(self)
- self.assertEqual(1, len(self.decorated._calls))
- self.assertNotEqual(None, self.decorated._calls[0])
-
- def test_no_time_from_progress(self):
- self.result.progress(1, subunit.PROGRESS_CUR)
- self.assertEqual(0, len(self.decorated._calls))
-
- def test_no_time_from_shouldStop(self):
- self.decorated.stop()
- self.result.shouldStop
- self.assertEqual(0, len(self.decorated._calls))
-
- def test_calling_time_inhibits_automatic_time(self):
- # Calling time() outputs a time signal immediately and prevents
- # automatically adding one when other methods are called.
- time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
- self.result.time(time)
- self.result.startTest(self)
- self.result.stopTest(self)
- self.assertEqual(1, len(self.decorated._calls))
- self.assertEqual(time, self.decorated._calls[0])
-
- def test_calling_time_None_enables_automatic_time(self):
- time = datetime.datetime(2009,10,11,12,13,14,15, iso8601.Utc())
- self.result.time(time)
- self.assertEqual(1, len(self.decorated._calls))
- self.assertEqual(time, self.decorated._calls[0])
- # Calling None passes the None through, in case other results care.
- self.result.time(None)
- self.assertEqual(2, len(self.decorated._calls))
- self.assertEqual(None, self.decorated._calls[1])
- # Calling other methods doesn't generate an automatic time event.
- self.result.startTest(self)
- self.assertEqual(3, len(self.decorated._calls))
- self.assertNotEqual(None, self.decorated._calls[2])
-
- def test_set_failfast_True(self):
- self.assertFalse(self.decorated.failfast)
- self.result.failfast = True
- self.assertTrue(self.decorated.failfast)
-
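-
-# Illustrative sketch, not part of the original tests: AutoTimingTestResultDecorator
-# can wrap a TestProtocolClient so that each forwarded event is preceded by a
-# "time:" line in the emitted stream.  The helper name is hypothetical and the
-# function is never called by the test suite.
-def _demo_auto_timing():
-    from io import BytesIO
-
-    import subunit
-    import subunit.test_results
-
-    out = BytesIO()
-    client = subunit.TestProtocolClient(out)
-    timed = subunit.test_results.AutoTimingTestResultDecorator(client)
-    test = subunit.RemotedTestCase("example test")
-    timed.startTest(test)   # "time: ..." then "test: example test"
-    timed.addSuccess(test)  # "time: ..." then "successful: example test"
-    timed.stopTest(test)
-    return out.getvalue()
-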
-
-class TestTagCollapsingDecorator(TestCase):
-
- def test_tags_collapsed_outside_of_tests(self):
- result = ExtendedTestResult()
- tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
- tag_collapser.tags(set(['a']), set())
- tag_collapser.tags(set(['b']), set())
- tag_collapser.startTest(self)
- self.assertEquals(
- [('tags', set(['a', 'b']), set([])),
- ('startTest', self),
- ], result._events)
-
- def test_tags_collapsed_outside_of_tests_are_flushed(self):
- result = ExtendedTestResult()
- tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
- tag_collapser.startTestRun()
- tag_collapser.tags(set(['a']), set())
- tag_collapser.tags(set(['b']), set())
- tag_collapser.startTest(self)
- tag_collapser.addSuccess(self)
- tag_collapser.stopTest(self)
- tag_collapser.stopTestRun()
- self.assertEquals(
- [('startTestRun',),
- ('tags', set(['a', 'b']), set([])),
- ('startTest', self),
- ('addSuccess', self),
- ('stopTest', self),
- ('stopTestRun',),
- ], result._events)
-
- def test_tags_forwarded_after_tests(self):
- test = subunit.RemotedTestCase('foo')
- result = ExtendedTestResult()
- tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
- tag_collapser.startTestRun()
- tag_collapser.startTest(test)
- tag_collapser.addSuccess(test)
- tag_collapser.stopTest(test)
- tag_collapser.tags(set(['a']), set(['b']))
- tag_collapser.stopTestRun()
- self.assertEqual(
- [('startTestRun',),
- ('startTest', test),
- ('addSuccess', test),
- ('stopTest', test),
- ('tags', set(['a']), set(['b'])),
- ('stopTestRun',),
- ],
- result._events)
-
- def test_tags_collapsed_inside_of_tests(self):
- result = ExtendedTestResult()
- tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
- test = subunit.RemotedTestCase('foo')
- tag_collapser.startTest(test)
- tag_collapser.tags(set(['a']), set())
- tag_collapser.tags(set(['b']), set(['a']))
- tag_collapser.tags(set(['c']), set())
- tag_collapser.stopTest(test)
- self.assertEquals(
- [('startTest', test),
- ('tags', set(['b', 'c']), set(['a'])),
- ('stopTest', test)],
- result._events)
-
- def test_tags_collapsed_inside_of_tests_different_ordering(self):
- result = ExtendedTestResult()
- tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
- test = subunit.RemotedTestCase('foo')
- tag_collapser.startTest(test)
- tag_collapser.tags(set(), set(['a']))
- tag_collapser.tags(set(['a', 'b']), set())
- tag_collapser.tags(set(['c']), set())
- tag_collapser.stopTest(test)
- self.assertEquals(
- [('startTest', test),
- ('tags', set(['a', 'b', 'c']), set()),
- ('stopTest', test)],
- result._events)
-
- def test_tags_sent_before_result(self):
- # Because addSuccess and friends tend to send subunit output
- # immediately, and because 'tags:' before a result line means
- # something different to 'tags:' after a result line, we need to be
- # sure that tags are emitted before 'addSuccess' (or whatever).
- result = ExtendedTestResult()
- tag_collapser = subunit.test_results.TagCollapsingDecorator(result)
- test = subunit.RemotedTestCase('foo')
- tag_collapser.startTest(test)
- tag_collapser.tags(set(['a']), set())
- tag_collapser.addSuccess(test)
- tag_collapser.stopTest(test)
- self.assertEquals(
- [('startTest', test),
- ('tags', set(['a']), set()),
- ('addSuccess', test),
- ('stopTest', test)],
- result._events)
-
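-
-# Illustrative sketch, not part of the original tests: the ordering asserted
-# above matters when the decorated result is a TestProtocolClient, because a
-# "tags:" line emitted after the outcome line would change its meaning.  The
-# helper name is hypothetical and the function is never called by the suite.
-def _demo_tags_flushed_before_outcome():
-    from io import BytesIO
-
-    import subunit
-    import subunit.test_results
-
-    out = BytesIO()
-    client = subunit.TestProtocolClient(out)
-    collapser = subunit.test_results.TagCollapsingDecorator(client)
-    test = subunit.RemotedTestCase("tagged test")
-    collapser.startTest(test)
-    collapser.tags(set(['slow']), set())
-    collapser.addSuccess(test)  # "tags: slow" is written before "successful: ..."
-    collapser.stopTest(test)
-    return out.getvalue()
-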
-
-class TestTimeCollapsingDecorator(TestCase):
-
- def make_time(self):
- # Heh heh.
- return datetime.datetime(
- 2000, 1, self.getUniqueInteger(), tzinfo=iso8601.UTC)
-
- def test_initial_time_forwarded(self):
- # We always forward the first time event we see.
- result = ExtendedTestResult()
- tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
- a_time = self.make_time()
- tag_collapser.time(a_time)
- self.assertEquals([('time', a_time)], result._events)
-
- def test_time_collapsed_to_first_and_last(self):
- # If there are many consecutive time events, only the first and last
- # are sent through.
- result = ExtendedTestResult()
- tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
- times = [self.make_time() for i in range(5)]
- for a_time in times:
- tag_collapser.time(a_time)
- tag_collapser.startTest(subunit.RemotedTestCase('foo'))
- self.assertEquals(
- [('time', times[0]), ('time', times[-1])], result._events[:-1])
-
- def test_only_one_time_sent(self):
- # If we receive a single time event followed by a non-time event, we
- # send exactly one time event.
- result = ExtendedTestResult()
- tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
- a_time = self.make_time()
- tag_collapser.time(a_time)
- tag_collapser.startTest(subunit.RemotedTestCase('foo'))
- self.assertEquals([('time', a_time)], result._events[:-1])
-
- def test_duplicate_times_not_sent(self):
- # Many time events with the exact same time are collapsed into one
- # time event.
- result = ExtendedTestResult()
- tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
- a_time = self.make_time()
- for i in range(5):
- tag_collapser.time(a_time)
- tag_collapser.startTest(subunit.RemotedTestCase('foo'))
- self.assertEquals([('time', a_time)], result._events[:-1])
-
- def test_no_times_inserted(self):
- result = ExtendedTestResult()
- tag_collapser = subunit.test_results.TimeCollapsingDecorator(result)
- a_time = self.make_time()
- tag_collapser.time(a_time)
- foo = subunit.RemotedTestCase('foo')
- tag_collapser.startTest(foo)
- tag_collapser.addSuccess(foo)
- tag_collapser.stopTest(foo)
- self.assertEquals(
- [('time', a_time),
- ('startTest', foo),
- ('addSuccess', foo),
- ('stopTest', foo)], result._events)
-
-
-class TestByTestResultTests(testtools.TestCase):
-
- def setUp(self):
- super(TestByTestResultTests, self).setUp()
- self.log = []
- self.result = subunit.test_results.TestByTestResult(self.on_test)
- if sys.version_info >= (3, 0):
- self.result._now = iter(range(5)).__next__
- else:
- self.result._now = iter(range(5)).next
-
- def assertCalled(self, **kwargs):
- defaults = {
- 'test': self,
- 'tags': set(),
- 'details': None,
- 'start_time': 0,
- 'stop_time': 1,
- }
- defaults.update(kwargs)
- self.assertEqual([defaults], self.log)
-
- def on_test(self, **kwargs):
- self.log.append(kwargs)
-
- def test_no_tests_nothing_reported(self):
- self.result.startTestRun()
- self.result.stopTestRun()
- self.assertEqual([], self.log)
-
- def test_add_success(self):
- self.result.startTest(self)
- self.result.addSuccess(self)
- self.result.stopTest(self)
- self.assertCalled(status='success')
-
- def test_add_success_details(self):
- self.result.startTest(self)
- details = {'foo': 'bar'}
- self.result.addSuccess(self, details=details)
- self.result.stopTest(self)
- self.assertCalled(status='success', details=details)
-
- def test_tags(self):
- if not getattr(self.result, 'tags', None):
- self.skipTest("No tags in testtools")
- self.result.tags(['foo'], [])
- self.result.startTest(self)
- self.result.addSuccess(self)
- self.result.stopTest(self)
- self.assertCalled(status='success', tags=set(['foo']))
-
- def test_add_error(self):
- self.result.startTest(self)
- try:
- 1/0
- except ZeroDivisionError:
- error = sys.exc_info()
- self.result.addError(self, error)
- self.result.stopTest(self)
- self.assertCalled(
- status='error',
- details={'traceback': TracebackContent(error, self)})
-
- def test_add_error_details(self):
- self.result.startTest(self)
- details = {"foo": text_content("bar")}
- self.result.addError(self, details=details)
- self.result.stopTest(self)
- self.assertCalled(status='error', details=details)
-
- def test_add_failure(self):
- self.result.startTest(self)
- try:
- self.fail("intentional failure")
- except self.failureException:
- failure = sys.exc_info()
- self.result.addFailure(self, failure)
- self.result.stopTest(self)
- self.assertCalled(
- status='failure',
- details={'traceback': TracebackContent(failure, self)})
-
- def test_add_failure_details(self):
- self.result.startTest(self)
- details = {"foo": text_content("bar")}
- self.result.addFailure(self, details=details)
- self.result.stopTest(self)
- self.assertCalled(status='failure', details=details)
-
- def test_add_xfail(self):
- self.result.startTest(self)
- try:
- 1/0
- except ZeroDivisionError:
- error = sys.exc_info()
- self.result.addExpectedFailure(self, error)
- self.result.stopTest(self)
- self.assertCalled(
- status='xfail',
- details={'traceback': TracebackContent(error, self)})
-
- def test_add_xfail_details(self):
- self.result.startTest(self)
- details = {"foo": text_content("bar")}
- self.result.addExpectedFailure(self, details=details)
- self.result.stopTest(self)
- self.assertCalled(status='xfail', details=details)
-
- def test_add_unexpected_success(self):
- self.result.startTest(self)
- details = {'foo': 'bar'}
- self.result.addUnexpectedSuccess(self, details=details)
- self.result.stopTest(self)
- self.assertCalled(status='success', details=details)
-
- def test_add_skip_reason(self):
- self.result.startTest(self)
- reason = self.getUniqueString()
- self.result.addSkip(self, reason)
- self.result.stopTest(self)
- self.assertCalled(
- status='skip', details={'reason': text_content(reason)})
-
- def test_add_skip_details(self):
- self.result.startTest(self)
- details = {'foo': 'bar'}
- self.result.addSkip(self, details=details)
- self.result.stopTest(self)
- self.assertCalled(status='skip', details=details)
-
- def test_twice(self):
- self.result.startTest(self)
- self.result.addSuccess(self, details={'foo': 'bar'})
- self.result.stopTest(self)
- self.result.startTest(self)
- self.result.addSuccess(self)
- self.result.stopTest(self)
- self.assertEqual(
- [{'test': self,
- 'status': 'success',
- 'start_time': 0,
- 'stop_time': 1,
- 'tags': set(),
- 'details': {'foo': 'bar'}},
- {'test': self,
- 'status': 'success',
- 'start_time': 2,
- 'stop_time': 3,
- 'tags': set(),
- 'details': None},
- ],
- self.log)
-
-
-class TestCsvResult(testtools.TestCase):
-
- def parse_stream(self, stream):
- stream.seek(0)
- reader = csv.reader(stream)
- return list(reader)
-
- def test_csv_output(self):
- stream = StringIO()
- result = subunit.test_results.CsvResult(stream)
- if sys.version_info >= (3, 0):
- result._now = iter(range(5)).__next__
- else:
- result._now = iter(range(5)).next
- result.startTestRun()
- result.startTest(self)
- result.addSuccess(self)
- result.stopTest(self)
- result.stopTestRun()
- self.assertEqual(
- [['test', 'status', 'start_time', 'stop_time'],
- [self.id(), 'success', '0', '1'],
- ],
- self.parse_stream(stream))
-
- def test_just_header_when_no_tests(self):
- stream = StringIO()
- result = subunit.test_results.CsvResult(stream)
- result.startTestRun()
- result.stopTestRun()
- self.assertEqual(
- [['test', 'status', 'start_time', 'stop_time']],
- self.parse_stream(stream))
-
- def test_no_output_before_events(self):
- stream = StringIO()
- subunit.test_results.CsvResult(stream)
- self.assertEqual([], self.parse_stream(stream))
-
-
-def test_suite():
- loader = subunit.tests.TestUtil.TestLoader()
- result = loader.loadTestsFromName(__name__)
- return result
diff --git a/lib/subunit/runtests.py b/lib/subunit/runtests.py
deleted file mode 100755
index 8ecc6cd3fb..0000000000
--- a/lib/subunit/runtests.py
+++ /dev/null
@@ -1,138 +0,0 @@
-#!/usr/bin/env python
-# -*- Mode: python -*-
-#
-# Copyright (C) 2004 Canonical.com
-# Author: Robert Collins <robert.collins@canonical.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-#
-
-import unittest
-from subunit.tests.TestUtil import TestVisitor, TestSuite
-import subunit
-import sys
-import os
-import shutil
-import logging
-
-class ParameterisableTextTestRunner(unittest.TextTestRunner):
- """I am a TextTestRunner whose result class is
- parameterisable without further subclassing"""
- def __init__(self, **args):
- unittest.TextTestRunner.__init__(self, **args)
- self._resultFactory=None
- def resultFactory(self, *args):
- """set or retrieve the result factory"""
- if args:
- self._resultFactory=args[0]
- return self
- if self._resultFactory is None:
- self._resultFactory=unittest._TextTestResult
- return self._resultFactory
-
- def _makeResult(self):
- return self.resultFactory()(self.stream, self.descriptions, self.verbosity)
-
-
-class EarlyStoppingTextTestResult(unittest._TextTestResult):
- """I am a TextTestResult that can optionally stop at the first failure
- or error"""
-
- def addError(self, test, err):
- unittest._TextTestResult.addError(self, test, err)
- if self.stopOnError():
- self.stop()
-
- def addFailure(self, test, err):
-        unittest._TextTestResult.addFailure(self, test, err)
- if self.stopOnFailure():
- self.stop()
-
- def stopOnError(self, *args):
- """should this result indicate an abort when an error occurs?
- TODO parameterise this"""
- return True
-
- def stopOnFailure(self, *args):
- """should this result indicate an abort when a failure error occurs?
- TODO parameterise this"""
- return True
-
-
-def earlyStopFactory(*args, **kwargs):
- """return a an early stopping text test result"""
- result=EarlyStoppingTextTestResult(*args, **kwargs)
- return result
-
-
-class ShellTests(subunit.ExecTestCase):
-
- def test_sourcing(self):
- """./shell/tests/test_source_library.sh"""
-
- def test_functions(self):
- """./shell/tests/test_function_output.sh"""
-
-
-def test_suite():
- result = TestSuite()
- result.addTest(subunit.test_suite())
- result.addTest(ShellTests('test_sourcing'))
- result.addTest(ShellTests('test_functions'))
- return result
-
-
-class filteringVisitor(TestVisitor):
- """I accrue all the testCases I visit that pass a regexp filter on id
- into my suite
- """
-
- def __init__(self, filter):
- import re
- TestVisitor.__init__(self)
- self._suite=None
- self.filter=re.compile(filter)
-
- def suite(self):
- """answer the suite we are building"""
- if self._suite is None:
- self._suite=TestSuite()
- return self._suite
-
- def visitCase(self, aCase):
- if self.filter.match(aCase.id()):
- self.suite().addTest(aCase)
-
-
-def main(argv):
- """To parameterise what tests are run, run this script like so:
-    python runtests.py REGEX
-    e.g.
-    python runtests.py '.*Protocol.*'
- to run all tests with Protocol in their id."""
- if len(argv) > 1:
- pattern = argv[1]
- else:
- pattern = ".*"
- visitor = filteringVisitor(pattern)
- test_suite().visit(visitor)
- runner = ParameterisableTextTestRunner(verbosity=2)
- runner.resultFactory(unittest._TextTestResult)
- if not runner.run(visitor.suite()).wasSuccessful():
- return 1
- return 0
-
-if __name__ == '__main__':
- sys.exit(main(sys.argv))
diff --git a/lib/subunit/setup.py b/lib/subunit/setup.py
deleted file mode 100755
index 1a0b192b1b..0000000000
--- a/lib/subunit/setup.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-try:
- # If the user has setuptools / distribute installed, use it
- from setuptools import setup
-except ImportError:
- # Otherwise, fall back to distutils.
- from distutils.core import setup
- extra = {}
-else:
- extra = {
- 'install_requires': [
- 'testtools>=0.9.23',
- ]
- }
-
-
-def _get_version_from_file(filename, start_of_line, split_marker):
- """Extract version from file, giving last matching value or None"""
- try:
- return [x for x in open(filename)
- if x.startswith(start_of_line)][-1].split(split_marker)[1].strip()
- except (IOError, IndexError):
- return None
-
-
-VERSION = (
- # Assume we are in a distribution, which has PKG-INFO
- _get_version_from_file('PKG-INFO', 'Version:', ':')
- # Must be a development checkout, so use the Makefile
- or _get_version_from_file('Makefile', 'VERSION', '=')
- or "0.0")
-
-
-setup(
- name='python-subunit',
- version=VERSION,
- description=('Python implementation of subunit test streaming protocol'),
- long_description=open('README').read(),
- classifiers=[
- 'Intended Audience :: Developers',
- 'Programming Language :: Python :: 3',
- 'Programming Language :: Python',
- 'Topic :: Software Development :: Testing',
- ],
- keywords='python test streaming',
- author='Robert Collins',
- author_email='subunit-dev@lists.launchpad.net',
- url='http://launchpad.net/subunit',
- packages=['subunit', 'subunit.tests'],
- package_dir={'subunit': 'python/subunit'},
- scripts = [
- 'filters/subunit2gtk',
- 'filters/subunit2junitxml',
- 'filters/subunit2pyunit',
- 'filters/subunit-filter',
- 'filters/subunit-ls',
- 'filters/subunit-notify',
- 'filters/subunit-stats',
- 'filters/subunit-tags',
- 'filters/tap2subunit',
- ],
- **extra
-)
diff --git a/lib/subunit/shell/README b/lib/subunit/shell/README
deleted file mode 100644
index af894a2bd3..0000000000
--- a/lib/subunit/shell/README
+++ /dev/null
@@ -1,62 +0,0 @@
-#
-# subunit shell bindings.
-# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-This tree contains shell bindings to the subunit protocol. They are written
-entirely in shell, and unit tested in shell. See the tests/ directory for the
-test scripts. You can use `make check` to run the tests. There is a trivial
-python test_shell.py which uses the pyunit gui to expose the test results in a
-compact form.
-
-The shell bindings consist of five functions which you can use to output test
-metadata trivially. See share/subunit.sh for the functions and comments.
-
-However, this is not a full test environment; it is support code for reporting
-to subunit. You can look at ShUnit (http://shunit.sourceforge.net) for 'proper'
-shell based xUnit functionality. There is a patch for ShUnit 1.3
-(subunit-ui.patch) in the subunit source tree. I hope to have that integrated
-upstream in the near future. I will delete the copy of the patch in the subunit
-tree a release or two later.
-
-If you are a test environment maintainer - either homegrown, or ShUnit or some
-such, you will need to see how the subunit calls should be used. Here is what
-a manually written test using the bindings might look like:
-
-
-subunit_start_test "test name"
-# determine if test passes or fails
-something
-result=$?
-if [ $result == 0 ]; then
- subunit_pass_test "test name"
-else
- subunit_fail_test "test name" <<END
-Something went wrong running something:
-exited with result: '$result'
-END
-fi
-
-Which when run with a subunit test runner will generate something like:
-test name ... ok
-
-on success, and:
-
-test name ... FAIL
-
-======================================================================
-FAIL: test name
-----------------------------------------------------------------------
-RemoteError:
-Something went wrong running something:
-exited with result: '1'
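-
-As an aside (not part of the original bindings), the stream these functions
-emit is ordinary subunit, so it can be consumed by the Python bindings. A
-minimal sketch, assuming python-subunit is installed:
-
-import unittest
-from io import BytesIO
-import subunit
-
-result = unittest.TestResult()
-server = subunit.TestProtocolServer(result)
-# a captured stream such as the functions above produce
-for line in BytesIO(b"test: test name\nsuccess: test name\n"):
-    server.lineReceived(line)
-print(result.testsRun)   # 1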
diff --git a/lib/subunit/shell/share/subunit.sh b/lib/subunit/shell/share/subunit.sh
deleted file mode 100644
index a532388252..0000000000
--- a/lib/subunit/shell/share/subunit.sh
+++ /dev/null
@@ -1,61 +0,0 @@
-#
-# subunit.sh: shell functions to report test status via the subunit protocol.
-# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-subunit_start_test () {
- # emit the current protocol start-marker for test $1
- echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
- echo "test: $1"
-}
-
-
-subunit_pass_test () {
- # emit the current protocol test passed marker for test $1
- echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
- echo "success: $1"
-}
-
-
-subunit_fail_test () {
- # emit the current protocol fail-marker for test $1, and emit stdin as
- # the error text.
- # we use stdin because the failure message can be arbitrarily long, and this
-    # makes it convenient to write in scripts (using <<END syntax).
- echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
- echo "failure: $1 ["
- cat -
- echo "]"
-}
-
-
-subunit_error_test () {
- # emit the current protocol error-marker for test $1, and emit stdin as
- # the error text.
- # we use stdin because the failure message can be arbitrarily long, and this
-    # makes it convenient to write in scripts (using <<END syntax).
- echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
- echo "error: $1 ["
- cat -
- echo "]"
-}
-
-
-subunit_skip_test () {
- # emit the current protocol test skipped marker for test $1
- echo "time: `date -u '+%Y-%m-%d %H:%M:%SZ'`"
- echo "skip: $1"
-}
-
-
diff --git a/lib/subunit/shell/tests/test_function_output.sh b/lib/subunit/shell/tests/test_function_output.sh
deleted file mode 100755
index 00b0844dda..0000000000
--- a/lib/subunit/shell/tests/test_function_output.sh
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/bin/bash
-# subunit shell bindings.
-# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-
-# this script tests the output of the methods. As each is tested we start using
-# it.
-# So the first test manually implements the entire protocol, the next uses the
-# start method and so on.
-# it is assumed that we are running from the 'shell' tree root in the source
-# of subunit, and that the library sourcing tests have all passed - if they
-# have not, this test script may well fail strangely.
-
-# import the library.
-. ${SHELL_SHARE}subunit.sh
-
-echo 'test: subunit_start_test output'
-func_output=$(subunit_start_test "foo bar"|grep -v 'time:')
-func_status=$?
-if [ $func_status == 0 -a "x$func_output" = "xtest: foo bar" ]; then
- echo 'success: subunit_start_test output'
-else
- echo 'failure: subunit_start_test output ['
- echo 'got an error code or incorrect output:'
- echo "exit: $func_status"
- echo "output: '$func_output'"
- echo ']' ;
-fi
-
-subunit_start_test "subunit_pass_test output"
-func_output=$(subunit_pass_test "foo bar"|grep -v 'time:')
-func_status=$?
-if [ $func_status == 0 -a "x$func_output" = "xsuccess: foo bar" ]; then
- subunit_pass_test "subunit_pass_test output"
-else
- echo 'failure: subunit_pass_test output ['
- echo 'got an error code or incorrect output:'
- echo "exit: $func_status"
- echo "output: '$func_output'"
- echo ']' ;
-fi
-
-subunit_start_test "subunit_fail_test output"
-func_output=$((subunit_fail_test "foo bar" <<END
-something
- wrong
-here
-END
-)|grep -v 'time:')
-func_status=$?
-if [ $func_status == 0 -a "x$func_output" = "xfailure: foo bar [
-something
- wrong
-here
-]" ]; then
- subunit_pass_test "subunit_fail_test output"
-else
- echo 'failure: subunit_fail_test output ['
- echo 'got an error code or incorrect output:'
- echo "exit: $func_status"
- echo "output: '$func_output'"
- echo ']' ;
-fi
-
-subunit_start_test "subunit_error_test output"
-func_output=$((subunit_error_test "foo bar" <<END
-something
- died
-here
-END
-)| grep -v 'time:')
-func_status=$?
-if [ $func_status == 0 -a "x$func_output" = "xerror: foo bar [
-something
- died
-here
-]" ]; then
- subunit_pass_test "subunit_error_test output"
-else
- subunit_fail_test "subunit_error_test output" <<END
-got an error code or incorrect output:
-exit: $func_status
-output: '$func_output'
-END
-fi
diff --git a/lib/subunit/shell/tests/test_source_library.sh b/lib/subunit/shell/tests/test_source_library.sh
deleted file mode 100755
index 699f1281bc..0000000000
--- a/lib/subunit/shell/tests/test_source_library.sh
+++ /dev/null
@@ -1,108 +0,0 @@
-#!/bin/bash
-# subunit shell bindings.
-# Copyright (C) 2006 Robert Collins <robertc@robertcollins.net>
-#
-# Licensed under either the Apache License, Version 2.0 or the BSD 3-clause
-# license at the users choice. A copy of both licenses are available in the
-# project source as Apache-2.0 and BSD. You may not use this file except in
-# compliance with one of these two licences.
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under these licenses is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# license you chose for the specific language governing permissions and
-# limitations under that license.
-#
-
-
-# this script tests that we can source the subunit shell bindings successfully.
-# It manually implements the control protocol so that it does not depend on the
-# bindings being complete yet.
-
-# we expect to be run from the tree root.
-
-echo 'test: shell bindings can be sourced'
-# if any output occurs, this has failed to source cleanly
-source_output=$(. ${SHELL_SHARE}subunit.sh 2>&1)
-if [ $? == 0 -a "x$source_output" = "x" ]; then
- echo 'success: shell bindings can be sourced'
-else
- echo 'failure: shell bindings can be sourced ['
- echo 'got an error code or output during sourcing.:'
- echo $source_output
- echo ']' ;
-fi
-
-# now source it for real
-. ${SHELL_SHARE}subunit.sh
-
-# we should have a start_test function
-echo 'test: subunit_start_test exists'
-found_type=$(type -t subunit_start_test)
-status=$?
-if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
- echo 'success: subunit_start_test exists'
-else
- echo 'failure: subunit_start_test exists ['
- echo 'subunit_start_test is not a function:'
- echo "type -t status: $status"
- echo "output: $found_type"
- echo ']' ;
-fi
-
-# we should have a pass_test function
-echo 'test: subunit_pass_test exists'
-found_type=$(type -t subunit_pass_test)
-status=$?
-if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
- echo 'success: subunit_pass_test exists'
-else
- echo 'failure: subunit_pass_test exists ['
- echo 'subunit_pass_test is not a function:'
- echo "type -t status: $status"
- echo "output: $found_type"
- echo ']' ;
-fi
-
-# we should have a fail_test function
-echo 'test: subunit_fail_test exists'
-found_type=$(type -t subunit_fail_test)
-status=$?
-if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
- echo 'success: subunit_fail_test exists'
-else
- echo 'failure: subunit_fail_test exists ['
- echo 'subunit_fail_test is not a function:'
- echo "type -t status: $status"
- echo "output: $found_type"
- echo ']' ;
-fi
-
-# we should have an error_test function
-echo 'test: subunit_error_test exists'
-found_type=$(type -t subunit_error_test)
-status=$?
-if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
- echo 'success: subunit_error_test exists'
-else
- echo 'failure: subunit_error_test exists ['
- echo 'subunit_error_test is not a function:'
- echo "type -t status: $status"
- echo "output: $found_type"
- echo ']' ;
-fi
-
-# we should have a skip_test function
-echo 'test: subunit_skip_test exists'
-found_type=$(type -t subunit_skip_test)
-status=$?
-if [ $status == 0 -a "x$found_type" = "xfunction" ]; then
- echo 'success: subunit_skip_test exists'
-else
- echo 'failure: subunit_skip_test exists ['
- echo 'subunit_skip_test is not a function:'
- echo "type -t status: $status"
- echo "output: $found_type"
- echo ']' ;
-fi
-