aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorElie Kheirallah <khei@google.com>2023-04-19 00:32:15 +0000
committerAutomerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>2023-04-19 00:32:15 +0000
commite2b40672c075396c00ccfc32d36da5b966bbe314 (patch)
tree19d7d7ae78b8ecf5e0e528d4c72f27566a677cb2
parent5fb38dd8ad881e6d177fa38be6bb303604d55609 (diff)
parent725310cd0a9cd4d24204053df687ae7d255ead5b (diff)
downloadlibtest-mimic-e2b40672c075396c00ccfc32d36da5b966bbe314.tar.gz
Import libtest-mimic crate. am: e2bd850ae7 am: f6c6671e0c am: 9681cdf00a am: 725310cd0aaml_wif_341711020aml_wif_341610000aml_wif_341510000aml_wif_341410080aml_wif_341310010aml_wif_341110010aml_wif_341011010aml_wif_340913010aml_uwb_341710010aml_uwb_341513070aml_uwb_341511050aml_uwb_341310300aml_uwb_341310030aml_uwb_341111010aml_uwb_341011000aml_tz5_341510070aml_tz5_341510050aml_tz5_341510010aml_tet_341712060aml_tet_341610020aml_tet_341511010aml_tet_341411060aml_tet_341310230aml_tet_341112070aml_tet_341010040aml_tet_340913030aml_swc_341711000aml_swc_341619000aml_swc_341513600aml_swc_341312300aml_swc_341312020aml_swc_341111000aml_swc_341011020aml_swc_340922010aml_sta_341710000aml_sta_341615000aml_sta_341511040aml_sta_341410000aml_sta_341311010aml_sta_341114000aml_sta_341111000aml_sta_341010020aml_sta_340912000aml_sta_340911000aml_sdk_341710000aml_sdk_341510000aml_sdk_341410000aml_sdk_341110080aml_sdk_341110000aml_sdk_341010000aml_sdk_340912010aml_sch_341510000aml_res_341510000aml_res_341410010aml_res_341311030aml_res_341110000aml_res_340912000aml_per_341711000aml_per_341614000aml_per_341510010aml_per_341410020aml_per_341311000aml_per_341110020aml_per_341110010aml_per_341011100aml_per_341011020aml_per_340916010aml_odp_341717000aml_odp_341610000aml_neu_341510000aml_neu_341010080aml_neu_341010000aml_net_341710020aml_net_341610030aml_net_341510050aml_net_341510000aml_net_341411030aml_net_341311010aml_net_341310020aml_net_341111030aml_net_341014000aml_net_340913000aml_mpr_341713020aml_mpr_341614010aml_mpr_341511070aml_mpr_341411070aml_mpr_341313030aml_mpr_341111030aml_mpr_341111020aml_mpr_341015090aml_mpr_341015030aml_mpr_340919000aml_med_341711000aml_med_341619000aml_med_341513600aml_med_341312300aml_med_341312020aml_med_341111000aml_med_341011000aml_med_340922010aml_ips_341611000aml_ips_341510000aml_ips_340914280aml_ips_340914200aml_ips_340914000aml_ext_341716000aml_ext_341620040aml_ext_341518010aml_ext_341414010aml_ext_341317010aml_ext_341131030aml_ext_341027030aml_doc_3
41713000aml_doc_341610010aml_doc_341510050aml_doc_341312010aml_doc_341112000aml_doc_341012000aml_doc_340916000aml_con_341614000aml_con_341511080aml_con_341410300aml_con_341310090aml_con_341110000aml_cfg_341510000aml_cbr_341710000aml_cbr_341610000aml_cbr_341510010aml_cbr_341410010aml_cbr_341311010aml_cbr_341110000aml_cbr_341011000aml_cbr_340914000aml_ase_341510000aml_ase_341410000aml_ase_341310010aml_ase_341113000aml_ase_340913000aml_art_341711000aml_art_341615020aml_art_341514450aml_art_341514410aml_art_341411300aml_art_341311100aml_art_341110110aml_art_341110060aml_art_341010050aml_art_340915060aml_ads_341720000aml_ads_341615050aml_ads_341517040aml_ads_341413000aml_ads_341316030aml_ads_341131050aml_ads_341027030aml_ads_340915050aml_adb_341520010aml_adb_341517070aml_adb_340912530aml_adb_340912350aml_adb_340912200aml_adb_340912000android14-mainline-wifi-releaseandroid14-mainline-uwb-releaseandroid14-mainline-tethering-releaseandroid14-mainline-sdkext-releaseandroid14-mainline-resolv-releaseandroid14-mainline-permission-releaseandroid14-mainline-os-statsd-releaseandroid14-mainline-networking-releaseandroid14-mainline-mediaprovider-releaseandroid14-mainline-media-swcodec-releaseandroid14-mainline-media-releaseandroid14-mainline-extservices-releaseandroid14-mainline-conscrypt-releaseandroid14-mainline-cellbroadcast-releaseandroid14-mainline-art-releaseandroid14-mainline-appsearch-releaseandroid14-mainline-adservices-releaseandroid14-mainline-adbd-releaseaml_tz5_341510010
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/libtest-mimic/+/2498258 Change-Id: Ibdad3fdf9af3df4c03b02c0a07fb75918531cf04 Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--CHANGELOG.md103
-rw-r--r--Cargo.lock314
-rw-r--r--Cargo.toml52
-rw-r--r--Cargo.toml.orig28
l---------LICENSE1
-rw-r--r--LICENSE-APACHE201
-rw-r--r--METADATA20
-rw-r--r--MODULE_LICENSE_APACHE20
-rw-r--r--OWNERS5
-rw-r--r--README.md37
-rw-r--r--examples/README.md5
-rw-r--r--examples/simple.rs39
-rw-r--r--examples/tidy.rs83
-rw-r--r--src/args.rs197
-rw-r--r--src/lib.rs514
-rw-r--r--src/printer.rs292
-rw-r--r--tests/all_passing.rs162
-rw-r--r--tests/common/mod.rs131
-rw-r--r--tests/main_thread.rs16
-rw-r--r--tests/mixed_bag.rs529
-rw-r--r--tests/panic.rs39
-rw-r--r--tests/real/mixed_bag.rs45
22 files changed, 2813 insertions, 0 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..0b600c7
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,103 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [0.6.0] - 2022-11-05
+### Changed
+- **Breaking**: Updated `clap` to version 4 (thanks @msrd0)
+- **Breaking**: Bump MSRV to 1.60 (due to the clap update)
+
+### Removed
+- **Breaking**: Remove `FromStr` impls for `args::{ColorSetting, FormatSetting}` (use `clap::ValueEnum` instead).
+
+## [0.5.2] - 2022-08-14
+### Added
+- Re-add `--nocapture` as a noop argument [#18](https://github.com/LukasKalbertodt/libtest-mimic/pull/18) (thanks @sunshowers)
+
+### Fixed
+- Link in documentation
+
+## [0.5.1] - 2022-08-13
+### Added
+- `Trial::{name, kind, has_ignored_flag, is_test, is_bench}` getters
+
+## [0.5.0] - 2022-08-13
+
+Most parts of this library have been rewritten and the API has changed a lot.
+You might be better of just reading the new docs instead of this change log.
+I do think the new API is better in many regards.
+Apart from an improved API, changes that motivated the rewrite are marked with ⭐.
+
+### Changed
+- **Breaking**: bump MSRV to 1.58
+- **Breaking**: Rename `Test` to `Trial`
+- **Breaking**: Rename `run_tests` to `run`
+- ⭐ **Breaking**: Make every `Trial` have a runner function instead of `data` + a
+ global runner function. Thus, the third parameter of `run` is no more. I think
+ this model is more intuitive.
+- **Breaking**: Add `Trial::{test, bench}` constructor functions, use builder
+ pattern, and make fields private.
+- **Breaking**: rename `Args::num_threads` to `test_threads`
+- **Breaking**: make fields of `Conclusion` public and remove getter methods
+- **Breaking**: remove `RunnerEvent`. This should not have been public.
+- ⭐ Tests are now run in main thread when `--test-threads=1` is specified
+- ⭐ Reduce number of indirect dependencies considerably
+- Fix `rust-version` field in `Cargo.toml` (thanks @hellow554)
+- Fix `--ignored` behavior
+- Fix some CLI error messages
+
+### Added
+- ⭐Panics in test runners are caught and treated as failure
+- ⭐ Lots of integration tests (should make any future development of this library way easier)
+- Add `must_use` message for `Conclusion`
+- Print total execution time at the end of the run
+- Allow benchmarks to run in test mode
+- `--include-ignored`
+
+### Removed
+- **Breaking**: remove unsupported CLI options. They were ignored anyway, but
+ the CLI would accept them.
+
+
+## [0.4.1] - 2022-06-07
+
+- Add `rust = "1.56"` to `Cargo.toml`, stating the existing MSRV.
+- Update `crossbeam-channel` to deduplicate some indirect dependencies.
+
+## [0.4.0] - 2022-05-13
+- **Breaking**: Update to Rust 2021, bumping MSRV to 1.56
+- Fix `--list --ignored` behavior
+
+
+## [0.3.0] - 2020-06-28
+### Added
+- Add support for running tests in parallel #4
+- Add `Arguments::from_iter` #5
+
+## [0.2.0] - 2019-10-02
+### Changed
+- Upgrade dependencies #3
+- Flush stdout after printing test name 4a36b3318b69df233b0db7d1af3caf276e6bb070
+
+### Fixed
+- Fix overflow bug when calculating number of passed tests 264fe6f8a986ab0c02f4a85e64e42ee17596923c
+
+## 0.1.0 - 2018-07-23
+### Added
+- Everything.
+
+
+[Unreleased]: https://github.com/LukasKalbertodt/libtest-mimic/compare/v0.6.0...HEAD
+[0.6.0]: https://github.com/LukasKalbertodt/libtest-mimic/compare/v0.5.2...v0.6.0
+[0.5.2]: https://github.com/LukasKalbertodt/libtest-mimic/compare/v0.5.1...v0.5.2
+[0.5.1]: https://github.com/LukasKalbertodt/libtest-mimic/compare/v0.5.0...v0.5.1
+[0.5.0]: https://github.com/LukasKalbertodt/libtest-mimic/compare/v0.4.1...v0.5.0
+[0.4.1]: https://github.com/LukasKalbertodt/libtest-mimic/compare/v0.4.0...v0.4.1
+[0.4.0]: https://github.com/LukasKalbertodt/libtest-mimic/compare/v0.3.0...v0.4.0
+[0.3.0]: https://github.com/LukasKalbertodt/libtest-mimic/compare/v0.2.0...v0.3.0
+[0.2.0]: https://github.com/LukasKalbertodt/libtest-mimic/compare/v0.1.0...v0.2.0
+[0.1.1]: https://github.com/LukasKalbertodt/libtest-mimic/compare/v0.1.0...v0.1.1
diff --git a/Cargo.lock b/Cargo.lock
new file mode 100644
index 0000000..fb49431
--- /dev/null
+++ b/Cargo.lock
@@ -0,0 +1,314 @@
+# This file is automatically @generated by Cargo.
+# It is not intended for manual editing.
+version = 3
+
+[[package]]
+name = "ansi_term"
+version = "0.12.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "atty"
+version = "0.2.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8"
+dependencies = [
+ "hermit-abi",
+ "libc",
+ "winapi",
+]
+
+[[package]]
+name = "bitflags"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693"
+
+[[package]]
+name = "cfg-if"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+
+[[package]]
+name = "clap"
+version = "4.0.19"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e67816e006b17427c9b4386915109b494fec2d929c63e3bd3561234cbf1bf1e"
+dependencies = [
+ "atty",
+ "bitflags",
+ "clap_derive",
+ "clap_lex",
+ "once_cell",
+ "strsim",
+ "termcolor",
+]
+
+[[package]]
+name = "clap_derive"
+version = "4.0.18"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "16a1b0f6422af32d5da0c58e2703320f379216ee70198241c84173a8c5ac28f3"
+dependencies = [
+ "heck",
+ "proc-macro-error",
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "clap_lex"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8"
+dependencies = [
+ "os_str_bytes",
+]
+
+[[package]]
+name = "ctor"
+version = "0.1.23"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "cdffe87e1d521a10f9696f833fe502293ea446d7f256c06128293a4119bdf4cb"
+dependencies = [
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "diff"
+version = "0.1.13"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8"
+
+[[package]]
+name = "fastrand"
+version = "1.8.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a7a407cfaa3385c4ae6b23e84623d48c2798d06e3e6a1878f7f59f17b3f86499"
+dependencies = [
+ "instant",
+]
+
+[[package]]
+name = "heck"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9"
+
+[[package]]
+name = "hermit-abi"
+version = "0.1.14"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b9586eedd4ce6b3c498bc3b4dd92fc9f11166aa908a914071953768066c67909"
+dependencies = [
+ "libc",
+]
+
+[[package]]
+name = "instant"
+version = "0.1.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
+dependencies = [
+ "cfg-if",
+]
+
+[[package]]
+name = "libc"
+version = "0.2.71"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9457b06509d27052635f90d6466700c65095fdf75409b3fbdd903e988b886f49"
+
+[[package]]
+name = "libtest-mimic"
+version = "0.6.0"
+dependencies = [
+ "clap",
+ "fastrand",
+ "pretty_assertions",
+ "termcolor",
+ "threadpool",
+]
+
+[[package]]
+name = "num_cpus"
+version = "1.13.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1"
+dependencies = [
+ "hermit-abi",
+ "libc",
+]
+
+[[package]]
+name = "once_cell"
+version = "1.13.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "18a6dbe30758c9f83eb00cbea4ac95966305f5a7772f3f42ebfc7fc7eddbd8e1"
+
+[[package]]
+name = "os_str_bytes"
+version = "6.2.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "648001efe5d5c0102d8cea768e348da85d90af8ba91f0bea908f157951493cd4"
+
+[[package]]
+name = "output_vt100"
+version = "0.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "628223faebab4e3e40667ee0b2336d34a5b960ff60ea743ddfdbcf7770bcfb66"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "pretty_assertions"
+version = "1.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c89f989ac94207d048d92db058e4f6ec7342b0971fc58d1271ca148b799b3563"
+dependencies = [
+ "ansi_term",
+ "ctor",
+ "diff",
+ "output_vt100",
+]
+
+[[package]]
+name = "proc-macro-error"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "fc175e9777c3116627248584e8f8b3e2987405cabe1c0adf7d1dd28f09dc7880"
+dependencies = [
+ "proc-macro-error-attr",
+ "proc-macro2",
+ "quote",
+ "syn",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro-error-attr"
+version = "1.0.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3cc9795ca17eb581285ec44936da7fc2335a3f34f2ddd13118b6f4d515435c50"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+ "syn-mid",
+ "version_check",
+]
+
+[[package]]
+name = "proc-macro2"
+version = "1.0.42"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c278e965f1d8cf32d6e0e96de3d3e79712178ae67986d9cf9151f51e95aac89b"
+dependencies = [
+ "unicode-ident",
+]
+
+[[package]]
+name = "quote"
+version = "1.0.20"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3bcdf212e9776fbcb2d23ab029360416bb1706b1aea2d1a5ba002727cbcab804"
+dependencies = [
+ "proc-macro2",
+]
+
+[[package]]
+name = "strsim"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623"
+
+[[package]]
+name = "syn"
+version = "1.0.98"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "unicode-ident",
+]
+
+[[package]]
+name = "syn-mid"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "termcolor"
+version = "1.1.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bab24d30b911b2376f3a13cc2cd443142f0c81dda04c118693e35b3835757755"
+dependencies = [
+ "winapi-util",
+]
+
+[[package]]
+name = "threadpool"
+version = "1.8.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d050e60b33d41c19108b32cea32164033a9013fe3b46cbd4457559bfbf77afaa"
+dependencies = [
+ "num_cpus",
+]
+
+[[package]]
+name = "unicode-ident"
+version = "1.0.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "15c61ba63f9235225a22310255a29b806b907c9b8c964bcbd0a2c70f3f2deea7"
+
+[[package]]
+name = "version_check"
+version = "0.9.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b5a972e5669d67ba988ce3dc826706fb0a8b01471c088cb0b6110b805cc36aed"
+
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
+[[package]]
+name = "winapi-util"
+version = "0.1.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "70ec6ce85bb158151cae5e5c87f95a8e97d2c0c4b001223f33a334e3ce5de178"
+dependencies = [
+ "winapi",
+]
+
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..65bfa57
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,52 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.60"
+name = "libtest-mimic"
+version = "0.6.0"
+authors = ["Lukas Kalbertodt <lukas.kalbertodt@gmail.com>"]
+exclude = [".github"]
+description = """
+Write your own test harness that looks and behaves like the built-in test harness used by `rustc --test`
+"""
+documentation = "https://docs.rs/libtest-mimic"
+readme = "README.md"
+keywords = [
+ "libtest",
+ "test",
+ "built-in",
+ "framework",
+ "harness",
+]
+categories = [
+ "development-tools::testing",
+ "development-tools::build-utils",
+]
+license = "MIT/Apache-2.0"
+repository = "https://github.com/LukasKalbertodt/libtest-mimic"
+
+[dependencies.clap]
+version = "4.0.8"
+features = ["derive"]
+
+[dependencies.termcolor]
+version = "1.0.5"
+
+[dependencies.threadpool]
+version = "1.8.1"
+
+[dev-dependencies.fastrand]
+version = "1.8.0"
+
+[dev-dependencies.pretty_assertions]
+version = "1.2.1"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
new file mode 100644
index 0000000..72ef332
--- /dev/null
+++ b/Cargo.toml.orig
@@ -0,0 +1,28 @@
+[package]
+name = "libtest-mimic"
+version = "0.6.0"
+authors = ["Lukas Kalbertodt <lukas.kalbertodt@gmail.com>"]
+edition = "2021"
+rust-version = "1.60"
+
+description = """
+Write your own test harness that looks and behaves like the built-in test \
+harness used by `rustc --test`
+"""
+documentation = "https://docs.rs/libtest-mimic"
+repository = "https://github.com/LukasKalbertodt/libtest-mimic"
+license = "MIT/Apache-2.0"
+keywords = ["libtest", "test", "built-in", "framework", "harness"]
+categories = ["development-tools::testing", "development-tools::build-utils"]
+readme = "README.md"
+
+exclude = [".github"]
+
+[dependencies]
+clap = { version = "4.0.8", features = ["derive"] }
+threadpool = "1.8.1"
+termcolor = "1.0.5"
+
+[dev-dependencies]
+fastrand = "1.8.0"
+pretty_assertions = "1.2.1"
diff --git a/LICENSE b/LICENSE
new file mode 120000
index 0000000..6b579aa
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1 @@
+LICENSE-APACHE \ No newline at end of file
diff --git a/LICENSE-APACHE b/LICENSE-APACHE
new file mode 100644
index 0000000..11069ed
--- /dev/null
+++ b/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..d9629d5
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,20 @@
+name: "libtest-mimic"
+description: "()"
+third_party {
+ url {
+ type: HOMEPAGE
+ value: "https://crates.io/crates/libtest-mimic"
+ }
+ url {
+ type: ARCHIVE
+ value: "https://static.crates.io/crates/libtest-mimic/libtest-mimic-0.6.0.crate"
+ }
+ version: "0.6.0"
+ # Dual-licensed, using the least restrictive per go/thirdpartylicenses#same.
+ license_type: NOTICE
+ last_upgrade_date {
+ year: 2023
+ month: 3
+ day: 9
+ }
+}
diff --git a/MODULE_LICENSE_APACHE2 b/MODULE_LICENSE_APACHE2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_APACHE2
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..3abd431
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,5 @@
+include platform/prebuilts/rust:master:/OWNERS
+devinmoore@google.com
+fmayle@google.com
+khei@google.com
+smoreland@google.com
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..107ba13
--- /dev/null
+++ b/README.md
@@ -0,0 +1,37 @@
+# libtest-mimic
+
+[<img alt="CI status of master" src="https://img.shields.io/github/workflow/status/LukasKalbertodt/libtest-mimic/CI/master?label=CI&logo=github&logoColor=white&style=for-the-badge" height="23">](https://github.com/LukasKalbertodt/libtest-mimic/actions?query=workflow%3ACI+branch%3Amaster)
+[<img alt="Crates.io Version" src="https://img.shields.io/crates/v/libtest-mimic?logo=rust&style=for-the-badge" height="23">](https://crates.io/crates/libtest-mimic)
+[<img alt="docs.rs" src="https://img.shields.io/crates/v/libtest-mimic?color=blue&label=docs&style=for-the-badge" height="23">](https://docs.rs/libtest-mimic)
+
+Write your own test harness that looks and behaves like the built-in test harness (used by `rustc --test`)!
+
+This is a simple and small testing framework that mimics the original `libtest`.
+That means: all output looks pretty much like `cargo test` and most CLI arguments are understood and used.
+With that plumbing work out of the way, your test runner can focus on the actual testing.
+(MSRV: 1.60)
+
+See [**the documentation**](https://docs.rs/libtest-mimic) or [the `examples/` folder](/examples) for more information.
+
+
+<p align="center">
+ <img src=".github/readme.png" width="95%"></img>
+</p>
+
+
+---
+
+## License
+
+Licensed under either of
+
+ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+### Contribution
+
+Unless you explicitly state otherwise, any contribution intentionally submitted
+for inclusion in the work by you, as defined in the Apache-2.0 license, shall
+be dual licensed as above, without any additional terms or conditions.
diff --git a/examples/README.md b/examples/README.md
new file mode 100644
index 0000000..0cb3733
--- /dev/null
+++ b/examples/README.md
@@ -0,0 +1,5 @@
+Examples
+========
+
+- `simple`: five dummy tests are created and executed
+- **`tidy`**: most useful example. Generates a test for each `.rs` file and runs a simple tidy script as a test.
diff --git a/examples/simple.rs b/examples/simple.rs
new file mode 100644
index 0000000..0249596
--- /dev/null
+++ b/examples/simple.rs
@@ -0,0 +1,39 @@
+extern crate libtest_mimic;
+
+use std::{thread, time};
+use libtest_mimic::{Arguments, Trial, Failed};
+
+
+fn main() {
+ let args = Arguments::from_args();
+
+ let tests = vec![
+ Trial::test("check_toph", check_toph),
+ Trial::test("check_sokka", check_sokka),
+ Trial::test("long_computation", long_computation).with_ignored_flag(true),
+ Trial::test("foo", compile_fail_dummy).with_kind("compile-fail"),
+ Trial::test("check_katara", check_katara),
+ ];
+
+ libtest_mimic::run(&args, tests).exit();
+}
+
+
+// Tests
+
+fn check_toph() -> Result<(), Failed> {
+ Ok(())
+}
+fn check_katara() -> Result<(), Failed> {
+ Ok(())
+}
+fn check_sokka() -> Result<(), Failed> {
+ Err("Sokka tripped and fell :(".into())
+}
+fn long_computation() -> Result<(), Failed> {
+ thread::sleep(time::Duration::from_secs(1));
+ Ok(())
+}
+fn compile_fail_dummy() -> Result<(), Failed> {
+ Ok(())
+}
diff --git a/examples/tidy.rs b/examples/tidy.rs
new file mode 100644
index 0000000..18b26bf
--- /dev/null
+++ b/examples/tidy.rs
@@ -0,0 +1,83 @@
+extern crate libtest_mimic;
+
+use libtest_mimic::{Arguments, Trial, Failed};
+
+use std::{
+ env,
+ error::Error,
+ ffi::OsStr,
+ fs,
+ path::Path,
+};
+
+
+fn main() -> Result<(), Box<dyn Error>> {
+ let args = Arguments::from_args();
+ let tests = collect_tests()?;
+ libtest_mimic::run(&args, tests).exit();
+}
+
+/// Creates one test for each `.rs` file in the current directory or
+/// sub-directories of the current directory.
+fn collect_tests() -> Result<Vec<Trial>, Box<dyn Error>> {
+ fn visit_dir(path: &Path, tests: &mut Vec<Trial>) -> Result<(), Box<dyn Error>> {
+ for entry in fs::read_dir(path)? {
+ let entry = entry?;
+ let file_type = entry.file_type()?;
+
+ // Handle files
+ let path = entry.path();
+ if file_type.is_file() {
+ if path.extension() == Some(OsStr::new("rs")) {
+ let name = path
+ .strip_prefix(env::current_dir()?)?
+ .display()
+ .to_string();
+
+ let test = Trial::test(name, move || check_file(&path))
+ .with_kind("tidy");
+ tests.push(test);
+ }
+ } else if file_type.is_dir() {
+ // Handle directories
+ visit_dir(&path, tests)?;
+ }
+ }
+
+ Ok(())
+ }
+
+ // We recursively look for `.rs` files, starting from the current
+ // directory.
+ let mut tests = Vec::new();
+ let current_dir = env::current_dir()?;
+ visit_dir(&current_dir, &mut tests)?;
+
+ Ok(tests)
+}
+
+/// Performs a couple of tidy tests.
+fn check_file(path: &Path) -> Result<(), Failed> {
+ let content = fs::read(path).map_err(|e| format!("Cannot read file: {e}"))?;
+
+ // Check that the file is valid UTF-8
+ let content = String::from_utf8(content)
+ .map_err(|_| "The file's contents are not a valid UTF-8 string!")?;
+
+ // Check for `\r`: we only want `\n` line breaks!
+ if content.contains('\r') {
+ return Err("Contains '\\r' chars. Please use ' \\n' line breaks only!".into());
+ }
+
+ // Check for tab characters `\t`
+ if content.contains('\t') {
+ return Err("Contains tab characters ('\\t'). Indent with four spaces!".into());
+ }
+
+ // Check for too long lines
+ if content.lines().any(|line| line.chars().count() > 100) {
+ return Err("Contains lines longer than 100 codepoints!".into());
+ }
+
+ Ok(())
+}
diff --git a/src/args.rs b/src/args.rs
new file mode 100644
index 0000000..b8b1d80
--- /dev/null
+++ b/src/args.rs
@@ -0,0 +1,197 @@
+use clap::{Parser, ValueEnum};
+
+/// Command line arguments.
+///
+/// This type represents everything the user can specify via CLI args. The main
+/// method is [`from_args`][Arguments::from_args] which reads the global
+/// `std::env::args()` and parses them into this type.
+///
+/// `libtest-mimic` supports a subset of all args/flags supported by the
+/// official test harness. There are also some other minor CLI differences, but
+/// the main use cases should work exactly like with the built-in harness.
+#[derive(Parser, Debug, Clone, Default)]
+#[command(
+ help_template = "USAGE: [OPTIONS] [FILTER]\n\n{all-args}\n\n\n{after-help}",
+ disable_version_flag = true,
+ after_help = "By default, all tests are run in parallel. This can be altered with the \n\
+ --test-threads flag when running tests (set it to 1).",
+)]
+pub struct Arguments {
+ // ============== FLAGS ===================================================
+ /// Run ignored and non-ignored tests.
+ #[arg(long = "include-ignored", help = "Run ignored tests")]
+ pub include_ignored: bool,
+
+ /// Run only ignored tests.
+ #[arg(long = "ignored", help = "Run ignored tests")]
+ pub ignored: bool,
+
+ /// Run tests, but not benchmarks.
+ #[arg(
+ long = "test",
+ conflicts_with = "bench",
+ help = "Run tests and not benchmarks",
+ )]
+ pub test: bool,
+
+ /// Run benchmarks, but not tests.
+ #[arg(long = "bench", help = "Run benchmarks instead of tests")]
+ pub bench: bool,
+
+ /// Only list all tests and benchmarks.
+ #[arg(long = "list", help = "List all tests and benchmarks")]
+ pub list: bool,
+
+ /// No-op, ignored (libtest-mimic always runs in no-capture mode)
+ #[arg(long = "nocapture", help = "No-op (libtest-mimic always runs in no-capture mode)")]
+ pub nocapture: bool,
+
+ /// If set, filters are matched exactly rather than by substring.
+ #[arg(
+ long = "exact",
+ help = "Exactly match filters rather than by substring",
+ )]
+ pub exact: bool,
+
+ /// If set, display only one character per test instead of one line.
+ /// Especially useful for huge test suites.
+ ///
+ /// This is an alias for `--format=terse`. If this is set, `format` is
+ /// `None`.
+ #[arg(
+ short = 'q',
+ long = "quiet",
+ conflicts_with = "format",
+ help = "Display one character per test instead of one line. Alias to --format=terse",
+ )]
+ pub quiet: bool,
+
+ // ============== OPTIONS =================================================
+ /// Number of threads used for parallel testing.
+ #[arg(
+ long = "test-threads",
+ help = "Number of threads used for running tests in parallel. If set to 1, \n\
+ all tests are run in the main thread.",
+ )]
+ pub test_threads: Option<usize>,
+
+ /// Path of the logfile. If specified, everything will be written into the
+ /// file instead of stdout.
+ #[arg(
+ long = "logfile",
+ value_name = "PATH",
+ help = "Write logs to the specified file instead of stdout",
+ )]
+ pub logfile: Option<String>,
+
+ /// A list of filters. Tests whose names contain parts of any of these
+ /// filters are skipped.
+ #[arg(
+ long = "skip",
+ value_name = "FILTER",
+ num_args = 1,
+ help = "Skip tests whose names contain FILTER (this flag can be used multiple times)",
+ )]
+ pub skip: Vec<String>,
+
+ /// Specifies whether or not to color the output.
+ #[arg(
+ long = "color",
+ value_enum,
+ value_name = "auto|always|never",
+ help = "Configure coloring of output: \n\
+ - auto = colorize if stdout is a tty and tests are run on serially (default)\n\
+ - always = always colorize output\n\
+ - never = never colorize output\n",
+ )]
+ pub color: Option<ColorSetting>,
+
+ /// Specifies the format of the output.
+ #[arg(
+ long = "format",
+ value_enum,
+ value_name = "pretty|terse|json",
+ help = "Configure formatting of output: \n\
+ - pretty = Print verbose output\n\
+ - terse = Display one character per test\n",
+ )]
+ pub format: Option<FormatSetting>,
+
+ // ============== POSITIONAL VALUES =======================================
+ /// Filter string. Only tests which contain this string are run.
+ #[arg(
+ value_name = "FILTER",
+ help = "The FILTER string is tested against the name of all tests, and only those tests \
+ whose names contain the filter are run.",
+ )]
+ pub filter: Option<String>,
+}
+
+impl Arguments {
+ /// Parses the global CLI arguments given to the application.
+ ///
+ /// If the parsing fails (due to incorrect CLI args), an error is shown and
+ /// the application exits. If help is requested (`-h` or `--help`), a help
+ /// message is shown and the application exits, too.
+ pub fn from_args() -> Self {
+ Parser::parse()
+ }
+
+ /// Like `from_args()`, but operates on an explicit iterator and not the
+ /// global arguments. Note that the first element is the executable name!
+ pub fn from_iter<I>(iter: I) -> Self
+ where
+ Self: Sized,
+ I: IntoIterator,
+ I::Item: Into<std::ffi::OsString> + Clone,
+ {
+ Parser::parse_from(iter)
+ }
+}
+
+/// Possible values for the `--color` option.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)]
+pub enum ColorSetting {
+    /// Colorize output if stdout is a tty and tests are run serially
+ /// (default).
+ Auto,
+
+ /// Always colorize output.
+ Always,
+
+ /// Never colorize output.
+ Never,
+}
+
+impl Default for ColorSetting {
+ fn default() -> Self {
+ ColorSetting::Auto
+ }
+}
+
+/// Possible values for the `--format` option.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, ValueEnum)]
+pub enum FormatSetting {
+ /// One line per test. Output for humans. (default)
+ Pretty,
+
+    /// One character per test. Useful for test suites with many tests.
+ Terse,
+}
+
+impl Default for FormatSetting {
+ fn default() -> Self {
+ FormatSetting::Pretty
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn verify_cli() {
+ use clap::CommandFactory;
+ Arguments::command().debug_assert();
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..634ce89
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,514 @@
+//! Write your own tests and benchmarks that look and behave like built-in tests!
+//!
+//! This is a simple and small test harness that mimics the original `libtest`
+//! (used by `cargo test`/`rustc --test`). That means: all output looks pretty
+//! much like `cargo test` and most CLI arguments are understood and used. With
+//! that plumbing work out of the way, your test runner can focus on the actual
+//! testing.
+//!
+//! For a small real world example, see [`examples/tidy.rs`][1].
+//!
+//! [1]: https://github.com/LukasKalbertodt/libtest-mimic/blob/master/examples/tidy.rs
+//!
+//! # Usage
+//!
+//! To use this, you most likely want to add a manual `[[test]]` section to
+//! `Cargo.toml` and set `harness = false`. For example:
+//!
+//! ```toml
+//! [[test]]
+//! name = "mytest"
+//! path = "tests/mytest.rs"
+//! harness = false
+//! ```
+//!
+//! And in `tests/mytest.rs` you would call [`run`] in the `main` function:
+//!
+//! ```no_run
+//! use libtest_mimic::{Arguments, Trial};
+//!
+//!
+//! // Parse command line arguments
+//! let args = Arguments::from_args();
+//!
+//! // Create a list of tests and/or benchmarks (in this case: two dummy tests).
+//! let tests = vec![
+//! Trial::test("succeeding_test", move || Ok(())),
+//! Trial::test("failing_test", move || Err("Woops".into())),
+//! ];
+//!
+//! // Run all tests and exit the application appropriately.
+//! libtest_mimic::run(&args, tests).exit();
+//! ```
+//!
+//! Instead of returning `Ok` or `Err` directly, you want to actually perform
+//! your tests, of course. See [`Trial::test`] for more information on how to
+//! define a test. You can of course list all your tests manually. But in many
+//! cases it is useful to generate one test per file in a directory, for
+//! example.
+//!
+//! You can then run `cargo test --test mytest` to run it. To see the CLI
+//! arguments supported by this crate, run `cargo test --test mytest -- -h`.
+//!
+//!
+//! # Known limitations and differences to the official test harness
+//!
+//! `libtest-mimic` works on a best-effort basis: it tries to be as close to
+//! `libtest` as possible, but there are differences for a variety of reasons.
+//! For example, some rarely used features might not be implemented, some
+//! features are extremely difficult to implement, and removing minor,
+//! unimportant differences is just not worth the hassle.
+//!
+//! Some of the notable differences:
+//!
+//! - Output capture and `--nocapture`: simply not supported. The official
+//! `libtest` uses internal `std` functions to temporarily redirect output.
+//! `libtest-mimic` cannot use those. See [this issue][capture] for more
+//! information.
+//! - `--format=json|junit`
+//!
+//! [capture]: https://github.com/LukasKalbertodt/libtest-mimic/issues/9
+
+use std::{process, sync::mpsc, fmt, time::Instant};
+
+mod args;
+mod printer;
+
+use printer::Printer;
+use threadpool::ThreadPool;
+
+pub use crate::args::{Arguments, ColorSetting, FormatSetting};
+
+
+
+/// A single test or benchmark.
+///
+/// `libtest` often treats benchmarks as "tests", which is a bit confusing. So
+/// in this library, it is called "trial".
+///
+/// A trial is created via [`Trial::test`] or [`Trial::bench`]. The trial's
+/// `name` is printed and used for filtering. The `runner` is called when the
+/// test/benchmark is executed to determine its outcome. If `runner` panics,
+/// the trial is considered "failed". If you need the behavior of
+/// `#[should_panic]` you need to catch the panic yourself. You likely want to
+/// compare the panic payload to an expected value anyway.
+pub struct Trial {
+ runner: Box<dyn FnOnce(bool) -> Outcome + Send>,
+ info: TestInfo,
+}
+
+impl Trial {
+ /// Creates a (non-benchmark) test with the given name and runner.
+ ///
+ /// The runner returning `Ok(())` is interpreted as the test passing. If the
+ /// runner returns `Err(_)`, the test is considered failed.
+ pub fn test<R>(name: impl Into<String>, runner: R) -> Self
+ where
+ R: FnOnce() -> Result<(), Failed> + Send + 'static,
+ {
+ Self {
+ runner: Box::new(move |_test_mode| match runner() {
+ Ok(()) => Outcome::Passed,
+ Err(failed) => Outcome::Failed(failed),
+ }),
+ info: TestInfo {
+ name: name.into(),
+ kind: String::new(),
+ is_ignored: false,
+ is_bench: false,
+ },
+ }
+ }
+
+ /// Creates a benchmark with the given name and runner.
+ ///
+ /// If the runner's parameter `test_mode` is `true`, the runner function
+ /// should run all code just once, without measuring, just to make sure it
+ /// does not panic. If the parameter is `false`, it should perform the
+ /// actual benchmark. If `test_mode` is `true` you may return `Ok(None)`,
+ /// but if it's `false`, you have to return a `Measurement`, or else the
+ /// benchmark is considered a failure.
+ ///
+ /// `test_mode` is `true` if neither `--bench` nor `--test` are set, and
+ /// `false` when `--bench` is set. If `--test` is set, benchmarks are not
+    /// run at all, and both flags cannot be set at the same time.
+ pub fn bench<R>(name: impl Into<String>, runner: R) -> Self
+ where
+ R: FnOnce(bool) -> Result<Option<Measurement>, Failed> + Send + 'static,
+ {
+ Self {
+ runner: Box::new(move |test_mode| match runner(test_mode) {
+ Err(failed) => Outcome::Failed(failed),
+ Ok(_) if test_mode => Outcome::Passed,
+ Ok(Some(measurement)) => Outcome::Measured(measurement),
+ Ok(None)
+ => Outcome::Failed("bench runner returned `Ok(None)` in bench mode".into()),
+ }),
+ info: TestInfo {
+ name: name.into(),
+ kind: String::new(),
+ is_ignored: false,
+ is_bench: true,
+ },
+ }
+ }
+
+ /// Sets the "kind" of this test/benchmark. If this string is not
+ /// empty, it is printed in brackets before the test name (e.g.
+ /// `test [my-kind] test_name`). (Default: *empty*)
+ ///
+ /// This is the only extension to the original libtest.
+ pub fn with_kind(self, kind: impl Into<String>) -> Self {
+ Self {
+ info: TestInfo {
+ kind: kind.into(),
+ ..self.info
+ },
+ ..self
+ }
+ }
+
+ /// Sets whether or not this test is considered "ignored". (Default: `false`)
+ ///
+ /// With the built-in test suite, you can annotate `#[ignore]` on tests to
+ /// not execute them by default (for example because they take a long time
+ /// or require a special environment). If the `--ignored` flag is set,
+ /// ignored tests are executed, too.
+ pub fn with_ignored_flag(self, is_ignored: bool) -> Self {
+ Self {
+ info: TestInfo {
+ is_ignored,
+ ..self.info
+ },
+ ..self
+ }
+ }
+
+ /// Returns the name of this trial.
+ pub fn name(&self) -> &str {
+ &self.info.name
+ }
+
+ /// Returns the kind of this trial. If you have not set a kind, this is an
+ /// empty string.
+ pub fn kind(&self) -> &str {
+ &self.info.kind
+ }
+
+ /// Returns whether this trial has been marked as *ignored*.
+ pub fn has_ignored_flag(&self) -> bool {
+ self.info.is_ignored
+ }
+
+ /// Returns `true` iff this trial is a test (as opposed to a benchmark).
+ pub fn is_test(&self) -> bool {
+ !self.info.is_bench
+ }
+
+ /// Returns `true` iff this trial is a benchmark (as opposed to a test).
+ pub fn is_bench(&self) -> bool {
+ self.info.is_bench
+ }
+}
+
+impl fmt::Debug for Trial {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ struct OpaqueRunner;
+ impl fmt::Debug for OpaqueRunner {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("<runner>")
+ }
+ }
+
+ f.debug_struct("Test")
+ .field("runner", &OpaqueRunner)
+ .field("name", &self.info.name)
+ .field("kind", &self.info.kind)
+ .field("is_ignored", &self.info.is_ignored)
+ .field("is_bench", &self.info.is_bench)
+ .finish()
+ }
+}
+
+#[derive(Debug)]
+struct TestInfo {
+ name: String,
+ kind: String,
+ is_ignored: bool,
+ is_bench: bool,
+}
+
+/// Output of a benchmark.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub struct Measurement {
+ /// Average time in ns.
+ pub avg: u64,
+
+ /// Variance in ns.
+ pub variance: u64,
+}
+
+/// Indicates that a test/benchmark has failed. Optionally carries a message.
+///
+/// You usually want to use the `From` impl of this type, which allows you to
+/// convert any `T: fmt::Display` (e.g. `String`, `&str`, ...) into `Failed`.
+#[derive(Debug, Clone)]
+pub struct Failed {
+ msg: Option<String>,
+}
+
+impl Failed {
+ /// Creates an instance without message.
+ pub fn without_message() -> Self {
+ Self { msg: None }
+ }
+
+ /// Returns the message of this instance.
+ pub fn message(&self) -> Option<&str> {
+ self.msg.as_deref()
+ }
+}
+
+impl<M: std::fmt::Display> From<M> for Failed {
+ fn from(msg: M) -> Self {
+ Self {
+ msg: Some(msg.to_string())
+ }
+ }
+}
+
+
+
+/// The outcome of performing a test/benchmark.
+#[derive(Debug, Clone)]
+enum Outcome {
+ /// The test passed.
+ Passed,
+
+ /// The test or benchmark failed.
+ Failed(Failed),
+
+ /// The test or benchmark was ignored.
+ Ignored,
+
+ /// The benchmark was successfully run.
+ Measured(Measurement),
+}
+
+/// Contains information about the entire test run. Is returned by [`run`].
+///
+/// This type is marked as `#[must_use]`. Usually, you just call
+/// [`exit()`][Conclusion::exit] on the result of `run` to exit the application
+/// with the correct exit code. But you can also store this value and inspect
+/// its data.
+#[derive(Clone, Debug, PartialEq, Eq)]
+#[must_use = "Call `exit()` or `exit_if_failed()` to set the correct return code"]
+pub struct Conclusion {
+ /// Number of tests and benchmarks that were filtered out (either by the
+ /// filter-in pattern or by `--skip` arguments).
+ pub num_filtered_out: u64,
+
+ /// Number of passed tests.
+ pub num_passed: u64,
+
+ /// Number of failed tests and benchmarks.
+ pub num_failed: u64,
+
+ /// Number of ignored tests and benchmarks.
+ pub num_ignored: u64,
+
+ /// Number of benchmarks that successfully ran.
+ pub num_measured: u64,
+}
+
+impl Conclusion {
+ /// Exits the application with an appropriate error code (0 if all tests
+ /// have passed, 101 if there have been failures).
+ pub fn exit(&self) -> ! {
+ self.exit_if_failed();
+ process::exit(0);
+ }
+
+ /// Exits the application with error code 101 if there were any failures.
+ /// Otherwise, returns normally.
+ pub fn exit_if_failed(&self) {
+ if self.has_failed() {
+ process::exit(101)
+ }
+ }
+
+ /// Returns whether there have been any failures.
+ pub fn has_failed(&self) -> bool {
+ self.num_failed > 0
+ }
+
+ fn empty() -> Self {
+ Self {
+ num_filtered_out: 0,
+ num_passed: 0,
+ num_failed: 0,
+ num_ignored: 0,
+ num_measured: 0,
+ }
+ }
+}
+
+impl Arguments {
+ /// Returns `true` if the given test should be ignored.
+ fn is_ignored(&self, test: &Trial) -> bool {
+ (test.info.is_ignored && !self.ignored && !self.include_ignored)
+ || (test.info.is_bench && self.test)
+ || (!test.info.is_bench && self.bench)
+ }
+
+ fn is_filtered_out(&self, test: &Trial) -> bool {
+ let test_name = &test.info.name;
+
+ // If a filter was specified, apply this
+ if let Some(filter) = &self.filter {
+ match self.exact {
+ true if test_name != filter => return true,
+ false if !test_name.contains(filter) => return true,
+ _ => {}
+ };
+ }
+
+        // If any skip patterns were specified, test against all patterns.
+ for skip_filter in &self.skip {
+ match self.exact {
+ true if test_name == skip_filter => return true,
+ false if test_name.contains(skip_filter) => return true,
+ _ => {}
+ }
+ }
+
+ if self.ignored && !test.info.is_ignored {
+ return true;
+ }
+
+ false
+ }
+}
+
+/// Runs all given tests.
+///
+/// This is the central function of this crate. It provides the framework for
+/// the testing harness. It does all the printing and house keeping.
+///
+/// The returned value contains some useful information. See
+/// [`Conclusion`] for more information. If `--list` was specified, a list is
+/// printed and a dummy `Conclusion` is returned.
+pub fn run(args: &Arguments, mut tests: Vec<Trial>) -> Conclusion {
+ let start_instant = Instant::now();
+ let mut conclusion = Conclusion::empty();
+
+ // Apply filtering
+ if args.filter.is_some() || !args.skip.is_empty() || args.ignored {
+ let len_before = tests.len() as u64;
+ tests.retain(|test| !args.is_filtered_out(test));
+ conclusion.num_filtered_out = len_before - tests.len() as u64;
+ }
+ let tests = tests;
+
+ // Create printer which is used for all output.
+ let mut printer = printer::Printer::new(args, &tests);
+
+ // If `--list` is specified, just print the list and return.
+ if args.list {
+ printer.print_list(&tests, args.ignored);
+ return Conclusion::empty();
+ }
+
+ // Print number of tests
+ printer.print_title(tests.len() as u64);
+
+ let mut failed_tests = Vec::new();
+ let mut handle_outcome = |outcome: Outcome, test: TestInfo, printer: &mut Printer| {
+ printer.print_single_outcome(&outcome);
+
+ // Handle outcome
+ match outcome {
+ Outcome::Passed => conclusion.num_passed += 1,
+ Outcome::Failed(failed) => {
+ failed_tests.push((test, failed.msg));
+ conclusion.num_failed += 1;
+ },
+ Outcome::Ignored => conclusion.num_ignored += 1,
+ Outcome::Measured(_) => conclusion.num_measured += 1,
+ }
+ };
+
+ // Execute all tests.
+ let test_mode = !args.bench;
+ if args.test_threads == Some(1) {
+ // Run test sequentially in main thread
+ for test in tests {
+ // Print `test foo ...`, run the test, then print the outcome in
+ // the same line.
+ printer.print_test(&test.info);
+ let outcome = if args.is_ignored(&test) {
+ Outcome::Ignored
+ } else {
+ run_single(test.runner, test_mode)
+ };
+ handle_outcome(outcome, test.info, &mut printer);
+ }
+ } else {
+ // Run test in thread pool.
+ let pool = ThreadPool::default();
+ let (sender, receiver) = mpsc::channel();
+
+ let num_tests = tests.len();
+ for test in tests {
+ if args.is_ignored(&test) {
+ sender.send((Outcome::Ignored, test.info)).unwrap();
+ } else {
+ let sender = sender.clone();
+ pool.execute(move || {
+ // It's fine to ignore the result of sending. If the
+ // receiver has hung up, everything will wind down soon
+ // anyway.
+ let outcome = run_single(test.runner, test_mode);
+ let _ = sender.send((outcome, test.info));
+ });
+ }
+ }
+
+ for (outcome, test_info) in receiver.iter().take(num_tests) {
+ // In multithreaded mode, we do only print the start of the line
+ // after the test ran, as otherwise it would lead to terribly
+ // interleaved output.
+ printer.print_test(&test_info);
+ handle_outcome(outcome, test_info, &mut printer);
+ }
+ }
+
+ // Print failures if there were any, and the final summary.
+ if !failed_tests.is_empty() {
+ printer.print_failures(&failed_tests);
+ }
+
+ printer.print_summary(&conclusion, start_instant.elapsed());
+
+ conclusion
+}
+
+/// Runs the given runner, catching any panics and treating them as a failed test.
+fn run_single(runner: Box<dyn FnOnce(bool) -> Outcome + Send>, test_mode: bool) -> Outcome {
+ use std::panic::{catch_unwind, AssertUnwindSafe};
+
+ catch_unwind(AssertUnwindSafe(move || runner(test_mode))).unwrap_or_else(|e| {
+ // The `panic` information is just an `Any` object representing the
+ // value the panic was invoked with. For most panics (which use
+ // `panic!` like `println!`), this is either `&str` or `String`.
+ let payload = e.downcast_ref::<String>()
+ .map(|s| s.as_str())
+ .or(e.downcast_ref::<&str>().map(|s| *s));
+
+ let msg = match payload {
+ Some(payload) => format!("test panicked: {payload}"),
+ None => format!("test panicked"),
+ };
+ Outcome::Failed(msg.into())
+ })
+}
diff --git a/src/printer.rs b/src/printer.rs
new file mode 100644
index 0000000..d0766f0
--- /dev/null
+++ b/src/printer.rs
@@ -0,0 +1,292 @@
+//! Definition of the `Printer`.
+//!
+//! This is just an abstraction for everything that is printed to the screen
+//! (or logfile, if specified). These parameters influence printing:
+//! - `color`
+//! - `format` (and `quiet`)
+//! - `logfile`
+
+use std::{fs::File, time::Duration};
+
+use termcolor::{Ansi, Color, ColorChoice, ColorSpec, NoColor, StandardStream, WriteColor};
+
+use crate::{
+ Arguments, ColorSetting, Conclusion, FormatSetting, Outcome, Trial, Failed,
+ Measurement, TestInfo,
+};
+
+pub(crate) struct Printer {
+ out: Box<dyn WriteColor>,
+ format: FormatSetting,
+ name_width: usize,
+ kind_width: usize,
+}
+
+impl Printer {
+ /// Creates a new printer configured by the given arguments (`format`,
+ /// `quiet`, `color` and `logfile` options).
+ pub(crate) fn new(args: &Arguments, tests: &[Trial]) -> Self {
+ let color_arg = args.color.unwrap_or(ColorSetting::Auto);
+
+ // Determine target of all output
+ let out = if let Some(logfile) = &args.logfile {
+ let f = File::create(logfile).expect("failed to create logfile");
+ if color_arg == ColorSetting::Always {
+ Box::new(Ansi::new(f)) as Box<dyn WriteColor>
+ } else {
+ Box::new(NoColor::new(f))
+ }
+ } else {
+ let choice = match color_arg {
+ ColorSetting::Auto => ColorChoice::Auto,
+ ColorSetting::Always => ColorChoice::Always,
+ ColorSetting::Never => ColorChoice::Never,
+ };
+ Box::new(StandardStream::stdout(choice))
+ };
+
+ // Determine correct format
+ let format = if args.quiet {
+ FormatSetting::Terse
+ } else {
+ args.format.unwrap_or(FormatSetting::Pretty)
+ };
+
+ // Determine max test name length to do nice formatting later.
+ //
+ // Unicode is hard and there is no way we can properly align/pad the
+ // test names and outcomes. Counting the number of code points is just
+ // a cheap way that works in most cases. Usually, these names are
+ // ASCII.
+ let name_width = tests.iter()
+ .map(|test| test.info.name.chars().count())
+ .max()
+ .unwrap_or(0);
+
+ let kind_width = tests.iter()
+ .map(|test| {
+ if test.info.kind.is_empty() {
+ 0
+ } else {
+ // The two braces [] and one space
+ test.info.kind.chars().count() + 3
+ }
+ })
+ .max()
+ .unwrap_or(0);
+
+ Self {
+ out,
+ format,
+ name_width,
+ kind_width,
+ }
+ }
+
+ /// Prints the first line "running 3 tests".
+ pub(crate) fn print_title(&mut self, num_tests: u64) {
+ match self.format {
+ FormatSetting::Pretty | FormatSetting::Terse => {
+ let plural_s = if num_tests == 1 { "" } else { "s" };
+
+ writeln!(self.out).unwrap();
+ writeln!(self.out, "running {} test{}", num_tests, plural_s).unwrap();
+ }
+ }
+ }
+
+ /// Prints the text announcing the test (e.g. "test foo::bar ... "). Prints
+ /// nothing in terse mode.
+ pub(crate) fn print_test(&mut self, info: &TestInfo) {
+ let TestInfo { name, kind, .. } = info;
+ match self.format {
+ FormatSetting::Pretty => {
+ let kind = if kind.is_empty() {
+ format!("")
+ } else {
+ format!("[{}] ", kind)
+ };
+
+ write!(
+ self.out,
+ "test {: <2$}{: <3$} ... ",
+ kind,
+ name,
+ self.kind_width,
+ self.name_width,
+ ).unwrap();
+ self.out.flush().unwrap();
+ }
+ FormatSetting::Terse => {
+ // In terse mode, nothing is printed before the job. Only
+ // `print_single_outcome` prints one character.
+ }
+ }
+ }
+
+ /// Prints the outcome of a single tests. `ok` or `FAILED` in pretty mode
+ /// and `.` or `F` in terse mode.
+ pub(crate) fn print_single_outcome(&mut self, outcome: &Outcome) {
+ match self.format {
+ FormatSetting::Pretty => {
+ self.print_outcome_pretty(outcome);
+ writeln!(self.out).unwrap();
+ }
+ FormatSetting::Terse => {
+ let c = match outcome {
+ Outcome::Passed => '.',
+ Outcome::Failed { .. } => 'F',
+ Outcome::Ignored => 'i',
+ Outcome::Measured { .. } => {
+                    // Benchmarks are never printed in terse mode... for
+ // some reason.
+ self.print_outcome_pretty(outcome);
+ writeln!(self.out).unwrap();
+ return;
+ }
+ };
+
+ self.out.set_color(&color_of_outcome(outcome)).unwrap();
+ write!(self.out, "{}", c).unwrap();
+ self.out.reset().unwrap();
+ }
+ }
+ }
+
+ /// Prints the summary line after all tests have been executed.
+ pub(crate) fn print_summary(&mut self, conclusion: &Conclusion, execution_time: Duration) {
+ match self.format {
+ FormatSetting::Pretty | FormatSetting::Terse => {
+ let outcome = if conclusion.has_failed() {
+ Outcome::Failed(Failed { msg: None })
+ } else {
+ Outcome::Passed
+ };
+
+ writeln!(self.out).unwrap();
+ write!(self.out, "test result: ").unwrap();
+ self.print_outcome_pretty(&outcome);
+ writeln!(
+ self.out,
+ ". {} passed; {} failed; {} ignored; {} measured; \
+ {} filtered out; finished in {:.2}s",
+ conclusion.num_passed,
+ conclusion.num_failed,
+ conclusion.num_ignored,
+ conclusion.num_measured,
+ conclusion.num_filtered_out,
+ execution_time.as_secs_f64()
+ ).unwrap();
+ writeln!(self.out).unwrap();
+ }
+ }
+ }
+
+ /// Prints a list of all tests. Used if `--list` is set.
+ pub(crate) fn print_list(&mut self, tests: &[Trial], ignored: bool) {
+ Self::write_list(tests, ignored, &mut self.out).unwrap();
+ }
+
+ pub(crate) fn write_list(
+ tests: &[Trial],
+ ignored: bool,
+ mut out: impl std::io::Write,
+ ) -> std::io::Result<()> {
+ for test in tests {
+ // libtest prints out:
+ // * all tests without `--ignored`
+ // * just the ignored tests with `--ignored`
+ if ignored && !test.info.is_ignored {
+ continue;
+ }
+
+ let kind = if test.info.kind.is_empty() {
+ format!("")
+ } else {
+ format!("[{}] ", test.info.kind)
+ };
+
+ writeln!(
+ out,
+ "{}{}: {}",
+ kind,
+ test.info.name,
+ if test.info.is_bench { "bench" } else { "test" },
+ )?;
+ }
+
+ Ok(())
+ }
+
+ /// Prints a list of failed tests with their messages. This is only called
+ /// if there were any failures.
+ pub(crate) fn print_failures(&mut self, fails: &[(TestInfo, Option<String>)]) {
+ writeln!(self.out).unwrap();
+ writeln!(self.out, "failures:").unwrap();
+ writeln!(self.out).unwrap();
+
+ // Print messages of all tests
+ for (test_info, msg) in fails {
+ writeln!(self.out, "---- {} ----", test_info.name).unwrap();
+ if let Some(msg) = msg {
+ writeln!(self.out, "{}", msg).unwrap();
+ }
+ writeln!(self.out).unwrap();
+ }
+
+ // Print summary list of failed tests
+ writeln!(self.out).unwrap();
+ writeln!(self.out, "failures:").unwrap();
+ for (test_info, _) in fails {
+ writeln!(self.out, " {}", test_info.name).unwrap();
+ }
+ }
+
+ /// Prints a colored 'ok'/'FAILED'/'ignored'/'bench'.
+ fn print_outcome_pretty(&mut self, outcome: &Outcome) {
+ let s = match outcome {
+ Outcome::Passed => "ok",
+ Outcome::Failed { .. } => "FAILED",
+ Outcome::Ignored => "ignored",
+ Outcome::Measured { .. } => "bench",
+ };
+
+ self.out.set_color(&color_of_outcome(outcome)).unwrap();
+ write!(self.out, "{}", s).unwrap();
+ self.out.reset().unwrap();
+
+ if let Outcome::Measured(Measurement { avg, variance }) = outcome {
+ write!(
+ self.out,
+ ": {:>11} ns/iter (+/- {})",
+ fmt_with_thousand_sep(*avg),
+ fmt_with_thousand_sep(*variance),
+ ).unwrap();
+ }
+ }
+}
+
+/// Formats the given integer with `,` as thousand separator.
+pub fn fmt_with_thousand_sep(mut v: u64) -> String {
+ let mut out = String::new();
+ while v >= 1000 {
+ out = format!(",{:03}{}", v % 1000, out);
+ v /= 1000;
+ }
+ out = format!("{}{}", v, out);
+
+ out
+}
+
+/// Returns the `ColorSpec` associated with the given outcome.
+fn color_of_outcome(outcome: &Outcome) -> ColorSpec {
+ let mut out = ColorSpec::new();
+ let color = match outcome {
+ Outcome::Passed => Color::Green,
+ Outcome::Failed { .. } => Color::Red,
+ Outcome::Ignored => Color::Yellow,
+ Outcome::Measured { .. } => Color::Cyan,
+ };
+ out.set_fg(Some(color));
+ out
+}
diff --git a/tests/all_passing.rs b/tests/all_passing.rs
new file mode 100644
index 0000000..b5c5552
--- /dev/null
+++ b/tests/all_passing.rs
@@ -0,0 +1,162 @@
+use common::{args, check};
+use libtest_mimic::{Trial, Conclusion};
+use pretty_assertions::assert_eq;
+
+use crate::common::do_run;
+
+#[macro_use]
+mod common;
+
+
+fn tests() -> Vec<Trial> {
+ vec![
+ Trial::test("foo", || Ok(())),
+ Trial::test("bar", || Ok(())),
+ Trial::test("barro", || Ok(())),
+ ]
+}
+
/// No filters: all three tests run and pass.
#[test]
fn normal() {
    check(args([]), tests, 3,
        Conclusion {
            num_filtered_out: 0,
            num_passed: 3,
            num_failed: 0,
            num_ignored: 0,
            num_measured: 0,
        },
        "
            test foo ... ok
            test bar ... ok
            test barro ... ok
        "
    );
}
+
/// Substring filter "foo" matches exactly one test name.
#[test]
fn filter_one() {
    check(args(["foo"]), tests, 1,
        Conclusion {
            num_filtered_out: 2,
            num_passed: 1,
            num_failed: 0,
            num_ignored: 0,
            num_measured: 0,
        },
        "test foo ... ok",
    );
}
+
/// Substring filter "bar" matches two tests ("bar" and "barro").
#[test]
fn filter_two() {
    check(args(["bar"]), tests, 2,
        Conclusion {
            num_filtered_out: 1,
            num_passed: 2,
            num_failed: 0,
            num_ignored: 0,
            num_measured: 0,
        },
        "
            test bar ... ok
            test barro ... ok
        ",
    );
}
+
+
/// With `--exact`, "bar" no longer matches "barro".
#[test]
fn filter_exact() {
    check(args(["bar", "--exact"]), tests, 1,
        Conclusion {
            num_filtered_out: 2,
            num_passed: 1,
            num_failed: 0,
            num_ignored: 0,
            num_measured: 0,
        },
        "test bar ... ok",
    );
}
+
/// Filter "bar" combined with `--skip barro` leaves only "bar".
#[test]
fn filter_two_and_skip() {
    check(args(["--skip", "barro", "bar"]), tests, 1,
        Conclusion {
            num_filtered_out: 2,
            num_passed: 1,
            num_failed: 0,
            num_ignored: 0,
            num_measured: 0,
        },
        "test bar ... ok",
    );
}
+
/// `--skip` with a pattern matching no test leaves all tests running.
#[test]
fn skip_nothing() {
    check(args(["--skip", "peter"]), tests, 3,
        Conclusion {
            num_filtered_out: 0,
            num_passed: 3,
            num_failed: 0,
            num_ignored: 0,
            num_measured: 0,
        },
        "
            test foo ... ok
            test bar ... ok
            test barro ... ok
        "
    );
}
+
/// `--skip bar` (substring match) removes both "bar" and "barro".
#[test]
fn skip_two() {
    check(args(["--skip", "bar"]), tests, 1,
        Conclusion {
            num_filtered_out: 2,
            num_passed: 1,
            num_failed: 0,
            num_ignored: 0,
            num_measured: 0,
        },
        "test foo ... ok"
    );
}
+
/// `--exact --skip bar` removes only the exact name "bar"; "barro" stays.
#[test]
fn skip_exact() {
    check(args(["--exact", "--skip", "bar"]), tests, 2,
        Conclusion {
            num_filtered_out: 1,
            num_passed: 2,
            num_failed: 0,
            num_ignored: 0,
            num_measured: 0,
        },
        "
            test foo ... ok
            test barro ... ok
        "
    );
}
+
/// `--format terse` prints one dot per passing test instead of full lines.
#[test]
fn terse_output() {
    let (c, out) = do_run(args(["--format", "terse"]), tests());
    assert_eq!(c, Conclusion {
        num_filtered_out: 0,
        num_passed: 3,
        num_failed: 0,
        num_ignored: 0,
        num_measured: 0,
    });
    assert_log!(out, "
        running 3 tests
        ...
        test result: ok. 3 passed; 0 failed; 0 ignored; 0 measured; 0 filtered out; \
            finished in 0.00s
    ");
}
diff --git a/tests/common/mod.rs b/tests/common/mod.rs
new file mode 100644
index 0000000..bd80340
--- /dev/null
+++ b/tests/common/mod.rs
@@ -0,0 +1,131 @@
+use std::{path::Path, iter::repeat_with, collections::HashMap};
+use pretty_assertions::assert_eq;
+
+use libtest_mimic::{run, Arguments, Conclusion, Trial};
+
+
+const TEMPDIR: &str = env!("CARGO_TARGET_TMPDIR");
+
+pub fn args<const N: usize>(args: [&str; N]) -> Arguments {
+ let mut v = vec!["<dummy-executable>"];
+ v.extend(args);
+ Arguments::from_iter(v)
+}
+
+pub fn do_run(mut args: Arguments, tests: Vec<Trial>) -> (Conclusion, String) {
+ // Create path to temporary file.
+ let suffix = repeat_with(fastrand::alphanumeric).take(10).collect::<String>();
+ let path = Path::new(&TEMPDIR).join(format!("libtest_mimic_output_{suffix}.txt"));
+
+ args.logfile = Some(path.display().to_string());
+
+ let c = run(&args, tests);
+ let output = std::fs::read_to_string(&path)
+ .expect("Can't read temporary logfile");
+ std::fs::remove_file(&path)
+ .expect("Can't remove temporary logfile");
+ (c, output)
+}
+
/// Strips the smallest shared leading indentation from every line of `s`.
///
/// Blank (all-space) lines are ignored when computing the shared indent
/// and are passed through unchanged. Panics if `s` has no non-blank line.
pub fn clean_expected_log(s: &str) -> String {
    use std::fmt::Write;

    // Minimum number of leading spaces over all lines that contain at
    // least one non-space character.
    let shared_indent = s.lines()
        .filter(|line| line.chars().any(|c| c != ' '))
        .map(|line| line.len() - line.trim_start_matches(' ').len())
        .min()
        .expect("empty expected");

    let mut out = String::new();
    for line in s.lines() {
        // Lines shorter than the indent (blank lines) are kept as-is.
        let cropped = if line.len() <= shared_indent {
            line
        } else {
            &line[shared_indent..]
        };
        writeln!(out, "{}", cropped).unwrap();
    }
    out
}
+
/// Best effort tool to check certain things about a log that might have all
/// tests randomly ordered.
///
/// Verifies the `running N tests` header, that the last line contains
/// `tail`, and that the middle lines are exactly `expected_lines` as a
/// multiset (order-insensitive, trimmed, blank lines ignored).
pub fn assert_reordered_log(actual: &str, num: u64, expected_lines: &[&str], tail: &str) {
    let trimmed = actual.trim();
    let (head, rest) = trimmed.split_once('\n').expect("log has too few lines");
    let (body, tail_line) = rest.rsplit_once('\n').expect("log has too few lines");

    let plural = if num == 1 { "" } else { "s" };
    assert_eq!(head, &format!("running {} test{}", num, plural));
    assert!(tail_line.contains(tail));

    // Count occurrences of every trimmed, non-empty middle line.
    let mut counts = HashMap::new();
    for line in body.lines() {
        let line = line.trim();
        if !line.is_empty() {
            *counts.entry(line).or_insert(0) += 1;
        }
    }

    // Every expected line must be accounted for…
    for expected in expected_lines {
        let expected = expected.trim();
        if expected.is_empty() {
            continue;
        }
        match counts.get_mut(expected) {
            None | Some(0) => panic!("expected line \"{expected}\" not in log"),
            Some(count) => *count -= 1,
        }
    }

    // …and nothing unexpected may remain.
    counts.retain(|_, count| *count != 0);
    if !counts.is_empty() {
        panic!("Leftover output in log: {counts:#?}");
    }
}
+
/// Like `assert_eq`, but cleans the expected string (removes indendation).
///
/// Both sides are also trimmed before comparison, so leading/trailing
/// blank lines in either string are insignificant.
#[macro_export]
macro_rules! assert_log {
    ($actual:expr, $expected:expr) => {
        let actual = $actual;
        let expected = crate::common::clean_expected_log($expected);

        assert_eq!(actual.trim(), expected.trim());
    };
}
+
+pub fn check(
+ mut args: Arguments,
+ mut tests: impl FnMut() -> Vec<Trial>,
+ num_running_tests: u64,
+ expected_conclusion: Conclusion,
+ expected_output: &str,
+) {
+ // Run in single threaded mode
+ args.test_threads = Some(1);
+ let (c, out) = do_run(args.clone(), tests());
+ let expected = crate::common::clean_expected_log(expected_output);
+ let actual = {
+ let lines = out.trim().lines().skip(1).collect::<Vec<_>>();
+ lines[..lines.len() - 1].join("\n")
+ };
+ assert_eq!(actual.trim(), expected.trim());
+ assert_eq!(c, expected_conclusion);
+
+ // Run in multithreaded mode.
+ let (c, out) = do_run(args, tests());
+ assert_reordered_log(
+ &out,
+ num_running_tests,
+ &expected_output.lines().collect::<Vec<_>>(),
+ &conclusion_to_output(&c),
+ );
+ assert_eq!(c, expected_conclusion);
+}
+
+fn conclusion_to_output(c: &Conclusion) -> String {
+ let Conclusion { num_filtered_out, num_passed, num_failed, num_ignored, num_measured } = *c;
+ format!(
+ "test result: {}. {} passed; {} failed; {} ignored; {} measured; {} filtered out;",
+ if num_failed > 0 { "FAILED" } else { "ok" },
+ num_passed,
+ num_failed,
+ num_ignored,
+ num_measured,
+ num_filtered_out,
+ )
+}
diff --git a/tests/main_thread.rs b/tests/main_thread.rs
new file mode 100644
index 0000000..c6cc24a
--- /dev/null
+++ b/tests/main_thread.rs
@@ -0,0 +1,16 @@
+use libtest_mimic::{Trial, Arguments};
+
+
/// With `test_threads = 1`, the runner must execute tests on the caller's
/// own thread (so e.g. thread-local state set by the caller is visible).
#[test]
fn check_test_on_main_thread() {
    let outer_thread = std::thread::current().id();

    let mut args = Arguments::default();
    args.test_threads = Some(1);
    let conclusion = libtest_mimic::run(&args, vec![Trial::test("check", move || {
        // Fails (and thereby fails the outer assertion on `num_passed`)
        // if the trial runs on a different thread.
        assert_eq!(outer_thread, std::thread::current().id());
        Ok(())
    })]);

    assert_eq!(conclusion.num_passed, 1);
}
diff --git a/tests/mixed_bag.rs b/tests/mixed_bag.rs
new file mode 100644
index 0000000..a6fe52f
--- /dev/null
+++ b/tests/mixed_bag.rs
@@ -0,0 +1,529 @@
+use pretty_assertions::assert_eq;
+use libtest_mimic::{Trial, Conclusion, Measurement};
+use crate::common::{args, check, do_run};
+
+#[macro_use]
+mod common;
+
+
/// Builds a full matrix of trials: {test, bench} x {pass, fail}
/// x {normal, ignored} x {default kind, custom kind} — 16 in total.
fn tests() -> Vec<Trial> {
    // Shorthand for a successful benchmark measurement.
    fn meas(avg: u64, variance: u64) -> Option<Measurement> {
        Some(Measurement { avg, variance })
    }

    vec![
        Trial::test("cat", || Ok(())),
        Trial::test("dog", || Err("was not a good boy".into())),
        Trial::test("fox", || Ok(())).with_kind("apple"),
        Trial::test("bunny", || Err("jumped too high".into())).with_kind("apple"),
        Trial::test("frog", || Ok(())).with_ignored_flag(true),
        Trial::test("owl", || Err("broke neck".into())).with_ignored_flag(true),
        Trial::test("fly", || Ok(())).with_ignored_flag(true).with_kind("banana"),
        Trial::test("bear", || Err("no honey".into())).with_ignored_flag(true).with_kind("banana"),

        Trial::bench("red", |_| Ok(meas(32, 3))),
        Trial::bench("blue", |_| Err("sky fell down".into())),
        Trial::bench("yellow", |_| Ok(meas(64, 4))).with_kind("kiwi"),
        Trial::bench("green", |_| Err("was poisoned".into())).with_kind("kiwi"),
        Trial::bench("purple", |_| Ok(meas(100, 5))).with_ignored_flag(true),
        Trial::bench("cyan", |_| Err("not creative enough".into())).with_ignored_flag(true),
        Trial::bench("orange", |_| Ok(meas(17, 6))).with_ignored_flag(true).with_kind("banana"),
        Trial::bench("pink", |_| Err("bad".into())).with_ignored_flag(true).with_kind("banana"),
    ]
}
+
/// Default mode: ignored trials are skipped; tests and benches both run.
#[test]
fn normal() {
    check(args([]), tests, 16,
        Conclusion {
            num_filtered_out: 0,
            num_passed: 4,
            num_failed: 4,
            num_ignored: 8,
            num_measured: 0,
        },
        "
            test cat ... ok
            test dog ... FAILED
            test [apple] fox ... ok
            test [apple] bunny ... FAILED
            test frog ... ignored
            test owl ... ignored
            test [banana] fly ... ignored
            test [banana] bear ... ignored
            test red ... ok
            test blue ... FAILED
            test [kiwi] yellow ... ok
            test [kiwi] green ... FAILED
            test purple ... ignored
            test cyan ... ignored
            test [banana] orange ... ignored
            test [banana] pink ... ignored

            failures:

            ---- dog ----
            was not a good boy

            ---- bunny ----
            jumped too high

            ---- blue ----
            sky fell down

            ---- green ----
            was poisoned


            failures:
                dog
                bunny
                blue
                green
        ",
    );
}
+
/// `--test`: benches are reported as ignored; only tests actually run.
#[test]
fn test_mode() {
    check(args(["--test"]), tests, 16,
        Conclusion {
            num_filtered_out: 0,
            num_passed: 2,
            num_failed: 2,
            num_ignored: 12,
            num_measured: 0,
        },
        "
            test cat ... ok
            test dog ... FAILED
            test [apple] fox ... ok
            test [apple] bunny ... FAILED
            test frog ... ignored
            test owl ... ignored
            test [banana] fly ... ignored
            test [banana] bear ... ignored
            test red ... ignored
            test blue ... ignored
            test [kiwi] yellow ... ignored
            test [kiwi] green ... ignored
            test purple ... ignored
            test cyan ... ignored
            test [banana] orange ... ignored
            test [banana] pink ... ignored

            failures:

            ---- dog ----
            was not a good boy

            ---- bunny ----
            jumped too high


            failures:
                dog
                bunny
        ",
    );
}
+
/// `--bench`: tests are reported as ignored; only benches actually run
/// and successful ones print their measurement.
#[test]
fn bench_mode() {
    check(args(["--bench"]), tests, 16,
        Conclusion {
            num_filtered_out: 0,
            num_passed: 0,
            num_failed: 2,
            num_ignored: 12,
            num_measured: 2,
        },
        "
            test cat ... ignored
            test dog ... ignored
            test [apple] fox ... ignored
            test [apple] bunny ... ignored
            test frog ... ignored
            test owl ... ignored
            test [banana] fly ... ignored
            test [banana] bear ... ignored
            test red ... bench: 32 ns/iter (+/- 3)
            test blue ... FAILED
            test [kiwi] yellow ... bench: 64 ns/iter (+/- 4)
            test [kiwi] green ... FAILED
            test purple ... ignored
            test cyan ... ignored
            test [banana] orange ... ignored
            test [banana] pink ... ignored

            failures:

            ---- blue ----
            sky fell down

            ---- green ----
            was poisoned


            failures:
                blue
                green
        ",
    );
}
+
/// `--list` prints every trial as `name: kind` and runs nothing.
#[test]
fn list() {
    let (c, out) = common::do_run(args(["--list"]), tests());
    assert_log!(out, "
        cat: test
        dog: test
        [apple] fox: test
        [apple] bunny: test
        frog: test
        owl: test
        [banana] fly: test
        [banana] bear: test
        red: bench
        blue: bench
        [kiwi] yellow: bench
        [kiwi] green: bench
        purple: bench
        cyan: bench
        [banana] orange: bench
        [banana] pink: bench
    ");
    // Nothing was executed, so all counters are zero.
    assert_eq!(c, Conclusion {
        num_filtered_out: 0,
        num_passed: 0,
        num_failed: 0,
        num_ignored: 0,
        num_measured: 0,
    });
}
+
/// `--list --ignored` lists only the trials flagged as ignored.
#[test]
fn list_ignored() {
    let (c, out) = common::do_run(args(["--list", "--ignored"]), tests());
    assert_log!(out, "
        frog: test
        owl: test
        [banana] fly: test
        [banana] bear: test
        purple: bench
        cyan: bench
        [banana] orange: bench
        [banana] pink: bench
    ");
    // Nothing was executed, so all counters are zero.
    assert_eq!(c, Conclusion {
        num_filtered_out: 0,
        num_passed: 0,
        num_failed: 0,
        num_ignored: 0,
        num_measured: 0,
    });
}
+
/// `--list` with a substring filter ("a") lists only matching names.
#[test]
fn list_with_filter() {
    let (c, out) = common::do_run(args(["--list", "a"]), tests());
    assert_log!(out, "
        cat: test
        [banana] bear: test
        cyan: bench
        [banana] orange: bench
    ");
    // Nothing was executed, so all counters are zero.
    assert_eq!(c, Conclusion {
        num_filtered_out: 0,
        num_passed: 0,
        num_failed: 0,
        num_ignored: 0,
        num_measured: 0,
    });
}
+
/// Substring filter "c" matches "cat" (runs) and "cyan" (stays ignored).
#[test]
fn filter_c() {
    check(args(["c"]), tests, 2,
        Conclusion {
            num_filtered_out: 14,
            num_passed: 1,
            num_failed: 0,
            num_ignored: 1,
            num_measured: 0,
        },
        "
            test cat ... ok
            test cyan ... ignored
        ",
    );
}
+
/// `--test` plus substring filter "o": matching benches are ignored,
/// matching non-ignored tests run.
#[test]
fn filter_o_test() {
    check(args(["--test", "o"]), tests, 6,
        Conclusion {
            num_filtered_out: 10,
            num_passed: 1,
            num_failed: 1,
            num_ignored: 4,
            num_measured: 0,
        },
        "
            test dog ... FAILED
            test [apple] fox ... ok
            test frog ... ignored
            test owl ... ignored
            test [kiwi] yellow ... ignored
            test [banana] orange ... ignored

            failures:

            ---- dog ----
            was not a good boy


            failures:
                dog
        ",
    );
}
+
/// As `filter_o_test`, but `--include-ignored` also runs the ignored tests.
#[test]
fn filter_o_test_include_ignored() {
    check(args(["--test", "--include-ignored", "o"]), tests, 6,
        Conclusion {
            num_filtered_out: 10,
            num_passed: 2,
            num_failed: 2,
            num_ignored: 2,
            num_measured: 0,
        },
        "
            test dog ... FAILED
            test [apple] fox ... ok
            test frog ... ok
            test owl ... FAILED
            test [kiwi] yellow ... ignored
            test [banana] orange ... ignored

            failures:

            ---- dog ----
            was not a good boy

            ---- owl ----
            broke neck


            failures:
                dog
                owl
        ",
    );
}
+
/// `--ignored` (without include) selects only the ignored, matching trials.
#[test]
fn filter_o_test_ignored() {
    check(args(["--test", "--ignored", "o"]), tests, 3,
        Conclusion {
            num_filtered_out: 13,
            num_passed: 1,
            num_failed: 1,
            num_ignored: 1,
            num_measured: 0,
        },
        "
            test frog ... ok
            test owl ... FAILED
            test [banana] orange ... ignored

            failures:

            ---- owl ----
            broke neck


            failures:
                owl
        ",
    );
}
+
/// `--include-ignored` with no filter: all 16 trials actually run.
#[test]
fn normal_include_ignored() {
    check(args(["--include-ignored"]), tests, 16,
        Conclusion {
            num_filtered_out: 0,
            num_passed: 8,
            num_failed: 8,
            num_ignored: 0,
            num_measured: 0,
        },
        "
            test cat ... ok
            test dog ... FAILED
            test [apple] fox ... ok
            test [apple] bunny ... FAILED
            test frog ... ok
            test owl ... FAILED
            test [banana] fly ... ok
            test [banana] bear ... FAILED
            test red ... ok
            test blue ... FAILED
            test [kiwi] yellow ... ok
            test [kiwi] green ... FAILED
            test purple ... ok
            test cyan ... FAILED
            test [banana] orange ... ok
            test [banana] pink ... FAILED

            failures:

            ---- dog ----
            was not a good boy

            ---- bunny ----
            jumped too high

            ---- owl ----
            broke neck

            ---- bear ----
            no honey

            ---- blue ----
            sky fell down

            ---- green ----
            was poisoned

            ---- cyan ----
            not creative enough

            ---- pink ----
            bad


            failures:
                dog
                bunny
                owl
                bear
                blue
                green
                cyan
                pink
        ",
    );
}
+
/// `--ignored`: only the eight ignored trials are selected and run.
#[test]
fn normal_ignored() {
    check(args(["--ignored"]), tests, 8,
        Conclusion {
            num_filtered_out: 8,
            num_passed: 4,
            num_failed: 4,
            num_ignored: 0,
            num_measured: 0,
        },
        "
            test frog ... ok
            test owl ... FAILED
            test [banana] fly ... ok
            test [banana] bear ... FAILED
            test purple ... ok
            test cyan ... FAILED
            test [banana] orange ... ok
            test [banana] pink ... FAILED

            failures:

            ---- owl ----
            broke neck

            ---- bear ----
            no honey

            ---- cyan ----
            not creative enough

            ---- pink ----
            bad


            failures:
                owl
                bear
                cyan
                pink
        ",
    );
}
+
/// Combination of `--include-ignored`, `--skip g` and `--test` with
/// filter "o": skip wins over include, benches stay ignored in test mode.
#[test]
fn lots_of_flags() {
    check(args(["--include-ignored", "--skip", "g", "--test", "o"]), tests, 3,
        Conclusion {
            num_filtered_out: 13,
            num_passed: 1,
            num_failed: 1,
            num_ignored: 1,
            num_measured: 0,
        },
        "
            test [apple] fox ... ok
            test owl ... FAILED
            test [kiwi] yellow ... ignored

            failures:

            ---- owl ----
            broke neck


            failures:
                owl
        ",
    );
}
+
/// `--format terse` prints one char per trial (`.`/`F`/`i`) but still
/// emits the full failure report at the end.
#[test]
fn terse_output() {
    // Single threaded so the dot/F/i sequence has a deterministic order.
    let (c, out) = do_run(args(["--format", "terse", "--test-threads", "1"]), tests());
    assert_eq!(c, Conclusion {
        num_filtered_out: 0,
        num_passed: 4,
        num_failed: 4,
        num_ignored: 8,
        num_measured: 0,
    });
    assert_log!(out, "
        running 16 tests
        .F.Fiiii.F.Fiiii
        failures:

        ---- dog ----
        was not a good boy

        ---- bunny ----
        jumped too high

        ---- blue ----
        sky fell down

        ---- green ----
        was poisoned


        failures:
            dog
            bunny
            blue
            green

        test result: FAILED. 4 passed; 4 failed; 8 ignored; 0 measured; 0 filtered out; \
            finished in 0.00s
    ");
}
diff --git a/tests/panic.rs b/tests/panic.rs
new file mode 100644
index 0000000..503985a
--- /dev/null
+++ b/tests/panic.rs
@@ -0,0 +1,39 @@
+use common::{args, check};
+use libtest_mimic::{Trial, Conclusion};
+
+#[macro_use]
+mod common;
+
+
/// One passing trial and one that panics (the runner must catch the
/// panic and turn it into a failure rather than aborting).
fn tests() -> Vec<Trial> {
    vec![
        Trial::test("passes", || Ok(())),
        Trial::test("panics", || panic!("uh oh")),
    ]
}
+
/// A panicking trial is reported as FAILED with the panic payload as message.
#[test]
fn normal() {
    check(args([]), tests, 2,
        Conclusion {
            num_filtered_out: 0,
            num_passed: 1,
            num_failed: 1,
            num_ignored: 0,
            num_measured: 0,
        },
        "
            test passes ... ok
            test panics ... FAILED

            failures:

            ---- panics ----
            test panicked: uh oh


            failures:
                panics
        "
    );
}
diff --git a/tests/real/mixed_bag.rs b/tests/real/mixed_bag.rs
new file mode 100644
index 0000000..b19349e
--- /dev/null
+++ b/tests/real/mixed_bag.rs
@@ -0,0 +1,45 @@
+#![feature(test)]
+
+extern crate test;
+
+
/// Passing test (reference behavior for the real libtest harness).
#[test]
fn cat() {}
+
/// Failing test: panics with a fixed message.
#[test]
fn dog() {
    panic!("was not a good boy");
}
+
/// Ignored passing test.
#[test]
#[ignore]
fn frog() {}
+
/// Ignored failing test: panics when actually run (e.g. with --ignored).
#[test]
#[ignore]
fn owl() {
    panic!("broke neck");
}
+
+
/// Passing benchmark: sleeps 50ms per iteration to produce a measurement.
#[bench]
fn red(b: &mut test::Bencher) {
    b.iter(|| std::thread::sleep(std::time::Duration::from_millis(50)));
}
+
/// Failing benchmark: panics instead of measuring.
#[bench]
fn blue(_: &mut test::Bencher) {
    panic!("sky fell down");
}
+
/// Ignored passing benchmark with an empty body.
#[bench]
#[ignore]
fn purple(b: &mut test::Bencher) {
    b.iter(|| {});
}
+
+#[bench]
+#[ignore]
+fn cyan(_: &mut test::Bencher) {
+ panic!("not creative enough");
+}