tester-0.9.1/.cargo_vcs_info.json0000644000000001360000000000100123510ustar { "git": { "sha1": "c2b517fbc900ed837cc4dfc3253de630beab0227" }, "path_in_vcs": "" }tester-0.9.1/.github/workflows/CI.yml000064400000000000000000000017311046102023000155560ustar 00000000000000on: [push, pull_request] name: CI jobs: check: name: Check runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: stable override: true - uses: actions-rs/cargo@v1 with: command: check args: --all test: name: Test Suite runs-on: ${{ matrix.os }} strategy: matrix: os: [ubuntu-latest, macos-latest] toolchain: [stable, nightly] steps: - uses: actions/checkout@v2 - uses: actions-rs/toolchain@v1 with: profile: minimal toolchain: ${{ matrix.toolchain }} override: true - uses: actions-rs/cargo@v1 if: matrix.toolchain == 'stable' with: command: test - uses: actions-rs/cargo@v1 if: matrix.toolchain == 'nightly' with: command: test args: --all-features tester-0.9.1/.gitignore000064400000000000000000000000221046102023000131230ustar 00000000000000target Cargo.lock tester-0.9.1/Cargo.toml0000644000000020200000000000100103410ustar # THIS FILE IS AUTOMATICALLY GENERATED BY CARGO # # When uploading crates to the registry Cargo will automatically # "normalize" Cargo.toml files for maximal compatibility # with all versions of Cargo and also rewrite `path` dependencies # to registry (e.g., crates.io) dependencies. # # If you are reading this file be aware that the original Cargo.toml # will likely look very different (and much more reasonable). # See Cargo.toml.orig for the original contents. [package] edition = "2018" name = "tester" version = "0.9.1" authors = ["The Rust Project Developers"] description = "A fork of Rust’s `test` crate that doesn’t require unstable language features." license = "MIT OR Apache-2.0" repository = "https://github.com/messense/rustc-test" [dependencies.cfg-if] version = "1.0.0" [dependencies.getopts] version = "0.2" [dependencies.num_cpus] version = "1.13.0" [dependencies.term] version = "0.7" [features] asm_black_box = [] capture = [] [target."cfg(unix)".dependencies.libc] version = "0.2" default-features = false tester-0.9.1/Cargo.toml.orig000064400000000000000000000007641046102023000140370ustar 00000000000000[package] name = "tester" version = "0.9.1" authors = ["The Rust Project Developers"] license = "MIT OR Apache-2.0" description = "A fork of Rust’s `test` crate that doesn’t require unstable language features." repository = "https://github.com/messense/rustc-test" edition = "2018" [features] asm_black_box = [] capture = [] [dependencies] getopts = "0.2" term = "0.7" num_cpus = "1.13.0" cfg-if = "1.0.0" [target.'cfg(unix)'.dependencies] libc = { version = "0.2", default-features = false } tester-0.9.1/LICENSE-APACHE000064400000000000000000000251371046102023000130750ustar 00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. tester-0.9.1/LICENSE-MIT000064400000000000000000000017771046102023000126110ustar 00000000000000Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. tester-0.9.1/src/bench.rs000064400000000000000000000145261046102023000133650ustar 00000000000000//! Benchmarking module. use super::{ event::CompletedTest, options::BenchMode, test_result::TestResult, types::TestDesc, Sender, }; use crate::stats; use std::cmp; #[cfg(feature = "capture")] use std::io; use std::panic::{catch_unwind, AssertUnwindSafe}; use std::sync::{Arc, Mutex}; use std::time::{Duration, Instant}; #[cfg(feature = "asm_black_box")] pub use std::hint::black_box; #[cfg(not(feature = "asm_black_box"))] #[inline(never)] pub fn black_box(dummy: T) -> T { dummy } /// Manager of the benchmarking runs. /// /// This is fed into functions marked with `#[bench]` to allow for /// set-up & tear-down before running a piece of code repeatedly via a /// call to `iter`. #[derive(Clone)] pub struct Bencher { mode: BenchMode, summary: Option, pub bytes: u64, } impl Bencher { /// Callback for benchmark functions to run in their body. pub fn iter(&mut self, mut inner: F) where F: FnMut() -> T, { if self.mode == BenchMode::Single { ns_iter_inner(&mut inner, 1); return; } self.summary = Some(iter(&mut inner)); } pub fn bench(&mut self, mut f: F) -> Option where F: FnMut(&mut Bencher), { f(self); self.summary } } #[derive(Debug, Clone, PartialEq)] pub struct BenchSamples { pub ns_iter_summ: stats::Summary, pub mb_s: usize, } pub fn fmt_bench_samples(bs: &BenchSamples) -> String { use std::fmt::Write; let mut output = String::new(); let median = bs.ns_iter_summ.median as usize; let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize; write!( output, "{:>11} ns/iter (+/- {})", fmt_thousands_sep(median, ','), fmt_thousands_sep(deviation, ',') ) .unwrap(); if bs.mb_s != 0 { write!(output, " = {} MB/s", bs.mb_s).unwrap(); } output } // Format a number with thousands separators fn fmt_thousands_sep(mut n: usize, sep: char) -> String { use std::fmt::Write; let mut output = String::new(); let mut trailing = false; for &pow in &[9, 6, 3, 0] { let base = 10_usize.pow(pow); if pow == 0 || trailing || n / base != 0 { if !trailing { write!(output, "{}", n / base).unwrap(); } else { write!(output, "{:03}", n / base).unwrap(); } if pow != 0 { output.push(sep); } trailing = true; } n %= base; } output } fn ns_iter_inner(inner: &mut F, k: u64) -> u64 where F: FnMut() -> T, { let start = Instant::now(); for _ in 0..k { black_box(inner()); } start.elapsed().as_nanos() as u64 } pub fn iter(inner: &mut F) -> stats::Summary where F: FnMut() -> T, { // Initial bench run to get ballpark figure. let ns_single = ns_iter_inner(inner, 1); // Try to estimate iter count for 1ms falling back to 1m // iterations if first run took < 1ns. let ns_target_total = 1_000_000; // 1ms let mut n = ns_target_total / cmp::max(1, ns_single); // if the first run took more than 1ms we don't want to just // be left doing 0 iterations on every loop. The unfortunate // side effect of not being able to do as many runs is // automatically handled by the statistical analysis below // (i.e., larger error bars). 
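// A worked example with illustrative numbers (not measured values): if the single
// warm-up pass above took ns_single = 250 ns, the 1_000_000 ns target gives
// n = 1_000_000 / 250 = 4_000 iterations per sample; if that first pass already
// exceeded 1 ms, the integer division yields 0 and the `cmp::max(1, n)` below
// clamps it back to a single iteration.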
n = cmp::max(1, n); let mut total_run = Duration::new(0, 0); let samples: &mut [f64] = &mut [0.0_f64; 50]; loop { let loop_start = Instant::now(); for p in &mut *samples { *p = ns_iter_inner(inner, n) as f64 / n as f64; } stats::winsorize(samples, 5.0); let summ = stats::Summary::new(samples); for p in &mut *samples { let ns = ns_iter_inner(inner, 5 * n); *p = ns as f64 / (5 * n) as f64; } stats::winsorize(samples, 5.0); let summ5 = stats::Summary::new(samples); let loop_run = loop_start.elapsed(); // If we've run for 100ms and seem to have converged to a // stable median. if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 && summ.median - summ5.median < summ5.median_abs_dev { return summ5; } total_run += loop_run; // Longest we ever run for is 3s. if total_run > Duration::from_secs(3) { return summ5; } // If we overflow here just return the results so far. We check a // multiplier of 10 because we're about to multiply by 2 and the // next iteration of the loop will also multiply by 5 (to calculate // the summ5 result) n = match n.checked_mul(10) { Some(_) => n * 2, None => { return summ5; } }; } } pub fn benchmark(desc: TestDesc, monitor_ch: Sender, nocapture: bool, f: F) where F: FnMut(&mut Bencher), { let mut bs = Bencher { mode: BenchMode::Auto, summary: None, bytes: 0 }; let data = Arc::new(Mutex::new(Vec::new())); if !nocapture { #[cfg(feature = "capture")] io::set_output_capture(Some(data.clone())); } let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f))); #[cfg(feature = "capture")] io::set_output_capture(None); let test_result = match result { //bs.bench(f) { Ok(Some(ns_iter_summ)) => { let ns_iter = cmp::max(ns_iter_summ.median as u64, 1); let mb_s = bs.bytes * 1000 / ns_iter; let bs = BenchSamples { ns_iter_summ, mb_s: mb_s as usize }; TestResult::TrBench(bs) } Ok(None) => { // iter not called, so no data. // FIXME: error in this case? let samples: &mut [f64] = &mut [0.0_f64; 1]; let bs = BenchSamples { ns_iter_summ: stats::Summary::new(samples), mb_s: 0 }; TestResult::TrBench(bs) } Err(_) => TestResult::TrFailed, }; let stdout = data.lock().unwrap().to_vec(); let message = CompletedTest::new(desc, test_result, None, stdout); monitor_ch.send(message).unwrap(); } pub fn run_once(f: F) where F: FnMut(&mut Bencher), { let mut bs = Bencher { mode: BenchMode::Single, summary: None, bytes: 0 }; bs.bench(f); } tester-0.9.1/src/cli.rs000064400000000000000000000344211046102023000130510ustar 00000000000000//! Module converting command-line arguments into test configuration. use std::env; use std::path::PathBuf; use super::helpers::isatty; use super::options::{ColorConfig, Options, OutputFormat, RunIgnored}; use super::time::TestTimeOptions; #[derive(Debug)] pub struct TestOpts { pub list: bool, pub filters: Vec, pub filter_exact: bool, pub force_run_in_process: bool, pub exclude_should_panic: bool, pub run_ignored: RunIgnored, pub run_tests: bool, pub bench_benchmarks: bool, pub logfile: Option, pub nocapture: bool, pub color: ColorConfig, pub format: OutputFormat, pub test_threads: Option, pub skip: Vec, pub time_options: Option, pub options: Options, } impl TestOpts { pub fn use_color(&self) -> bool { match self.color { ColorConfig::AutoColor => !self.nocapture && isatty::stdout_isatty(), ColorConfig::AlwaysColor => true, ColorConfig::NeverColor => false, } } } /// Result of parsing the options. pub type OptRes = Result; /// Result of parsing the option part. 
type OptPartRes = Result; fn optgroups() -> getopts::Options { let mut opts = getopts::Options::new(); opts.optflag("", "include-ignored", "Run ignored and not ignored tests") .optflag("", "ignored", "Run only ignored tests") .optflag("", "force-run-in-process", "Forces tests to run in-process when panic=abort") .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic") .optflag("", "test", "Run tests and not benchmarks") .optflag("", "bench", "Run benchmarks instead of tests") .optflag("", "list", "List all tests and benchmarks") .optflag("h", "help", "Display this message (longer with --help)") .optopt( "", "logfile", "Write logs to the specified file instead \ of stdout", "PATH", ) .optflag( "", "nocapture", "don't capture stdout/stderr of each \ task, allow printing directly", ) .optopt( "", "test-threads", "Number of threads used for running tests \ in parallel", "n_threads", ) .optmulti( "", "skip", "Skip tests whose names contain FILTER (this flag can \ be used multiple times)", "FILTER", ) .optflag( "q", "quiet", "Display one character per test instead of one line. \ Alias to --format=terse", ) .optflag("", "exact", "Exactly match filters rather than by substring") .optopt( "", "color", "Configure coloring of output: auto = colorize if stdout is a tty and tests are run on serially (default); always = always colorize output; never = never colorize output;", "auto|always|never", ) .optopt( "", "format", "Configure formatting of output: pretty = Print verbose output; terse = Display one character per test; json = Output a json document", "pretty|terse|json", ) .optflag("", "show-output", "Show captured stdout of successful tests") .optopt( "Z", "", "Enable nightly-only flags: unstable-options = Allow use of experimental features", "unstable-options", ) .optflagopt( "", "report-time", "Show execution time of each test. Available values: plain = do not colorize the execution time (default); colored = colorize output according to the `color` parameter value; Threshold values for colorized output can be configured via `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and `RUST_TEST_TIME_DOCTEST` environment variables. Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`. Durations must be specified in milliseconds, e.g. `500,2000` means that the warn time is 0.5 seconds, and the critical time is 2 seconds. Not available for --format=terse", "plain|colored", ) .optflag( "", "ensure-time", "Treat excess of the test execution time limit as error. Threshold values for this option can be configured via `RUST_TEST_TIME_UNIT`, `RUST_TEST_TIME_INTEGRATION` and `RUST_TEST_TIME_DOCTEST` environment variables. Expected format of environment variable is `VARIABLE=WARN_TIME,CRITICAL_TIME`. `CRITICAL_TIME` here means the limit that should not be exceeded by test. ", ); opts } fn usage(binary: &str, options: &getopts::Options) { let message = format!("Usage: {} [OPTIONS] [FILTERS...]", binary); println!( r#"{usage} The FILTERS string is tested against the name of all tests, and only those tests whose names contain the filter are run. Multiple filter strings may be passed, which will run all tests matching any of the filters. By default, all tests are run in parallel. This can be altered with the --test-threads flag or the RUST_TEST_THREADS environment variable when running tests (set it to 1). All tests have their standard output and standard error captured by default. 
This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE environment variable to a value other than "0". Logging is not captured by default. Test Attributes: `#[test]` - Indicates a function is a test to be run. This function takes no arguments. `#[bench]` - Indicates a function is a benchmark to be run. This function takes one argument (test::Bencher). `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if the code causes a panic (an assertion failure or panic!) A message may be provided, which the failure string must contain: #[should_panic(expected = "foo")]. `#[ignore]` - When applied to a function which is already attributed as a test, then the test runner will ignore these tests during normal test runs. Running with --ignored or --include-ignored will run these tests."#, usage = options.usage(&message) ); } /// Parses command line arguments into test options. /// Returns `None` if help was requested (since we only show help message and don't run tests), /// returns `Some(Err(..))` if provided arguments are incorrect, /// otherwise creates a `TestOpts` object and returns it. pub fn parse_opts(args: &[String]) -> Option { // Parse matches. let opts = optgroups(); let args = args.get(1..).unwrap_or(args); let matches = match opts.parse(args) { Ok(m) => m, Err(f) => return Some(Err(f.to_string())), }; // Check if help was requested. if matches.opt_present("h") { // Show help and do nothing more. usage(&args[0], &opts); return None; } // Actually parse the opts. let opts_result = parse_opts_impl(matches); Some(opts_result) } // Gets the option value and checks if unstable features are enabled. macro_rules! unstable_optflag { ($matches:ident, $allow_unstable:ident, $option_name:literal) => {{ let opt = $matches.opt_present($option_name); if !$allow_unstable && opt { return Err(format!( "The \"{}\" flag is only accepted on the nightly compiler with -Z unstable-options", $option_name )); } opt }}; } // Implementation of `parse_opts` that doesn't care about help message // and returns a `Result`. fn parse_opts_impl(matches: getopts::Matches) -> OptRes { let allow_unstable = get_allow_unstable(&matches)?; // Unstable flags let force_run_in_process = unstable_optflag!(matches, allow_unstable, "force-run-in-process"); let exclude_should_panic = unstable_optflag!(matches, allow_unstable, "exclude-should-panic"); let include_ignored = unstable_optflag!(matches, allow_unstable, "include-ignored"); let time_options = get_time_options(&matches, allow_unstable)?; let quiet = matches.opt_present("quiet"); let exact = matches.opt_present("exact"); let list = matches.opt_present("list"); let skip = matches.opt_strs("skip"); let bench_benchmarks = matches.opt_present("bench"); let run_tests = !bench_benchmarks || matches.opt_present("test"); let logfile = get_log_file(&matches)?; let run_ignored = get_run_ignored(&matches, include_ignored)?; let filters = matches.free.clone(); let nocapture = get_nocapture(&matches)?; let test_threads = get_test_threads(&matches)?; let color = get_color_config(&matches)?; let format = get_format(&matches, quiet, allow_unstable)?; let options = Options::new().display_output(matches.opt_present("show-output")); let test_opts = TestOpts { list, filters, filter_exact: exact, force_run_in_process, exclude_should_panic, run_ignored, run_tests, bench_benchmarks, logfile, nocapture, color, format, test_threads, skip, time_options, options, }; Ok(test_opts) } // FIXME: Copied from librustc_ast until linkage errors are resolved. 
Issue #47566 fn is_nightly() -> bool { // Whether this is a feature-staged build, i.e., on the beta or stable channel let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some(); // Whether we should enable unstable features for bootstrapping let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok(); bootstrap || !disable_unstable_features } // Gets the CLI options associated with `report-time` feature. fn get_time_options( matches: &getopts::Matches, allow_unstable: bool, ) -> OptPartRes> { let report_time = unstable_optflag!(matches, allow_unstable, "report-time"); let colored_opt_str = matches.opt_str("report-time"); let mut report_time_colored = report_time && colored_opt_str == Some("colored".into()); let ensure_test_time = unstable_optflag!(matches, allow_unstable, "ensure-time"); // If `ensure-test-time` option is provided, time output is enforced, // so user won't be confused if any of tests will silently fail. let options = if report_time || ensure_test_time { if ensure_test_time && !report_time { report_time_colored = true; } Some(TestTimeOptions::new_from_env(ensure_test_time, report_time_colored)) } else { None }; Ok(options) } fn get_test_threads(matches: &getopts::Matches) -> OptPartRes> { let test_threads = match matches.opt_str("test-threads") { Some(n_str) => match n_str.parse::() { Ok(0) => return Err("argument for --test-threads must not be 0".to_string()), Ok(n) => Some(n), Err(e) => { return Err(format!( "argument for --test-threads must be a number > 0 \ (error: {})", e )); } }, None => None, }; Ok(test_threads) } fn get_format( matches: &getopts::Matches, quiet: bool, allow_unstable: bool, ) -> OptPartRes { let format = match matches.opt_str("format").as_deref() { None if quiet => OutputFormat::Terse, Some("pretty") | None => OutputFormat::Pretty, Some("terse") => OutputFormat::Terse, Some("json") => { if !allow_unstable { return Err("The \"json\" format is only accepted on the nightly compiler".into()); } OutputFormat::Json } Some(v) => { return Err(format!( "argument for --format must be pretty, terse, or json (was \ {})", v )); } }; Ok(format) } fn get_color_config(matches: &getopts::Matches) -> OptPartRes { let color = match matches.opt_str("color").as_deref() { Some("auto") | None => ColorConfig::AutoColor, Some("always") => ColorConfig::AlwaysColor, Some("never") => ColorConfig::NeverColor, Some(v) => { return Err(format!( "argument for --color must be auto, always, or never (was \ {})", v )); } }; Ok(color) } fn get_nocapture(matches: &getopts::Matches) -> OptPartRes { let mut nocapture = matches.opt_present("nocapture"); if !nocapture { nocapture = match env::var("RUST_TEST_NOCAPTURE") { Ok(val) => &val != "0", Err(_) => false, }; } Ok(nocapture) } fn get_run_ignored(matches: &getopts::Matches, include_ignored: bool) -> OptPartRes { let run_ignored = match (include_ignored, matches.opt_present("ignored")) { (true, true) => { return Err("the options --include-ignored and --ignored are mutually exclusive".into()); } (true, false) => RunIgnored::Yes, (false, true) => RunIgnored::Only, (false, false) => RunIgnored::No, }; Ok(run_ignored) } fn get_allow_unstable(matches: &getopts::Matches) -> OptPartRes { let mut allow_unstable = false; if let Some(opt) = matches.opt_str("Z") { if !is_nightly() { return Err("the option `Z` is only accepted on the nightly compiler".into()); } match &*opt { "unstable-options" => { allow_unstable = true; } _ => { return Err("Unrecognized option to `Z`".into()); } } }; Ok(allow_unstable) } fn get_log_file(matches: 
&getopts::Matches) -> OptPartRes> { let logfile = matches.opt_str("logfile").map(|s| PathBuf::from(&s)); Ok(logfile) } tester-0.9.1/src/console.rs000064400000000000000000000226741046102023000137530ustar 00000000000000//! Module providing interface for running tests in the console. use std::fs::File; use std::io; use std::io::prelude::Write; use std::time::Instant; use super::{ bench::fmt_bench_samples, cli::TestOpts, event::{CompletedTest, TestEvent}, filter_tests, formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter}, helpers::{concurrency::get_concurrency, metrics::MetricMap}, options::{Options, OutputFormat}, run_tests, test_result::TestResult, time::{TestExecTime, TestSuiteExecTime}, types::{NamePadding, TestDesc, TestDescAndFn}, }; /// Generic wrapper over stdout. pub enum OutputLocation { Pretty(Box), Raw(T), } impl Write for OutputLocation { fn write(&mut self, buf: &[u8]) -> io::Result { match *self { OutputLocation::Pretty(ref mut term) => term.write(buf), OutputLocation::Raw(ref mut stdout) => stdout.write(buf), } } fn flush(&mut self) -> io::Result<()> { match *self { OutputLocation::Pretty(ref mut term) => term.flush(), OutputLocation::Raw(ref mut stdout) => stdout.flush(), } } } pub struct ConsoleTestState { pub log_out: Option, pub total: usize, pub passed: usize, pub failed: usize, pub ignored: usize, pub allowed_fail: usize, pub filtered_out: usize, pub measured: usize, pub exec_time: Option, pub metrics: MetricMap, pub failures: Vec<(TestDesc, Vec)>, pub not_failures: Vec<(TestDesc, Vec)>, pub time_failures: Vec<(TestDesc, Vec)>, pub options: Options, } impl ConsoleTestState { pub fn new(opts: &TestOpts) -> io::Result { let log_out = match opts.logfile { Some(ref path) => Some(File::create(path)?), None => None, }; Ok(ConsoleTestState { log_out, total: 0, passed: 0, failed: 0, ignored: 0, allowed_fail: 0, filtered_out: 0, measured: 0, exec_time: None, metrics: MetricMap::new(), failures: Vec::new(), not_failures: Vec::new(), time_failures: Vec::new(), options: opts.options, }) } pub fn write_log(&mut self, msg: F) -> io::Result<()> where S: AsRef, F: FnOnce() -> S, { match self.log_out { None => Ok(()), Some(ref mut o) => { let msg = msg(); let msg = msg.as_ref(); o.write_all(msg.as_bytes()) } } } pub fn write_log_result( &mut self, test: &TestDesc, result: &TestResult, exec_time: Option<&TestExecTime>, ) -> io::Result<()> { self.write_log(|| { format!( "{} {}", match *result { TestResult::TrOk => "ok".to_owned(), TestResult::TrFailed => "failed".to_owned(), TestResult::TrFailedMsg(ref msg) => format!("failed: {}", msg), TestResult::TrIgnored => "ignored".to_owned(), TestResult::TrAllowedFail => "failed (allowed)".to_owned(), TestResult::TrBench(ref bs) => fmt_bench_samples(bs), TestResult::TrTimedFail => "failed (time limit exceeded)".to_owned(), }, test.name, ) })?; if let Some(exec_time) = exec_time { self.write_log(|| format!(" <{}>", exec_time))?; } self.write_log(|| "\n") } fn current_test_count(&self) -> usize { self.passed + self.failed + self.ignored + self.measured + self.allowed_fail } } // List the tests to console, and optionally to logfile. Filters are honored. 
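// Illustrative shape of the listing produced below (test names are hypothetical):
//
//     parse::handles_empty_filter: test
//     sort::bench_large_input: benchmark
//
//     1 test, 1 benchmark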
pub fn list_tests_console(opts: &TestOpts, tests: Vec) -> io::Result<()> { let mut output = match term::stdout() { None => OutputLocation::Raw(io::stdout()), Some(t) => OutputLocation::Pretty(t), }; let quiet = opts.format == OutputFormat::Terse; let mut st = ConsoleTestState::new(opts)?; let mut ntest = 0; let mut nbench = 0; for test in filter_tests(&opts, tests) { use crate::TestFn::*; let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test; let fntype = match testfn { StaticTestFn(..) | DynTestFn(..) => { ntest += 1; "test" } StaticBenchFn(..) | DynBenchFn(..) => { nbench += 1; "benchmark" } }; writeln!(output, "{}: {}", name, fntype)?; st.write_log(|| format!("{} {}\n", fntype, name))?; } fn plural(count: u32, s: &str) -> String { match count { 1 => format!("{} {}", 1, s), n => format!("{} {}s", n, s), } } if !quiet { if ntest != 0 || nbench != 0 { writeln!(output)?; } writeln!(output, "{}, {}", plural(ntest, "test"), plural(nbench, "benchmark"))?; } Ok(()) } // Updates `ConsoleTestState` depending on result of the test execution. fn handle_test_result(st: &mut ConsoleTestState, completed_test: CompletedTest) { let test = completed_test.desc; let stdout = completed_test.stdout; match completed_test.result { TestResult::TrOk => { st.passed += 1; st.not_failures.push((test, stdout)); } TestResult::TrIgnored => st.ignored += 1, TestResult::TrAllowedFail => st.allowed_fail += 1, TestResult::TrBench(bs) => { st.metrics.insert_metric( test.name.as_slice(), bs.ns_iter_summ.median, bs.ns_iter_summ.max - bs.ns_iter_summ.min, ); st.measured += 1 } TestResult::TrFailed => { st.failed += 1; st.failures.push((test, stdout)); } TestResult::TrFailedMsg(msg) => { st.failed += 1; let mut stdout = stdout; stdout.extend_from_slice(format!("note: {}", msg).as_bytes()); st.failures.push((test, stdout)); } TestResult::TrTimedFail => { st.failed += 1; st.time_failures.push((test, stdout)); } } } // Handler for events that occur during test execution. // It is provided as a callback to the `run_tests` function. fn on_test_event( event: &TestEvent, st: &mut ConsoleTestState, out: &mut dyn OutputFormatter, ) -> io::Result<()> { match (*event).clone() { TestEvent::TeFiltered(ref filtered_tests) => { st.total = filtered_tests.len(); out.write_run_start(filtered_tests.len())?; } TestEvent::TeFilteredOut(filtered_out) => { st.filtered_out = filtered_out; } TestEvent::TeWait(ref test) => out.write_test_start(test)?, TestEvent::TeTimeout(ref test) => out.write_timeout(test)?, TestEvent::TeResult(completed_test) => { let test = &completed_test.desc; let result = &completed_test.result; let exec_time = &completed_test.exec_time; let stdout = &completed_test.stdout; st.write_log_result(test, result, exec_time.as_ref())?; out.write_result(test, result, exec_time.as_ref(), &*stdout, st)?; handle_test_result(st, completed_test); } } Ok(()) } /// A simple console test runner. /// Runs provided tests reporting process and results to the stdout. 
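///
/// A minimal sketch of how a harness might drive this function. The `tests` vector,
/// the `parse_opts` handling, and the failure exit code are assumptions for the
/// example, not requirements taken from this file:
///
/// ```ignore
/// let args: Vec<String> = std::env::args().collect();
/// if let Some(Ok(opts)) = parse_opts(&args) {
///     let all_passed = run_tests_console(&opts, tests)?;
///     std::process::exit(if all_passed { 0 } else { 101 });
/// }
/// ```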
pub fn run_tests_console(opts: &TestOpts, tests: Vec) -> io::Result { let output = match term::stdout() { None => OutputLocation::Raw(io::stdout()), Some(t) => OutputLocation::Pretty(t), }; let max_name_len = tests .iter() .max_by_key(|t| len_if_padded(*t)) .map(|t| t.desc.name.as_slice().len()) .unwrap_or(0); let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1; let mut out: Box = match opts.format { OutputFormat::Pretty => Box::new(PrettyFormatter::new( output, opts.use_color(), max_name_len, is_multithreaded, opts.time_options, )), OutputFormat::Terse => { Box::new(TerseFormatter::new(output, opts.use_color(), max_name_len, is_multithreaded)) } OutputFormat::Json => Box::new(JsonFormatter::new(output)), }; let mut st = ConsoleTestState::new(opts)?; // Prevent the usage of `Instant` in some cases: // - It's currently not supported for wasm targets. // - We disable it for miri because it's not available when isolation is enabled. let is_instant_supported = !cfg!(target_arch = "wasm32") && !cfg!(miri); let start_time = if is_instant_supported { Some(Instant::now()) } else { None }; run_tests(opts, tests, |x| on_test_event(&x, &mut st, &mut *out))?; st.exec_time = start_time.map(|t| TestSuiteExecTime(t.elapsed())); assert!(st.current_test_count() == st.total); out.write_run_finish(&st) } // Calculates padding for given test description. fn len_if_padded(t: &TestDescAndFn) -> usize { match t.testfn.padding() { NamePadding::PadNone => 0, NamePadding::PadOnRight => t.desc.name.as_slice().len(), } } tester-0.9.1/src/event.rs000064400000000000000000000014471046102023000134250ustar 00000000000000//! Module containing different events that can occur //! during tests execution process. use super::test_result::TestResult; use super::time::TestExecTime; use super::types::TestDesc; #[derive(Debug, Clone)] pub struct CompletedTest { pub desc: TestDesc, pub result: TestResult, pub exec_time: Option, pub stdout: Vec, } impl CompletedTest { pub fn new( desc: TestDesc, result: TestResult, exec_time: Option, stdout: Vec, ) -> Self { Self { desc, result, exec_time, stdout } } } unsafe impl Send for CompletedTest {} #[derive(Debug, Clone)] pub enum TestEvent { TeFiltered(Vec), TeWait(TestDesc), TeResult(CompletedTest), TeTimeout(TestDesc), TeFilteredOut(usize), } tester-0.9.1/src/formatters/json.rs000064400000000000000000000176331046102023000154470ustar 00000000000000use std::{borrow::Cow, io, io::prelude::Write}; use super::OutputFormatter; use crate::{ console::{ConsoleTestState, OutputLocation}, test_result::TestResult, time, types::TestDesc, }; pub(crate) struct JsonFormatter { out: OutputLocation, } impl JsonFormatter { pub fn new(out: OutputLocation) -> Self { Self { out } } fn writeln_message(&mut self, s: &str) -> io::Result<()> { assert!(!s.contains('\n')); self.out.write_all(s.as_ref())?; self.out.write_all(b"\n") } fn write_message(&mut self, s: &str) -> io::Result<()> { assert!(!s.contains('\n')); self.out.write_all(s.as_ref()) } fn write_event( &mut self, ty: &str, name: &str, evt: &str, exec_time: Option<&time::TestExecTime>, stdout: Option>, extra: Option<&str>, ) -> io::Result<()> { // A doc test's name includes a filename which must be escaped for correct json. 
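// Illustrative case (hypothetical name, not taken from the original comments): a doc
// test registered as `src\lib.rs - escape (line 4)` carries a backslash from its path,
// so the `EscapedString` wrapper below is what keeps the emitted "name" field valid JSON.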
self.write_message(&*format!( r#"{{ "type": "{}", "name": "{}", "event": "{}""#, ty, EscapedString(name), evt ))?; if let Some(exec_time) = exec_time { self.write_message(&*format!(r#", "exec_time": {}"#, exec_time.0.as_secs_f64()))?; } if let Some(stdout) = stdout { self.write_message(&*format!(r#", "stdout": "{}""#, EscapedString(stdout)))?; } if let Some(extra) = extra { self.write_message(&*format!(r#", {}"#, extra))?; } self.writeln_message(" }") } } impl OutputFormatter for JsonFormatter { fn write_run_start(&mut self, test_count: usize) -> io::Result<()> { self.writeln_message(&*format!( r#"{{ "type": "suite", "event": "started", "test_count": {} }}"#, test_count )) } fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> { self.writeln_message(&*format!( r#"{{ "type": "test", "event": "started", "name": "{}" }}"#, EscapedString(desc.name.as_slice()) )) } fn write_result( &mut self, desc: &TestDesc, result: &TestResult, exec_time: Option<&time::TestExecTime>, stdout: &[u8], state: &ConsoleTestState, ) -> io::Result<()> { let display_stdout = state.options.display_output || *result != TestResult::TrOk; let stdout = if display_stdout && !stdout.is_empty() { Some(String::from_utf8_lossy(stdout)) } else { None }; match *result { TestResult::TrOk => { self.write_event("test", desc.name.as_slice(), "ok", exec_time, stdout, None) } TestResult::TrFailed => { self.write_event("test", desc.name.as_slice(), "failed", exec_time, stdout, None) } TestResult::TrTimedFail => self.write_event( "test", desc.name.as_slice(), "failed", exec_time, stdout, Some(r#""reason": "time limit exceeded""#), ), TestResult::TrFailedMsg(ref m) => self.write_event( "test", desc.name.as_slice(), "failed", exec_time, stdout, Some(&*format!(r#""message": "{}""#, EscapedString(m))), ), TestResult::TrIgnored => { self.write_event("test", desc.name.as_slice(), "ignored", exec_time, stdout, None) } TestResult::TrAllowedFail => self.write_event( "test", desc.name.as_slice(), "allowed_failure", exec_time, stdout, None, ), TestResult::TrBench(ref bs) => { let median = bs.ns_iter_summ.median as usize; let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize; let mbps = if bs.mb_s == 0 { String::new() } else { format!(r#", "mib_per_second": {}"#, bs.mb_s) }; let line = format!( "{{ \"type\": \"bench\", \ \"name\": \"{}\", \ \"median\": {}, \ \"deviation\": {}{} }}", EscapedString(desc.name.as_slice()), median, deviation, mbps ); self.writeln_message(&*line) } } } fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> { self.writeln_message(&*format!( r#"{{ "type": "test", "event": "timeout", "name": "{}" }}"#, EscapedString(desc.name.as_slice()) )) } fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result { self.write_message(&*format!( "{{ \"type\": \"suite\", \ \"event\": \"{}\", \ \"passed\": {}, \ \"failed\": {}, \ \"allowed_fail\": {}, \ \"ignored\": {}, \ \"measured\": {}, \ \"filtered_out\": {}", if state.failed == 0 { "ok" } else { "failed" }, state.passed, state.failed + state.allowed_fail, state.allowed_fail, state.ignored, state.measured, state.filtered_out, ))?; if let Some(ref exec_time) = state.exec_time { let time_str = format!(", \"exec_time\": {}", exec_time.0.as_secs_f64()); self.write_message(&time_str)?; } self.writeln_message(" }")?; Ok(state.failed == 0) } } /// A formatting utility used to print strings with characters in need of escaping. 
/// Base code taken form `libserialize::json::escape_str` struct EscapedString>(S); impl> std::fmt::Display for EscapedString { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> ::std::fmt::Result { let mut start = 0; for (i, byte) in self.0.as_ref().bytes().enumerate() { let escaped = match byte { b'"' => "\\\"", b'\\' => "\\\\", b'\x00' => "\\u0000", b'\x01' => "\\u0001", b'\x02' => "\\u0002", b'\x03' => "\\u0003", b'\x04' => "\\u0004", b'\x05' => "\\u0005", b'\x06' => "\\u0006", b'\x07' => "\\u0007", b'\x08' => "\\b", b'\t' => "\\t", b'\n' => "\\n", b'\x0b' => "\\u000b", b'\x0c' => "\\f", b'\r' => "\\r", b'\x0e' => "\\u000e", b'\x0f' => "\\u000f", b'\x10' => "\\u0010", b'\x11' => "\\u0011", b'\x12' => "\\u0012", b'\x13' => "\\u0013", b'\x14' => "\\u0014", b'\x15' => "\\u0015", b'\x16' => "\\u0016", b'\x17' => "\\u0017", b'\x18' => "\\u0018", b'\x19' => "\\u0019", b'\x1a' => "\\u001a", b'\x1b' => "\\u001b", b'\x1c' => "\\u001c", b'\x1d' => "\\u001d", b'\x1e' => "\\u001e", b'\x1f' => "\\u001f", b'\x7f' => "\\u007f", _ => { continue; } }; if start < i { f.write_str(&self.0.as_ref()[start..i])?; } f.write_str(escaped)?; start = i + 1; } if start != self.0.as_ref().len() { f.write_str(&self.0.as_ref()[start..])?; } Ok(()) } } tester-0.9.1/src/formatters/mod.rs000064400000000000000000000022161046102023000152440ustar 00000000000000use std::{io, io::prelude::Write}; use crate::{ console::ConsoleTestState, test_result::TestResult, time, types::{TestDesc, TestName}, }; mod json; mod pretty; mod terse; pub(crate) use self::json::JsonFormatter; pub(crate) use self::pretty::PrettyFormatter; pub(crate) use self::terse::TerseFormatter; pub(crate) trait OutputFormatter { fn write_run_start(&mut self, test_count: usize) -> io::Result<()>; fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()>; fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()>; fn write_result( &mut self, desc: &TestDesc, result: &TestResult, exec_time: Option<&time::TestExecTime>, stdout: &[u8], state: &ConsoleTestState, ) -> io::Result<()>; fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result; } pub(crate) fn write_stderr_delimiter(test_output: &mut Vec, test_name: &TestName) { match test_output.last() { Some(b'\n') => (), Some(_) => test_output.push(b'\n'), None => (), } writeln!(test_output, "---- {} stderr ----", test_name).unwrap(); } tester-0.9.1/src/formatters/pretty.rs000064400000000000000000000211511046102023000160130ustar 00000000000000use std::{io, io::prelude::Write}; use super::OutputFormatter; use crate::{ bench::fmt_bench_samples, console::{ConsoleTestState, OutputLocation}, test_result::TestResult, time, types::TestDesc, }; pub(crate) struct PrettyFormatter { out: OutputLocation, use_color: bool, time_options: Option, /// Number of columns to fill when aligning names max_name_len: usize, is_multithreaded: bool, } impl PrettyFormatter { pub fn new( out: OutputLocation, use_color: bool, max_name_len: usize, is_multithreaded: bool, time_options: Option, ) -> Self { PrettyFormatter { out, use_color, max_name_len, is_multithreaded, time_options } } #[cfg(test)] pub fn output_location(&self) -> &OutputLocation { &self.out } pub fn write_ok(&mut self) -> io::Result<()> { self.write_short_result("ok", term::color::GREEN) } pub fn write_failed(&mut self) -> io::Result<()> { self.write_short_result("FAILED", term::color::RED) } pub fn write_ignored(&mut self) -> io::Result<()> { self.write_short_result("ignored", term::color::YELLOW) } pub fn write_allowed_fail(&mut self) -> io::Result<()> 
{ self.write_short_result("FAILED (allowed)", term::color::YELLOW) } pub fn write_time_failed(&mut self) -> io::Result<()> { self.write_short_result("FAILED (time limit exceeded)", term::color::RED) } pub fn write_bench(&mut self) -> io::Result<()> { self.write_pretty("bench", term::color::CYAN) } pub fn write_short_result( &mut self, result: &str, color: term::color::Color, ) -> io::Result<()> { self.write_pretty(result, color) } pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> { match self.out { OutputLocation::Pretty(ref mut term) => { if self.use_color { term.fg(color)?; } term.write_all(word.as_bytes())?; if self.use_color { term.reset()?; } term.flush() } OutputLocation::Raw(ref mut stdout) => { stdout.write_all(word.as_bytes())?; stdout.flush() } } } pub fn write_plain>(&mut self, s: S) -> io::Result<()> { let s = s.as_ref(); self.out.write_all(s.as_bytes())?; self.out.flush() } fn write_time( &mut self, desc: &TestDesc, exec_time: Option<&time::TestExecTime>, ) -> io::Result<()> { if let (Some(opts), Some(time)) = (self.time_options, exec_time) { let time_str = format!(" <{}>", time); let color = if opts.colored { if opts.is_critical(desc, time) { Some(term::color::RED) } else if opts.is_warn(desc, time) { Some(term::color::YELLOW) } else { None } } else { None }; match color { Some(color) => self.write_pretty(&time_str, color)?, None => self.write_plain(&time_str)?, } } Ok(()) } fn write_results( &mut self, inputs: &Vec<(TestDesc, Vec)>, results_type: &str, ) -> io::Result<()> { let results_out_str = format!("\n{}:\n", results_type); self.write_plain(&results_out_str)?; let mut results = Vec::new(); let mut stdouts = String::new(); for &(ref f, ref stdout) in inputs { results.push(f.name.to_string()); if !stdout.is_empty() { stdouts.push_str(&format!("---- {} stdout ----\n", f.name)); let output = String::from_utf8_lossy(stdout); stdouts.push_str(&output); stdouts.push('\n'); } } if !stdouts.is_empty() { self.write_plain("\n")?; self.write_plain(&stdouts)?; } self.write_plain(&results_out_str)?; results.sort(); for name in &results { self.write_plain(&format!(" {}\n", name))?; } Ok(()) } pub fn write_successes(&mut self, state: &ConsoleTestState) -> io::Result<()> { self.write_results(&state.not_failures, "successes") } pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> { self.write_results(&state.failures, "failures") } pub fn write_time_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> { self.write_results(&state.time_failures, "failures (time limit exceeded)") } fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> { let name = desc.padded_name(self.max_name_len, desc.name.padding()); self.write_plain(&format!("test {} ... ", name))?; Ok(()) } } impl OutputFormatter for PrettyFormatter { fn write_run_start(&mut self, test_count: usize) -> io::Result<()> { let noun = if test_count != 1 { "tests" } else { "test" }; self.write_plain(&format!("\nrunning {} {}\n", test_count, noun)) } fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> { // When running tests concurrently, we should not print // the test's name as the result will be mis-aligned. // When running the tests serially, we print the name here so // that the user can see which test hangs. 
if !self.is_multithreaded { self.write_test_name(desc)?; } Ok(()) } fn write_result( &mut self, desc: &TestDesc, result: &TestResult, exec_time: Option<&time::TestExecTime>, _: &[u8], _: &ConsoleTestState, ) -> io::Result<()> { if self.is_multithreaded { self.write_test_name(desc)?; } match *result { TestResult::TrOk => self.write_ok()?, TestResult::TrFailed | TestResult::TrFailedMsg(_) => self.write_failed()?, TestResult::TrIgnored => self.write_ignored()?, TestResult::TrAllowedFail => self.write_allowed_fail()?, TestResult::TrBench(ref bs) => { self.write_bench()?; self.write_plain(&format!(": {}", fmt_bench_samples(bs)))?; } TestResult::TrTimedFail => self.write_time_failed()?, } self.write_time(desc, exec_time)?; self.write_plain("\n") } fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> { if self.is_multithreaded { self.write_test_name(desc)?; } self.write_plain(&format!( "test {} has been running for over {} seconds\n", desc.name, time::TEST_WARN_TIMEOUT_S )) } fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result { if state.options.display_output { self.write_successes(state)?; } let success = state.failed == 0; if !success { if !state.failures.is_empty() { self.write_failures(state)?; } if !state.time_failures.is_empty() { self.write_time_failures(state)?; } } self.write_plain("\ntest result: ")?; if success { // There's no parallelism at this point so it's safe to use color self.write_pretty("ok", term::color::GREEN)?; } else { self.write_pretty("FAILED", term::color::RED)?; } let s = if state.allowed_fail > 0 { format!( ". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out", state.passed, state.failed + state.allowed_fail, state.allowed_fail, state.ignored, state.measured, state.filtered_out ) } else { format!( ". 
{} passed; {} failed; {} ignored; {} measured; {} filtered out", state.passed, state.failed, state.ignored, state.measured, state.filtered_out ) }; self.write_plain(&s)?; if let Some(ref exec_time) = state.exec_time { let time_str = format!("; finished in {}", exec_time); self.write_plain(&time_str)?; } self.write_plain("\n\n")?; Ok(success) } } tester-0.9.1/src/formatters/terse.rs000064400000000000000000000204111046102023000156040ustar 00000000000000use std::{io, io::prelude::Write}; use super::OutputFormatter; use crate::{ bench::fmt_bench_samples, console::{ConsoleTestState, OutputLocation}, test_result::TestResult, time, types::NamePadding, types::TestDesc, }; // insert a '\n' after 100 tests in quiet mode const QUIET_MODE_MAX_COLUMN: usize = 100; pub(crate) struct TerseFormatter { out: OutputLocation, use_color: bool, is_multithreaded: bool, /// Number of columns to fill when aligning names max_name_len: usize, test_count: usize, total_test_count: usize, } impl TerseFormatter { pub fn new( out: OutputLocation, use_color: bool, max_name_len: usize, is_multithreaded: bool, ) -> Self { TerseFormatter { out, use_color, max_name_len, is_multithreaded, test_count: 0, total_test_count: 0, // initialized later, when write_run_start is called } } pub fn write_ok(&mut self) -> io::Result<()> { self.write_short_result(".", term::color::GREEN) } pub fn write_failed(&mut self) -> io::Result<()> { self.write_short_result("F", term::color::RED) } pub fn write_ignored(&mut self) -> io::Result<()> { self.write_short_result("i", term::color::YELLOW) } pub fn write_allowed_fail(&mut self) -> io::Result<()> { self.write_short_result("a", term::color::YELLOW) } pub fn write_bench(&mut self) -> io::Result<()> { self.write_pretty("bench", term::color::CYAN) } pub fn write_short_result( &mut self, result: &str, color: term::color::Color, ) -> io::Result<()> { self.write_pretty(result, color)?; if self.test_count % QUIET_MODE_MAX_COLUMN == QUIET_MODE_MAX_COLUMN - 1 { // we insert a new line every 100 dots in order to flush the // screen when dealing with line-buffered output (e.g., piping to // `stamp` in the rust CI). 
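// Illustrative: with 250 tests in total, the 100th result ends the current row of
// dots with " 100/250" and a newline, and the next hundred results start a fresh row.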
let out = format!(" {}/{}\n", self.test_count + 1, self.total_test_count); self.write_plain(&out)?; } self.test_count += 1; Ok(()) } pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> { match self.out { OutputLocation::Pretty(ref mut term) => { if self.use_color { term.fg(color)?; } term.write_all(word.as_bytes())?; if self.use_color { term.reset()?; } term.flush() } OutputLocation::Raw(ref mut stdout) => { stdout.write_all(word.as_bytes())?; stdout.flush() } } } pub fn write_plain>(&mut self, s: S) -> io::Result<()> { let s = s.as_ref(); self.out.write_all(s.as_bytes())?; self.out.flush() } pub fn write_outputs(&mut self, state: &ConsoleTestState) -> io::Result<()> { self.write_plain("\nsuccesses:\n")?; let mut successes = Vec::new(); let mut stdouts = String::new(); for &(ref f, ref stdout) in &state.not_failures { successes.push(f.name.to_string()); if !stdout.is_empty() { stdouts.push_str(&format!("---- {} stdout ----\n", f.name)); let output = String::from_utf8_lossy(stdout); stdouts.push_str(&output); stdouts.push('\n'); } } if !stdouts.is_empty() { self.write_plain("\n")?; self.write_plain(&stdouts)?; } self.write_plain("\nsuccesses:\n")?; successes.sort(); for name in &successes { self.write_plain(&format!(" {}\n", name))?; } Ok(()) } pub fn write_failures(&mut self, state: &ConsoleTestState) -> io::Result<()> { self.write_plain("\nfailures:\n")?; let mut failures = Vec::new(); let mut fail_out = String::new(); for &(ref f, ref stdout) in &state.failures { failures.push(f.name.to_string()); if !stdout.is_empty() { fail_out.push_str(&format!("---- {} stdout ----\n", f.name)); let output = String::from_utf8_lossy(stdout); fail_out.push_str(&output); fail_out.push('\n'); } } if !fail_out.is_empty() { self.write_plain("\n")?; self.write_plain(&fail_out)?; } self.write_plain("\nfailures:\n")?; failures.sort(); for name in &failures { self.write_plain(&format!(" {}\n", name))?; } Ok(()) } fn write_test_name(&mut self, desc: &TestDesc) -> io::Result<()> { let name = desc.padded_name(self.max_name_len, desc.name.padding()); self.write_plain(&format!("test {} ... ", name))?; Ok(()) } } impl OutputFormatter for TerseFormatter { fn write_run_start(&mut self, test_count: usize) -> io::Result<()> { self.total_test_count = test_count; let noun = if test_count != 1 { "tests" } else { "test" }; self.write_plain(&format!("\nrunning {} {}\n", test_count, noun)) } fn write_test_start(&mut self, desc: &TestDesc) -> io::Result<()> { // Remnants from old libtest code that used the padding value // in order to indicate benchmarks. // When running benchmarks, terse-mode should still print their name as if // it is the Pretty formatter. 
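// Concretely (based on the padding convention described above, an assumption about
// `TestFn::padding` which is defined outside this file): benchmark functions report
// NamePadding::PadOnRight, so in single-threaded terse runs their names are still
// printed up front, while ordinary tests (PadNone) appear only as progress dots.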
if !self.is_multithreaded && desc.name.padding() == NamePadding::PadOnRight { self.write_test_name(desc)?; } Ok(()) } fn write_result( &mut self, desc: &TestDesc, result: &TestResult, _: Option<&time::TestExecTime>, _: &[u8], _: &ConsoleTestState, ) -> io::Result<()> { match *result { TestResult::TrOk => self.write_ok(), TestResult::TrFailed | TestResult::TrFailedMsg(_) | TestResult::TrTimedFail => { self.write_failed() } TestResult::TrIgnored => self.write_ignored(), TestResult::TrAllowedFail => self.write_allowed_fail(), TestResult::TrBench(ref bs) => { if self.is_multithreaded { self.write_test_name(desc)?; } self.write_bench()?; self.write_plain(&format!(": {}\n", fmt_bench_samples(bs))) } } } fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> { self.write_plain(&format!( "test {} has been running for over {} seconds\n", desc.name, time::TEST_WARN_TIMEOUT_S )) } fn write_run_finish(&mut self, state: &ConsoleTestState) -> io::Result { if state.options.display_output { self.write_outputs(state)?; } let success = state.failed == 0; if !success { self.write_failures(state)?; } self.write_plain("\ntest result: ")?; if success { // There's no parallelism at this point so it's safe to use color self.write_pretty("ok", term::color::GREEN)?; } else { self.write_pretty("FAILED", term::color::RED)?; } let s = if state.allowed_fail > 0 { format!( ". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out", state.passed, state.failed + state.allowed_fail, state.allowed_fail, state.ignored, state.measured, state.filtered_out ) } else { format!( ". {} passed; {} failed; {} ignored; {} measured; {} filtered out", state.passed, state.failed, state.ignored, state.measured, state.filtered_out ) }; self.write_plain(&s)?; if let Some(ref exec_time) = state.exec_time { let time_str = format!("; finished in {}", exec_time); self.write_plain(&time_str)?; } self.write_plain("\n\n")?; Ok(success) } } tester-0.9.1/src/helpers/concurrency.rs000064400000000000000000000007221046102023000162730ustar 00000000000000//! Helper module which helps to determine amount of threads to be used //! during tests execution. use std::{env, num::NonZeroUsize}; pub fn get_concurrency() -> usize { if let Ok(value) = env::var("RUST_TEST_THREADS") { match value.parse::().ok() { Some(n) => n.get(), _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", value), } } else { num_cpus::get_physical() } } tester-0.9.1/src/helpers/exit_code.rs000064400000000000000000000012141046102023000157010ustar 00000000000000//! Helper module to detect subprocess exit code. use std::process::ExitStatus; #[cfg(not(unix))] pub fn get_exit_code(status: ExitStatus) -> Result { status.code().ok_or("received no exit code from child process".into()) } #[cfg(unix)] pub fn get_exit_code(status: ExitStatus) -> Result { use std::os::unix::process::ExitStatusExt; match status.code() { Some(code) => Ok(code), None => match status.signal() { Some(signal) => Err(format!("child process exited with signal {}", signal)), None => Err("child process exited with unknown signal".into()), }, } } tester-0.9.1/src/helpers/isatty.rs000064400000000000000000000017571046102023000152670ustar 00000000000000//! Helper module which provides a function to test //! if stdout is a tty. cfg_if::cfg_if! 
{ if #[cfg(unix)] { pub fn stdout_isatty() -> bool { unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 } } } else if #[cfg(windows)] { pub fn stdout_isatty() -> bool { type DWORD = u32; type BOOL = i32; type HANDLE = *mut u8; type LPDWORD = *mut u32; const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD; extern "system" { fn GetStdHandle(which: DWORD) -> HANDLE; fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL; } unsafe { let handle = GetStdHandle(STD_OUTPUT_HANDLE); let mut out = 0; GetConsoleMode(handle, &mut out) != 0 } } } else { // FIXME: Implement isatty on SGX pub fn stdout_isatty() -> bool { false } } } tester-0.9.1/src/helpers/metrics.rs000064400000000000000000000030621046102023000154070ustar 00000000000000//! Benchmark metrics. use std::collections::BTreeMap; #[derive(Clone, PartialEq, Debug, Copy)] pub struct Metric { value: f64, noise: f64, } impl Metric { pub fn new(value: f64, noise: f64) -> Metric { Metric { value, noise } } } #[derive(Clone, PartialEq)] pub struct MetricMap(BTreeMap); impl MetricMap { pub fn new() -> MetricMap { MetricMap(BTreeMap::new()) } /// Insert a named `value` (+/- `noise`) metric into the map. The value /// must be non-negative. The `noise` indicates the uncertainty of the /// metric, which doubles as the "noise range" of acceptable /// pairwise-regressions on this named value, when comparing from one /// metric to the next using `compare_to_old`. /// /// If `noise` is positive, then it means this metric is of a value /// you want to see grow smaller, so a change larger than `noise` in the /// positive direction represents a regression. /// /// If `noise` is negative, then it means this metric is of a value /// you want to see grow larger, so a change larger than `noise` in the /// negative direction represents a regression. pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) { let m = Metric { value, noise }; self.0.insert(name.to_owned(), m); } pub fn fmt_metrics(&self) -> String { let v = self .0 .iter() .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise)) .collect::>(); v.join(", ") } } tester-0.9.1/src/helpers/mod.rs000064400000000000000000000002421046102023000145150ustar 00000000000000//! Module with common helpers not directly related to tests //! but used in `libtest`. pub mod concurrency; pub mod exit_code; pub mod isatty; pub mod metrics; tester-0.9.1/src/lib.rs000064400000000000000000000525531046102023000130560ustar 00000000000000//! Support code for rustc's built in unit-test and micro-benchmarking //! framework. //! //! Almost all user code will only be interested in `Bencher` and //! `black_box`. All other interactions (such as writing tests and //! benchmarks themselves) should be done via the `#[test]` and //! `#[bench]` attributes. //! //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details. // Currently, not much of this is meant for users. It is intended to // support the simplest interface possible for representing and // running tests while providing a base that other test frameworks may // build off of. // N.B., this is also specified in this crate's Cargo.toml, but librustc_ast contains logic specific to // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by // cargo) to detect this crate. 
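//
// As an illustration only (not code that ships with this crate), a hand-rolled
// harness built on the API re-exported below might look like this sketch; the test
// name and body are invented for the example, and on stable the usual route is still
// the harness that `rustc --test` generates for `#[test]` functions.
//
//     use tester::{
//         test_main_static, ShouldPanic, StaticTestFn, StaticTestName, TestDesc,
//         TestDescAndFn, TestType,
//     };
//
//     fn my_case() {
//         assert_eq!(2 + 2, 4);
//     }
//
//     fn main() {
//         let case = TestDescAndFn {
//             desc: TestDesc {
//                 name: StaticTestName("my_case"),
//                 ignore: false,
//                 should_panic: ShouldPanic::No,
//                 allow_fail: false,
//                 test_type: TestType::Unknown,
//             },
//             testfn: StaticTestFn(my_case),
//         };
//         test_main_static(&[&case]);
//     }
//
// The two `cfg_attr` lines below tie the optional Cargo features to nightly-only
// functionality: `asm_black_box` enables `feature(test)` (presumably so `black_box`
// can defer to the real compiler hint), and `capture` enables
// `internal_output_capture`, which `run_test_in_process` uses for in-process output
// capture.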
#![cfg_attr(feature = "asm_black_box", feature(test))] #![cfg_attr(feature = "capture", feature(internal_output_capture))] // Public reexports pub use self::bench::{black_box, Bencher}; pub use self::console::run_tests_console; pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic}; pub use self::types::TestName::*; pub use self::types::*; pub use self::ColorConfig::*; pub use cli::TestOpts; // Module to be used by rustc to compile tests in libtest pub mod test { pub use crate::{ assert_test_result, bench::Bencher, cli::{parse_opts, TestOpts}, filter_tests, helpers::metrics::{Metric, MetricMap}, options::{Options, RunIgnored, RunStrategy, ShouldPanic}, run_test, test_main, test_main_static, test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk}, time::{TestExecTime, TestTimeOptions}, types::{ DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestType, }, }; } use std::{ env, io, io::prelude::Write, panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo}, process::{self, Command}, sync::mpsc::{channel, Sender}, sync::{Arc, Mutex}, thread, time::{Duration, Instant}, }; pub mod bench; mod cli; mod console; mod event; mod formatters; mod helpers; mod options; pub mod stats; mod test_result; mod time; mod types; #[cfg(test)] mod tests; use event::{CompletedTest, TestEvent}; use helpers::concurrency::get_concurrency; use helpers::exit_code::get_exit_code; use options::{Concurrent, RunStrategy}; use test_result::*; use time::TestExecTime; // Process exit code to be used to indicate test failures. const ERROR_EXIT_CODE: i32 = 101; const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE"; // The default console test runner. It accepts the command line // arguments and a vector of test_descs. pub fn test_main(args: &[String], tests: Vec, options: Option) { let mut opts = match cli::parse_opts(args) { Some(Ok(o)) => o, Some(Err(msg)) => { eprintln!("error: {}", msg); process::exit(ERROR_EXIT_CODE); } None => return, }; if let Some(options) = options { opts.options = options; } if opts.list { if let Err(e) = console::list_tests_console(&opts, tests) { eprintln!("error: io error when listing tests: {:?}", e); process::exit(ERROR_EXIT_CODE); } } else { match console::run_tests_console(&opts, tests) { Ok(true) => {} Ok(false) => process::exit(ERROR_EXIT_CODE), Err(e) => { eprintln!("error: io error when listing tests: {:?}", e); process::exit(ERROR_EXIT_CODE); } } } } /// A variant optimized for invocation with a static test vector. /// This will panic (intentionally) when fed any dynamic tests. /// /// This is the entry point for the main function generated by `rustc --test` /// when panic=unwind. pub fn test_main_static(tests: &[&TestDescAndFn]) { let args = env::args().collect::>(); let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect(); test_main(&args, owned_tests, None) } /// A variant optimized for invocation with a static test vector. /// This will panic (intentionally) when fed any dynamic tests. /// /// Runs tests in panic=abort mode, which involves spawning subprocesses for /// tests. /// /// This is the entry point for the main function generated by `rustc --test` /// when panic=abort. pub fn test_main_static_abort(tests: &[&TestDescAndFn]) { // If we're being run in SpawnedSecondary mode, run the test here. run_test // will then exit the process. 
if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) { env::remove_var(SECONDARY_TEST_INVOKER_VAR); let test = tests .iter() .filter(|test| test.desc.name.as_slice() == name) .map(make_owned_test) .next() .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{}'", name)); let TestDescAndFn { desc, testfn } = test; let testfn = match testfn { StaticTestFn(f) => f, _ => panic!("only static tests are supported"), }; run_test_in_spawned_subprocess(desc, Box::new(testfn)); } let args = env::args().collect::>(); let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect(); test_main(&args, owned_tests, Some(Options::new().panic_abort(true))) } /// Clones static values for putting into a dynamic vector, which test_main() /// needs to hand out ownership of tests to parallel test runners. /// /// This will panic when fed any dynamic tests, because they cannot be cloned. fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn { match test.testfn { StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() }, StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() }, _ => panic!("non-static tests passed to test::test_main_static"), } } /// Invoked when unit tests terminate. Should panic if the unit /// Tests is considered a failure. By default, invokes `report()` /// and checks for a `0` result. pub trait Termination { fn report(self) -> i32; } impl Termination for () { fn report(self) -> i32 { 0 } } /// Invoked when unit tests terminate. Should panic if the unit /// Tests is considered a failure. By default, invokes `report()` /// and checks for a `0` result. pub fn assert_test_result(result: T) { let code = result.report(); assert_eq!( code, 0, "the test returned a termination value with a non-zero status code ({}) \ which indicates a failure", code ); } pub fn run_tests( opts: &TestOpts, tests: Vec, mut notify_about_test_event: F, ) -> io::Result<()> where F: FnMut(TestEvent) -> io::Result<()>, { use std::collections::{self, HashMap}; use std::hash::BuildHasherDefault; use std::sync::mpsc::RecvTimeoutError; // Use a deterministic hasher type TestMap = HashMap>; let tests_len = tests.len(); let mut filtered_tests = filter_tests(opts, tests); if !opts.bench_benchmarks { filtered_tests = convert_benchmarks_to_tests(filtered_tests); } let filtered_tests = { let mut filtered_tests = filtered_tests; for test in filtered_tests.iter_mut() { test.desc.name = test.desc.name.with_padding(test.testfn.padding()); } filtered_tests }; let filtered_out = tests_len - filtered_tests.len(); let event = TestEvent::TeFilteredOut(filtered_out); notify_about_test_event(event)?; let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect(); let event = TestEvent::TeFiltered(filtered_descs); notify_about_test_event(event)?; let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests .into_iter() .partition(|e| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_))); let concurrency = opts.test_threads.unwrap_or_else(get_concurrency); let mut remaining = filtered_tests; remaining.reverse(); let mut pending = 0; let (tx, rx) = channel::(); let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process { RunStrategy::SpawnPrimary } else { RunStrategy::InProcess }; let mut running_tests: TestMap = HashMap::default(); fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec { let now = Instant::now(); let timed_out = running_tests .iter() .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone()) 
} else { None }) .collect(); for test in &timed_out { running_tests.remove(test); } timed_out } fn calc_timeout(running_tests: &TestMap) -> Option { running_tests.values().min().map(|next_timeout| { let now = Instant::now(); if *next_timeout >= now { *next_timeout - now } else { Duration::new(0, 0) } }) } if concurrency == 1 { while !remaining.is_empty() { let test = remaining.pop().unwrap(); let event = TestEvent::TeWait(test.desc.clone()); notify_about_test_event(event)?; run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::No); let completed_test = rx.recv().unwrap(); let event = TestEvent::TeResult(completed_test); notify_about_test_event(event)?; } } else { while pending > 0 || !remaining.is_empty() { while pending < concurrency && !remaining.is_empty() { let test = remaining.pop().unwrap(); let timeout = time::get_default_test_timeout(); running_tests.insert(test.desc.clone(), timeout); let event = TestEvent::TeWait(test.desc.clone()); notify_about_test_event(event)?; //here no pad run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::Yes); pending += 1; } let mut res; loop { if let Some(timeout) = calc_timeout(&running_tests) { res = rx.recv_timeout(timeout); for test in get_timed_out_tests(&mut running_tests) { let event = TestEvent::TeTimeout(test); notify_about_test_event(event)?; } match res { Err(RecvTimeoutError::Timeout) => { // Result is not yet ready, continue waiting. } _ => { // We've got a result, stop the loop. break; } } } else { res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected); break; } } let completed_test = res.unwrap(); running_tests.remove(&completed_test.desc); let event = TestEvent::TeResult(completed_test); notify_about_test_event(event)?; pending -= 1; } } if opts.bench_benchmarks { // All benchmarks run at the end, in serial. 
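        // Each benchmark result is received from the channel before the next one is
        // started, so benchmarks never overlap with each other or with the parallel
        // test phase above.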
for b in filtered_benchs { let event = TestEvent::TeWait(b.desc.clone()); notify_about_test_event(event)?; run_test(opts, false, b, run_strategy, tx.clone(), Concurrent::No); let completed_test = rx.recv().unwrap(); let event = TestEvent::TeResult(completed_test); notify_about_test_event(event)?; } } Ok(()) } pub fn filter_tests(opts: &TestOpts, tests: Vec) -> Vec { let mut filtered = tests; let matches_filter = |test: &TestDescAndFn, filter: &str| { let test_name = test.desc.name.as_slice(); match opts.filter_exact { true => test_name == filter, false => test_name.contains(filter), } }; // Remove tests that don't match the test filter if !opts.filters.is_empty() { filtered.retain(|test| opts.filters.iter().any(|filter| matches_filter(test, filter))); } // Skip tests that match any of the skip filters filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf))); // Excludes #[should_panic] tests if opts.exclude_should_panic { filtered.retain(|test| test.desc.should_panic == ShouldPanic::No); } // maybe unignore tests match opts.run_ignored { RunIgnored::Yes => { filtered.iter_mut().for_each(|test| test.desc.ignore = false); } RunIgnored::Only => { filtered.retain(|test| test.desc.ignore); filtered.iter_mut().for_each(|test| test.desc.ignore = false); } RunIgnored::No => {} } // Sort the tests alphabetically filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice())); filtered } pub fn convert_benchmarks_to_tests(tests: Vec) -> Vec { // convert benchmarks to tests, if we're not benchmarking them tests .into_iter() .map(|x| { let testfn = match x.testfn { DynBenchFn(bench) => DynTestFn(Box::new(move || { bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b))) })), StaticBenchFn(benchfn) => DynTestFn(Box::new(move || { bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b))) })), f => f, }; TestDescAndFn { desc: x.desc, testfn } }) .collect() } pub fn run_test( opts: &TestOpts, force_ignore: bool, test: TestDescAndFn, strategy: RunStrategy, monitor_ch: Sender, concurrency: Concurrent, ) { let TestDescAndFn { desc, testfn } = test; // Emscripten can catch panics but other wasm targets cannot let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No && cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten"); if force_ignore || desc.ignore || ignore_because_no_process_support { let message = CompletedTest::new(desc, TrIgnored, None, Vec::new()); monitor_ch.send(message).unwrap(); return; } struct TestRunOpts { pub strategy: RunStrategy, pub nocapture: bool, pub concurrency: Concurrent, pub time: Option, } fn run_test_inner( desc: TestDesc, monitor_ch: Sender, testfn: Box, opts: TestRunOpts, ) { let concurrency = opts.concurrency; let name = desc.name.clone(); let runtest = move || match opts.strategy { RunStrategy::InProcess => run_test_in_process( desc, opts.nocapture, opts.time.is_some(), testfn, monitor_ch, opts.time, ), RunStrategy::SpawnPrimary => spawn_test_subprocess( desc, opts.nocapture, opts.time.is_some(), monitor_ch, opts.time, ), }; // If the platform is single-threaded we're just going to run // the test synchronously, regardless of the concurrency // level. 
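        // Emscripten and other wasm32 targets are treated as not supporting threads;
        // there the test closure runs inline on the current thread instead of on a
        // newly spawned one.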
let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32"); if concurrency == Concurrent::Yes && supports_threads { let cfg = thread::Builder::new().name(name.as_slice().to_owned()); cfg.spawn(runtest).unwrap(); } else { runtest(); } } let test_run_opts = TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options }; match testfn { DynBenchFn(bencher) => { // Benchmarks aren't expected to panic, so we run them all in-process. crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| { bencher.run(harness) }); } StaticBenchFn(benchfn) => { // Benchmarks aren't expected to panic, so we run them all in-process. crate::bench::benchmark(desc, monitor_ch, opts.nocapture, benchfn); } DynTestFn(f) => { match strategy { RunStrategy::InProcess => (), _ => panic!("Cannot run dynamic test fn out-of-process"), }; run_test_inner( desc, monitor_ch, Box::new(move || __rust_begin_short_backtrace(f)), test_run_opts, ); } StaticTestFn(f) => run_test_inner( desc, monitor_ch, Box::new(move || __rust_begin_short_backtrace(f)), test_run_opts, ), } } /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`. #[inline(never)] fn __rust_begin_short_backtrace(f: F) { f(); // prevent this frame from being tail-call optimised away black_box(()); } fn run_test_in_process( desc: TestDesc, nocapture: bool, report_time: bool, testfn: Box, monitor_ch: Sender, time_opts: Option, ) { // Buffer for capturing standard I/O let data = Arc::new(Mutex::new(Vec::new())); if !nocapture { #[cfg(feature = "capture")] io::set_output_capture(Some(data.clone())); } let start = if report_time { Some(Instant::now()) } else { None }; let result = catch_unwind(AssertUnwindSafe(testfn)); let exec_time = start.map(|start| { let duration = start.elapsed(); TestExecTime(duration) }); #[cfg(feature = "capture")] io::set_output_capture(None); let test_result = match result { Ok(()) => calc_result(&desc, Ok(()), &time_opts, &exec_time), Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time), }; let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec(); let message = CompletedTest::new(desc, test_result, exec_time, stdout); monitor_ch.send(message).unwrap(); } fn spawn_test_subprocess( desc: TestDesc, nocapture: bool, report_time: bool, monitor_ch: Sender, time_opts: Option, ) { let (result, test_output, exec_time) = (|| { let args = env::args().collect::>(); let current_exe = &args[0]; let mut command = Command::new(current_exe); command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice()); if nocapture { command.stdout(process::Stdio::inherit()); command.stderr(process::Stdio::inherit()); } let start = if report_time { Some(Instant::now()) } else { None }; let output = match command.output() { Ok(out) => out, Err(e) => { let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e); return (TrFailed, err.into_bytes(), None); } }; let exec_time = start.map(|start| { let duration = start.elapsed(); TestExecTime(duration) }); let std::process::Output { stdout, stderr, status } = output; let mut test_output = stdout; formatters::write_stderr_delimiter(&mut test_output, &desc.name); test_output.extend_from_slice(&stderr); let result = match (|| -> Result { let exit_code = get_exit_code(status)?; Ok(get_result_from_exit_code(&desc, exit_code, &time_opts, &exec_time)) })() { Ok(r) => r, Err(e) => { write!(&mut test_output, "Unexpected error: {}", e).unwrap(); TrFailed } }; (result, test_output, exec_time) })(); let message = 
CompletedTest::new(desc, result, exec_time, test_output); monitor_ch.send(message).unwrap(); } fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box) -> ! { let builtin_panic_hook = panic::take_hook(); let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| { let test_result = match panic_info { Some(info) => calc_result(&desc, Err(info.payload()), &None, &None), None => calc_result(&desc, Ok(()), &None, &None), }; // We don't support serializing TrFailedMsg, so just // print the message out to stderr. if let TrFailedMsg(msg) = &test_result { eprintln!("{}", msg); } if let Some(info) = panic_info { builtin_panic_hook(info); } if let TrOk = test_result { process::exit(test_result::TR_OK); } else { process::exit(test_result::TR_FAILED); } }); let record_result2 = record_result.clone(); panic::set_hook(Box::new(move |info| record_result2(Some(&info)))); testfn(); record_result(None); unreachable!("panic=abort callback should have exited the process") } tester-0.9.1/src/options.rs000064400000000000000000000040031046102023000137660ustar 00000000000000//! Enums denoting options for test execution. /// Whether to execute tests concurrently or not #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum Concurrent { Yes, No, } /// Number of times to run a benchmarked function #[derive(Clone, PartialEq, Eq)] pub enum BenchMode { Auto, Single, } /// Whether test is expected to panic or not #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum ShouldPanic { No, Yes, YesWithMessage(&'static str), } /// Whether should console output be colored or not #[derive(Copy, Clone, Debug)] pub enum ColorConfig { AutoColor, AlwaysColor, NeverColor, } /// Format of the test results output #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum OutputFormat { /// Verbose output Pretty, /// Quiet output Terse, /// JSON output Json, } /// Whether ignored test should be run or not #[derive(Copy, Clone, Debug, PartialEq, Eq)] pub enum RunIgnored { Yes, No, /// Run only ignored tests Only, } #[derive(Clone, Copy)] pub enum RunStrategy { /// Runs the test in the current process, and sends the result back over the /// supplied channel. InProcess, /// Spawns a subprocess to run the test, and sends the result back over the /// supplied channel. Requires `argv[0]` to exist and point to the binary /// that's currently running. SpawnPrimary, } /// Options for the test run defined by the caller (instead of CLI arguments). /// In case we want to add other options as well, just add them in this struct. #[derive(Copy, Clone, Debug)] pub struct Options { pub display_output: bool, pub panic_abort: bool, } impl Options { pub fn new() -> Options { Options { display_output: false, panic_abort: false } } pub fn display_output(mut self, display_output: bool) -> Options { self.display_output = display_output; self } pub fn panic_abort(mut self, panic_abort: bool) -> Options { self.panic_abort = panic_abort; self } } tester-0.9.1/src/stats/tests.rs000064400000000000000000000351001046102023000145750ustar 00000000000000use super::*; use std::io; use std::io::prelude::*; // Test vectors generated from R, using the script src/etc/stat-test-vectors.r. macro_rules! 
assert_approx_eq { ($a: expr, $b: expr) => {{ let (a, b) = (&$a, &$b); assert!((*a - *b).abs() < 1.0e-6, "{} is not approximately equal to {}", *a, *b); }}; } fn check(samples: &[f64], summ: &Summary) { let summ2 = Summary::new(samples); let mut w = io::sink(); let w = &mut w; (write!(w, "\n")).unwrap(); assert_eq!(summ.sum, summ2.sum); assert_eq!(summ.min, summ2.min); assert_eq!(summ.max, summ2.max); assert_eq!(summ.mean, summ2.mean); assert_eq!(summ.median, summ2.median); // We needed a few more digits to get exact equality on these // but they're within float epsilon, which is 1.0e-6. assert_approx_eq!(summ.var, summ2.var); assert_approx_eq!(summ.std_dev, summ2.std_dev); assert_approx_eq!(summ.std_dev_pct, summ2.std_dev_pct); assert_approx_eq!(summ.median_abs_dev, summ2.median_abs_dev); assert_approx_eq!(summ.median_abs_dev_pct, summ2.median_abs_dev_pct); assert_eq!(summ.quartiles, summ2.quartiles); assert_eq!(summ.iqr, summ2.iqr); } #[test] fn test_min_max_nan() { let xs = &[1.0, 2.0, f64::NAN, 3.0, 4.0]; let summary = Summary::new(xs); assert_eq!(summary.min, 1.0); assert_eq!(summary.max, 4.0); } #[test] fn test_norm2() { let val = &[958.0000000000, 924.0000000000]; let summ = &Summary { sum: 1882.0000000000, min: 924.0000000000, max: 958.0000000000, mean: 941.0000000000, median: 941.0000000000, var: 578.0000000000, std_dev: 24.0416305603, std_dev_pct: 2.5549022912, median_abs_dev: 25.2042000000, median_abs_dev_pct: 2.6784484591, quartiles: (932.5000000000, 941.0000000000, 949.5000000000), iqr: 17.0000000000, }; check(val, summ); } #[test] fn test_norm10narrow() { let val = &[ 966.0000000000, 985.0000000000, 1110.0000000000, 848.0000000000, 821.0000000000, 975.0000000000, 962.0000000000, 1157.0000000000, 1217.0000000000, 955.0000000000, ]; let summ = &Summary { sum: 9996.0000000000, min: 821.0000000000, max: 1217.0000000000, mean: 999.6000000000, median: 970.5000000000, var: 16050.7111111111, std_dev: 126.6914010938, std_dev_pct: 12.6742097933, median_abs_dev: 102.2994000000, median_abs_dev_pct: 10.5408964451, quartiles: (956.7500000000, 970.5000000000, 1078.7500000000), iqr: 122.0000000000, }; check(val, summ); } #[test] fn test_norm10medium() { let val = &[ 954.0000000000, 1064.0000000000, 855.0000000000, 1000.0000000000, 743.0000000000, 1084.0000000000, 704.0000000000, 1023.0000000000, 357.0000000000, 869.0000000000, ]; let summ = &Summary { sum: 8653.0000000000, min: 357.0000000000, max: 1084.0000000000, mean: 865.3000000000, median: 911.5000000000, var: 48628.4555555556, std_dev: 220.5186059170, std_dev_pct: 25.4846418487, median_abs_dev: 195.7032000000, median_abs_dev_pct: 21.4704552935, quartiles: (771.0000000000, 911.5000000000, 1017.2500000000), iqr: 246.2500000000, }; check(val, summ); } #[test] fn test_norm10wide() { let val = &[ 505.0000000000, 497.0000000000, 1591.0000000000, 887.0000000000, 1026.0000000000, 136.0000000000, 1580.0000000000, 940.0000000000, 754.0000000000, 1433.0000000000, ]; let summ = &Summary { sum: 9349.0000000000, min: 136.0000000000, max: 1591.0000000000, mean: 934.9000000000, median: 913.5000000000, var: 239208.9888888889, std_dev: 489.0899599142, std_dev_pct: 52.3146817750, median_abs_dev: 611.5725000000, median_abs_dev_pct: 66.9482758621, quartiles: (567.2500000000, 913.5000000000, 1331.2500000000), iqr: 764.0000000000, }; check(val, summ); } #[test] fn test_norm25verynarrow() { let val = &[ 991.0000000000, 1018.0000000000, 998.0000000000, 1013.0000000000, 974.0000000000, 1007.0000000000, 1014.0000000000, 999.0000000000, 1011.0000000000, 
978.0000000000, 985.0000000000, 999.0000000000, 983.0000000000, 982.0000000000, 1015.0000000000, 1002.0000000000, 977.0000000000, 948.0000000000, 1040.0000000000, 974.0000000000, 996.0000000000, 989.0000000000, 1015.0000000000, 994.0000000000, 1024.0000000000, ]; let summ = &Summary { sum: 24926.0000000000, min: 948.0000000000, max: 1040.0000000000, mean: 997.0400000000, median: 998.0000000000, var: 393.2066666667, std_dev: 19.8294393937, std_dev_pct: 1.9888308788, median_abs_dev: 22.2390000000, median_abs_dev_pct: 2.2283567134, quartiles: (983.0000000000, 998.0000000000, 1013.0000000000), iqr: 30.0000000000, }; check(val, summ); } #[test] fn test_exp10a() { let val = &[ 23.0000000000, 11.0000000000, 2.0000000000, 57.0000000000, 4.0000000000, 12.0000000000, 5.0000000000, 29.0000000000, 3.0000000000, 21.0000000000, ]; let summ = &Summary { sum: 167.0000000000, min: 2.0000000000, max: 57.0000000000, mean: 16.7000000000, median: 11.5000000000, var: 287.7888888889, std_dev: 16.9643416875, std_dev_pct: 101.5828843560, median_abs_dev: 13.3434000000, median_abs_dev_pct: 116.0295652174, quartiles: (4.2500000000, 11.5000000000, 22.5000000000), iqr: 18.2500000000, }; check(val, summ); } #[test] fn test_exp10b() { let val = &[ 24.0000000000, 17.0000000000, 6.0000000000, 38.0000000000, 25.0000000000, 7.0000000000, 51.0000000000, 2.0000000000, 61.0000000000, 32.0000000000, ]; let summ = &Summary { sum: 263.0000000000, min: 2.0000000000, max: 61.0000000000, mean: 26.3000000000, median: 24.5000000000, var: 383.5666666667, std_dev: 19.5848580967, std_dev_pct: 74.4671410520, median_abs_dev: 22.9803000000, median_abs_dev_pct: 93.7971428571, quartiles: (9.5000000000, 24.5000000000, 36.5000000000), iqr: 27.0000000000, }; check(val, summ); } #[test] fn test_exp10c() { let val = &[ 71.0000000000, 2.0000000000, 32.0000000000, 1.0000000000, 6.0000000000, 28.0000000000, 13.0000000000, 37.0000000000, 16.0000000000, 36.0000000000, ]; let summ = &Summary { sum: 242.0000000000, min: 1.0000000000, max: 71.0000000000, mean: 24.2000000000, median: 22.0000000000, var: 458.1777777778, std_dev: 21.4050876611, std_dev_pct: 88.4507754589, median_abs_dev: 21.4977000000, median_abs_dev_pct: 97.7168181818, quartiles: (7.7500000000, 22.0000000000, 35.0000000000), iqr: 27.2500000000, }; check(val, summ); } #[test] fn test_exp25() { let val = &[ 3.0000000000, 24.0000000000, 1.0000000000, 19.0000000000, 7.0000000000, 5.0000000000, 30.0000000000, 39.0000000000, 31.0000000000, 13.0000000000, 25.0000000000, 48.0000000000, 1.0000000000, 6.0000000000, 42.0000000000, 63.0000000000, 2.0000000000, 12.0000000000, 108.0000000000, 26.0000000000, 1.0000000000, 7.0000000000, 44.0000000000, 25.0000000000, 11.0000000000, ]; let summ = &Summary { sum: 593.0000000000, min: 1.0000000000, max: 108.0000000000, mean: 23.7200000000, median: 19.0000000000, var: 601.0433333333, std_dev: 24.5161851301, std_dev_pct: 103.3565983562, median_abs_dev: 19.2738000000, median_abs_dev_pct: 101.4410526316, quartiles: (6.0000000000, 19.0000000000, 31.0000000000), iqr: 25.0000000000, }; check(val, summ); } #[test] fn test_binom25() { let val = &[ 18.0000000000, 17.0000000000, 27.0000000000, 15.0000000000, 21.0000000000, 25.0000000000, 17.0000000000, 24.0000000000, 25.0000000000, 24.0000000000, 26.0000000000, 26.0000000000, 23.0000000000, 15.0000000000, 23.0000000000, 17.0000000000, 18.0000000000, 18.0000000000, 21.0000000000, 16.0000000000, 15.0000000000, 31.0000000000, 20.0000000000, 17.0000000000, 15.0000000000, ]; let summ = &Summary { sum: 514.0000000000, min: 
15.0000000000, max: 31.0000000000, mean: 20.5600000000, median: 20.0000000000, var: 20.8400000000, std_dev: 4.5650848842, std_dev_pct: 22.2037202539, median_abs_dev: 5.9304000000, median_abs_dev_pct: 29.6520000000, quartiles: (17.0000000000, 20.0000000000, 24.0000000000), iqr: 7.0000000000, }; check(val, summ); } #[test] fn test_pois25lambda30() { let val = &[ 27.0000000000, 33.0000000000, 34.0000000000, 34.0000000000, 24.0000000000, 39.0000000000, 28.0000000000, 27.0000000000, 31.0000000000, 28.0000000000, 38.0000000000, 21.0000000000, 33.0000000000, 36.0000000000, 29.0000000000, 37.0000000000, 32.0000000000, 34.0000000000, 31.0000000000, 39.0000000000, 25.0000000000, 31.0000000000, 32.0000000000, 40.0000000000, 24.0000000000, ]; let summ = &Summary { sum: 787.0000000000, min: 21.0000000000, max: 40.0000000000, mean: 31.4800000000, median: 32.0000000000, var: 26.5933333333, std_dev: 5.1568724372, std_dev_pct: 16.3814245145, median_abs_dev: 5.9304000000, median_abs_dev_pct: 18.5325000000, quartiles: (28.0000000000, 32.0000000000, 34.0000000000), iqr: 6.0000000000, }; check(val, summ); } #[test] fn test_pois25lambda40() { let val = &[ 42.0000000000, 50.0000000000, 42.0000000000, 46.0000000000, 34.0000000000, 45.0000000000, 34.0000000000, 49.0000000000, 39.0000000000, 28.0000000000, 40.0000000000, 35.0000000000, 37.0000000000, 39.0000000000, 46.0000000000, 44.0000000000, 32.0000000000, 45.0000000000, 42.0000000000, 37.0000000000, 48.0000000000, 42.0000000000, 33.0000000000, 42.0000000000, 48.0000000000, ]; let summ = &Summary { sum: 1019.0000000000, min: 28.0000000000, max: 50.0000000000, mean: 40.7600000000, median: 42.0000000000, var: 34.4400000000, std_dev: 5.8685603004, std_dev_pct: 14.3978417577, median_abs_dev: 5.9304000000, median_abs_dev_pct: 14.1200000000, quartiles: (37.0000000000, 42.0000000000, 45.0000000000), iqr: 8.0000000000, }; check(val, summ); } #[test] fn test_pois25lambda50() { let val = &[ 45.0000000000, 43.0000000000, 44.0000000000, 61.0000000000, 51.0000000000, 53.0000000000, 59.0000000000, 52.0000000000, 49.0000000000, 51.0000000000, 51.0000000000, 50.0000000000, 49.0000000000, 56.0000000000, 42.0000000000, 52.0000000000, 51.0000000000, 43.0000000000, 48.0000000000, 48.0000000000, 50.0000000000, 42.0000000000, 43.0000000000, 42.0000000000, 60.0000000000, ]; let summ = &Summary { sum: 1235.0000000000, min: 42.0000000000, max: 61.0000000000, mean: 49.4000000000, median: 50.0000000000, var: 31.6666666667, std_dev: 5.6273143387, std_dev_pct: 11.3913245723, median_abs_dev: 4.4478000000, median_abs_dev_pct: 8.8956000000, quartiles: (44.0000000000, 50.0000000000, 52.0000000000), iqr: 8.0000000000, }; check(val, summ); } #[test] fn test_unif25() { let val = &[ 99.0000000000, 55.0000000000, 92.0000000000, 79.0000000000, 14.0000000000, 2.0000000000, 33.0000000000, 49.0000000000, 3.0000000000, 32.0000000000, 84.0000000000, 59.0000000000, 22.0000000000, 86.0000000000, 76.0000000000, 31.0000000000, 29.0000000000, 11.0000000000, 41.0000000000, 53.0000000000, 45.0000000000, 44.0000000000, 98.0000000000, 98.0000000000, 7.0000000000, ]; let summ = &Summary { sum: 1242.0000000000, min: 2.0000000000, max: 99.0000000000, mean: 49.6800000000, median: 45.0000000000, var: 1015.6433333333, std_dev: 31.8691595957, std_dev_pct: 64.1488719719, median_abs_dev: 45.9606000000, median_abs_dev_pct: 102.1346666667, quartiles: (29.0000000000, 45.0000000000, 79.0000000000), iqr: 50.0000000000, }; check(val, summ); } #[test] fn test_sum_f64s() { assert_eq!([0.5f64, 3.2321f64, 1.5678f64].sum(), 5.2999); 
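    // `Stats::sum` uses error-compensated partials (see the Shewchuk reference in
    // stats.rs); that is what lets the next test recover `1.2` from
    // `1e30 + 1.2 - 1e30` instead of getting `0.0`.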
} #[test] fn test_sum_f64_between_ints_that_sum_to_0() { assert_eq!([1e30f64, 1.2f64, -1e30f64].sum(), 1.2); } /* #[bench] pub fn sum_three_items(b: &mut Bencher) { b.iter(|| { [1e20f64, 1.5f64, -1e20f64].sum(); }) } #[bench] pub fn sum_many_f64(b: &mut Bencher) { let nums = [-1e30f64, 1e60, 1e30, 1.0, -1e60]; let v = (0..500).map(|i| nums[i % 5]).collect::>(); b.iter(|| { v.sum(); }) } #[bench] pub fn no_iter(_: &mut Bencher) {} */ tester-0.9.1/src/stats.rs000064400000000000000000000246041046102023000134420ustar 00000000000000#![allow(missing_docs)] #![allow(deprecated)] // Float use std::cmp::Ordering::{self, Equal, Greater, Less}; use std::mem; #[cfg(test)] mod tests; fn local_cmp(x: f64, y: f64) -> Ordering { // arbitrarily decide that NaNs are larger than everything. if y.is_nan() { Less } else if x.is_nan() { Greater } else if x < y { Less } else if x == y { Equal } else { Greater } } fn local_sort(v: &mut [f64]) { v.sort_by(|x: &f64, y: &f64| local_cmp(*x, *y)); } /// Trait that provides simple descriptive statistics on a univariate set of numeric samples. pub trait Stats { /// Sum of the samples. /// /// Note: this method sacrifices performance at the altar of accuracy /// Depends on IEEE-754 arithmetic guarantees. See proof of correctness at: /// ["Adaptive Precision Floating-Point Arithmetic and Fast Robust Geometric /// Predicates"][paper] /// /// [paper]: http://www.cs.cmu.edu/~quake-papers/robust-arithmetic.ps fn sum(&self) -> f64; /// Minimum value of the samples. fn min(&self) -> f64; /// Maximum value of the samples. fn max(&self) -> f64; /// Arithmetic mean (average) of the samples: sum divided by sample-count. /// /// See: fn mean(&self) -> f64; /// Median of the samples: value separating the lower half of the samples from the higher half. /// Equal to `self.percentile(50.0)`. /// /// See: fn median(&self) -> f64; /// Variance of the samples: bias-corrected mean of the squares of the differences of each /// sample from the sample mean. Note that this calculates the _sample variance_ rather than the /// population variance, which is assumed to be unknown. It therefore corrects the `(n-1)/n` /// bias that would appear if we calculated a population variance, by dividing by `(n-1)` rather /// than `n`. /// /// See: fn var(&self) -> f64; /// Standard deviation: the square root of the sample variance. /// /// Note: this is not a robust statistic for non-normal distributions. Prefer the /// `median_abs_dev` for unknown distributions. /// /// See: fn std_dev(&self) -> f64; /// Standard deviation as a percent of the mean value. See `std_dev` and `mean`. /// /// Note: this is not a robust statistic for non-normal distributions. Prefer the /// `median_abs_dev_pct` for unknown distributions. fn std_dev_pct(&self) -> f64; /// Scaled median of the absolute deviations of each sample from the sample median. This is a /// robust (distribution-agnostic) estimator of sample variability. Use this in preference to /// `std_dev` if you cannot assume your sample is normally distributed. Note that this is scaled /// by the constant `1.4826` to allow its use as a consistent estimator for the standard /// deviation. /// /// See: fn median_abs_dev(&self) -> f64; /// Median absolute deviation as a percent of the median. See `median_abs_dev` and `median`. fn median_abs_dev_pct(&self) -> f64; /// Percentile: the value below which `pct` percent of the values in `self` fall. For example, /// percentile(95.0) will return the value `v` such that 95% of the samples `s` in `self` /// satisfy `s <= v`. 
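    ///
    /// For example, `[1.0, 2.0, 3.0, 4.0].percentile(50.0)` is `2.5`: the target rank
    /// `0.5 * 3 = 1.5` falls halfway between the second and third sorted samples, so
    /// the result is interpolated between them.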
/// /// Calculated by linear interpolation between closest ranks. /// /// See: fn percentile(&self, pct: f64) -> f64; /// Quartiles of the sample: three values that divide the sample into four equal groups, each /// with 1/4 of the data. The middle value is the median. See `median` and `percentile`. This /// function may calculate the 3 quartiles more efficiently than 3 calls to `percentile`, but /// is otherwise equivalent. /// /// See also: fn quartiles(&self) -> (f64, f64, f64); /// Inter-quartile range: the difference between the 25th percentile (1st quartile) and the 75th /// percentile (3rd quartile). See `quartiles`. /// /// See also: fn iqr(&self) -> f64; } /// Extracted collection of all the summary statistics of a sample set. #[derive(Debug, Clone, PartialEq, Copy)] #[allow(missing_docs)] pub struct Summary { pub sum: f64, pub min: f64, pub max: f64, pub mean: f64, pub median: f64, pub var: f64, pub std_dev: f64, pub std_dev_pct: f64, pub median_abs_dev: f64, pub median_abs_dev_pct: f64, pub quartiles: (f64, f64, f64), pub iqr: f64, } impl Summary { /// Construct a new summary of a sample set. pub fn new(samples: &[f64]) -> Summary { Summary { sum: samples.sum(), min: samples.min(), max: samples.max(), mean: samples.mean(), median: samples.median(), var: samples.var(), std_dev: samples.std_dev(), std_dev_pct: samples.std_dev_pct(), median_abs_dev: samples.median_abs_dev(), median_abs_dev_pct: samples.median_abs_dev_pct(), quartiles: samples.quartiles(), iqr: samples.iqr(), } } } impl Stats for [f64] { // FIXME #11059 handle NaN, inf and overflow fn sum(&self) -> f64 { let mut partials = vec![]; for &x in self { let mut x = x; let mut j = 0; // This inner loop applies `hi`/`lo` summation to each // partial so that the list of partial sums remains exact. for i in 0..partials.len() { let mut y: f64 = partials[i]; if x.abs() < y.abs() { mem::swap(&mut x, &mut y); } // Rounded `x+y` is stored in `hi` with round-off stored in // `lo`. Together `hi+lo` are exactly equal to `x+y`. let hi = x + y; let lo = y - (hi - x); if lo != 0.0 { partials[j] = lo; j += 1; } x = hi; } if j >= partials.len() { partials.push(x); } else { partials[j] = x; partials.truncate(j + 1); } } let zero: f64 = 0.0; partials.iter().fold(zero, |p, q| p + *q) } fn min(&self) -> f64 { assert!(!self.is_empty()); self.iter().fold(self[0], |p, q| p.min(*q)) } fn max(&self) -> f64 { assert!(!self.is_empty()); self.iter().fold(self[0], |p, q| p.max(*q)) } fn mean(&self) -> f64 { assert!(!self.is_empty()); self.sum() / (self.len() as f64) } fn median(&self) -> f64 { self.percentile(50_f64) } fn var(&self) -> f64 { if self.len() < 2 { 0.0 } else { let mean = self.mean(); let mut v: f64 = 0.0; for s in self { let x = *s - mean; v += x * x; } // N.B., this is _supposed to be_ len-1, not len. If you // change it back to len, you will be calculating a // population variance, not a sample variance. let denom = (self.len() - 1) as f64; v / denom } } fn std_dev(&self) -> f64 { self.var().sqrt() } fn std_dev_pct(&self) -> f64 { let hundred = 100_f64; (self.std_dev() / self.mean()) * hundred } fn median_abs_dev(&self) -> f64 { let med = self.median(); let abs_devs: Vec = self.iter().map(|&v| (med - v).abs()).collect(); // This constant is derived by smarter statistics brains than me, but it is // consistent with how R and other packages treat the MAD. 
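        // (1.4826 is approximately 1 / qnorm(0.75) in R terms: the factor that makes
        // the MAD a consistent estimator of the standard deviation for normally
        // distributed data.)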
let number = 1.4826; abs_devs.median() * number } fn median_abs_dev_pct(&self) -> f64 { let hundred = 100_f64; (self.median_abs_dev() / self.median()) * hundred } fn percentile(&self, pct: f64) -> f64 { let mut tmp = self.to_vec(); local_sort(&mut tmp); percentile_of_sorted(&tmp, pct) } fn quartiles(&self) -> (f64, f64, f64) { let mut tmp = self.to_vec(); local_sort(&mut tmp); let first = 25_f64; let a = percentile_of_sorted(&tmp, first); let second = 50_f64; let b = percentile_of_sorted(&tmp, second); let third = 75_f64; let c = percentile_of_sorted(&tmp, third); (a, b, c) } fn iqr(&self) -> f64 { let (a, _, c) = self.quartiles(); c - a } } // Helper function: extract a value representing the `pct` percentile of a sorted sample-set, using // linear interpolation. If samples are not sorted, return nonsensical value. fn percentile_of_sorted(sorted_samples: &[f64], pct: f64) -> f64 { assert!(!sorted_samples.is_empty()); if sorted_samples.len() == 1 { return sorted_samples[0]; } let zero: f64 = 0.0; assert!(zero <= pct); let hundred = 100_f64; assert!(pct <= hundred); if pct == hundred { return sorted_samples[sorted_samples.len() - 1]; } let length = (sorted_samples.len() - 1) as f64; let rank = (pct / hundred) * length; let lrank = rank.floor(); let d = rank - lrank; let n = lrank as usize; let lo = sorted_samples[n]; let hi = sorted_samples[n + 1]; lo + (hi - lo) * d } /// Winsorize a set of samples, replacing values above the `100-pct` percentile /// and below the `pct` percentile with those percentiles themselves. This is a /// way of minimizing the effect of outliers, at the cost of biasing the sample. /// It differs from trimming in that it does not change the number of samples, /// just changes the values of those that are outliers. /// /// See: pub fn winsorize(samples: &mut [f64], pct: f64) { let mut tmp = samples.to_vec(); local_sort(&mut tmp); let lo = percentile_of_sorted(&tmp, pct); let hundred = 100_f64; let hi = percentile_of_sorted(&tmp, hundred - pct); for samp in samples { if *samp > hi { *samp = hi } else if *samp < lo { *samp = lo } } } tester-0.9.1/src/test_result.rs000064400000000000000000000070411046102023000146550ustar 00000000000000use std::any::Any; use super::bench::BenchSamples; use super::options::ShouldPanic; use super::time; use super::types::TestDesc; pub use self::TestResult::*; // Return codes for secondary process. // Start somewhere other than 0 so we know the return code means what we think // it means. pub const TR_OK: i32 = 50; pub const TR_FAILED: i32 = 51; #[derive(Debug, Clone, PartialEq)] pub enum TestResult { TrOk, TrFailed, TrFailedMsg(String), TrIgnored, TrAllowedFail, TrBench(BenchSamples), TrTimedFail, } unsafe impl Send for TestResult {} /// Creates a `TestResult` depending on the raw result of test execution /// and associated data. 
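///
/// In short: the test is `TrOk` when it ran to completion with `ShouldPanic::No`, or
/// when it panicked and a panic was expected (with a matching message if
/// `ShouldPanic::YesWithMessage` was used). A message mismatch or any other unexpected
/// outcome becomes `TrFailed`/`TrFailedMsg` (or `TrAllowedFail` when `allow_fail` is
/// set), an expected panic that never happened is always `TrFailedMsg`, and an
/// otherwise passing test can still be downgraded to `TrTimedFail` by the timeout
/// check at the end.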
pub fn calc_result<'a>( desc: &TestDesc, task_result: Result<(), &'a dyn Any>, time_opts: &Option, exec_time: &Option, ) -> TestResult { let result = match (&desc.should_panic, task_result) { (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TestResult::TrOk, (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => { let maybe_panic_str = err .downcast_ref::() .map(|e| &**e) .or_else(|| err.downcast_ref::<&'static str>().copied()); if maybe_panic_str.map(|e| e.contains(msg)).unwrap_or(false) { TestResult::TrOk } else if desc.allow_fail { TestResult::TrAllowedFail } else if let Some(panic_str) = maybe_panic_str { TestResult::TrFailedMsg(format!( r#"panic did not contain expected string panic message: `{:?}`, expected substring: `{:?}`"#, panic_str, msg )) } else { TestResult::TrFailedMsg(format!( r#"expected panic with string value, found non-string value: `{:?}` expected substring: `{:?}`"#, (**err).type_id(), msg )) } } (&ShouldPanic::Yes, Ok(())) => { TestResult::TrFailedMsg("test did not panic as expected".to_string()) } _ if desc.allow_fail => TestResult::TrAllowedFail, _ => TestResult::TrFailed, }; // If test is already failed (or allowed to fail), do not change the result. if result != TestResult::TrOk { return result; } // Check if test is failed due to timeout. if let (Some(opts), Some(time)) = (time_opts, exec_time) { if opts.error_on_excess && opts.is_critical(desc, time) { return TestResult::TrTimedFail; } } result } /// Creates a `TestResult` depending on the exit code of test subprocess. pub fn get_result_from_exit_code( desc: &TestDesc, code: i32, time_opts: &Option, exec_time: &Option, ) -> TestResult { let result = match (desc.allow_fail, code) { (_, TR_OK) => TestResult::TrOk, (true, TR_FAILED) => TestResult::TrAllowedFail, (false, TR_FAILED) => TestResult::TrFailed, (_, _) => TestResult::TrFailedMsg(format!("got unexpected return code {}", code)), }; // If test is already failed (or allowed to fail), do not change the result. if result != TestResult::TrOk { return result; } // Check if test is failed due to timeout. 
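    // Only relevant when time limits are being enforced (`error_on_excess`) and the
    // measured time crosses the critical threshold for this test's type.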
if let (Some(opts), Some(time)) = (time_opts, exec_time) { if opts.error_on_excess && opts.is_critical(desc, time) { return TestResult::TrTimedFail; } } result } tester-0.9.1/src/tests.rs000064400000000000000000000521251046102023000134450ustar 00000000000000use super::*; use crate::{ bench::Bencher, console::OutputLocation, formatters::PrettyFormatter, options::OutputFormat, test::{ filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored, RunStrategy, ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrIgnored, TrOk, // FIXME (introduced by #65251) // ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TestTimeOptions, // TestType, TrFailedMsg, TrIgnored, TrOk, }, time::{TestTimeOptions, TimeThreshold}, }; use std::any::TypeId; use std::sync::mpsc::channel; use std::time::Duration; impl TestOpts { fn new() -> TestOpts { TestOpts { list: false, filters: vec![], filter_exact: false, force_run_in_process: false, exclude_should_panic: false, run_ignored: RunIgnored::No, run_tests: false, bench_benchmarks: false, logfile: None, nocapture: false, color: AutoColor, format: OutputFormat::Pretty, test_threads: None, skip: vec![], time_options: None, options: Options::new(), } } } fn one_ignored_one_unignored_test() -> Vec { vec![ TestDescAndFn { desc: TestDesc { name: StaticTestName("1"), ignore: true, should_panic: ShouldPanic::No, allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(move || {})), }, TestDescAndFn { desc: TestDesc { name: StaticTestName("2"), ignore: false, should_panic: ShouldPanic::No, allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(move || {})), }, ] } #[test] pub fn do_not_run_ignored_tests() { fn f() { panic!(); } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: true, should_panic: ShouldPanic::No, allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(f)), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); let result = rx.recv().unwrap().result; assert_ne!(result, TrOk); } #[test] pub fn ignored_tests_result_in_ignored() { fn f() {} let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: true, should_panic: ShouldPanic::No, allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(f)), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); let result = rx.recv().unwrap().result; assert_eq!(result, TrIgnored); } // FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251) #[test] #[cfg(not(target_os = "emscripten"))] fn test_should_panic() { fn f() { panic!(); } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: false, should_panic: ShouldPanic::Yes, allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(f)), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); let result = rx.recv().unwrap().result; assert_eq!(result, TrOk); } // FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251) #[test] #[cfg(not(target_os = "emscripten"))] fn test_should_panic_good_message() { fn f() { panic!("an error message"); } let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: false, should_panic: ShouldPanic::YesWithMessage("error message"), allow_fail: 
false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(f)), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); let result = rx.recv().unwrap().result; assert_eq!(result, TrOk); } // FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251) #[test] #[cfg(not(target_os = "emscripten"))] fn test_should_panic_bad_message() { use crate::tests::TrFailedMsg; fn f() { panic!("an error message"); } let expected = "foobar"; let failed_msg = r#"panic did not contain expected string panic message: `"an error message"`, expected substring: `"foobar"`"#; let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: false, should_panic: ShouldPanic::YesWithMessage(expected), allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(f)), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); let result = rx.recv().unwrap().result; assert_eq!(result, TrFailedMsg(failed_msg.to_string())); } // FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251) #[test] #[cfg(not(target_os = "emscripten"))] fn test_should_panic_non_string_message_type() { use crate::tests::TrFailedMsg; fn f() { panic!(1i32); } let expected = "foobar"; let failed_msg = format!( r#"expected panic with string value, found non-string value: `{:?}` expected substring: `"foobar"`"#, TypeId::of::() ); let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: false, should_panic: ShouldPanic::YesWithMessage(expected), allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(f)), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); let result = rx.recv().unwrap().result; assert_eq!(result, TrFailedMsg(failed_msg)); } // FIXME: Re-enable emscripten once it can catch panics again (introduced by #65251) #[test] #[cfg(not(target_os = "emscripten"))] fn test_should_panic_but_succeeds() { fn f() {} let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: false, should_panic: ShouldPanic::Yes, allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(f)), }; let (tx, rx) = channel(); run_test(&TestOpts::new(), false, desc, RunStrategy::InProcess, tx, Concurrent::No); let result = rx.recv().unwrap().result; assert_eq!(result, TrFailedMsg("test did not panic as expected".to_string())); } fn report_time_test_template(report_time: bool) -> Option { fn f() {} let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: false, should_panic: ShouldPanic::No, allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(f)), }; let time_options = if report_time { Some(TestTimeOptions::default()) } else { None }; let test_opts = TestOpts { time_options, ..TestOpts::new() }; let (tx, rx) = channel(); run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No); let exec_time = rx.recv().unwrap().exec_time; exec_time } #[test] fn test_should_not_report_time() { let exec_time = report_time_test_template(false); assert!(exec_time.is_none()); } #[test] fn test_should_report_time() { let exec_time = report_time_test_template(true); assert!(exec_time.is_some()); } fn time_test_failure_template(test_type: TestType) -> TestResult { fn f() {} let desc = TestDescAndFn { desc: TestDesc { name: StaticTestName("whatever"), ignore: 
false, should_panic: ShouldPanic::No, allow_fail: false, test_type, }, testfn: DynTestFn(Box::new(f)), }; // `Default` will initialize all the thresholds to 0 milliseconds. let mut time_options = TestTimeOptions::default(); time_options.error_on_excess = true; let test_opts = TestOpts { time_options: Some(time_options), ..TestOpts::new() }; let (tx, rx) = channel(); run_test(&test_opts, false, desc, RunStrategy::InProcess, tx, Concurrent::No); let result = rx.recv().unwrap().result; result } #[test] fn test_error_on_exceed() { let types = [TestType::UnitTest, TestType::IntegrationTest, TestType::DocTest]; for test_type in types.iter() { let result = time_test_failure_template(*test_type); assert_eq!(result, TestResult::TrTimedFail); } // Check that for unknown tests thresholds aren't applied. let result = time_test_failure_template(TestType::Unknown); assert_eq!(result, TestResult::TrOk); } fn typed_test_desc(test_type: TestType) -> TestDesc { TestDesc { name: StaticTestName("whatever"), ignore: false, should_panic: ShouldPanic::No, allow_fail: false, test_type, } } fn test_exec_time(millis: u64) -> TestExecTime { TestExecTime(Duration::from_millis(millis)) } #[test] fn test_time_options_threshold() { let unit = TimeThreshold::new(Duration::from_millis(50), Duration::from_millis(100)); let integration = TimeThreshold::new(Duration::from_millis(500), Duration::from_millis(1000)); let doc = TimeThreshold::new(Duration::from_millis(5000), Duration::from_millis(10000)); let options = TestTimeOptions { error_on_excess: false, colored: false, unit_threshold: unit.clone(), integration_threshold: integration.clone(), doctest_threshold: doc.clone(), }; let test_vector = [ (TestType::UnitTest, unit.warn.as_millis() - 1, false, false), (TestType::UnitTest, unit.warn.as_millis(), true, false), (TestType::UnitTest, unit.critical.as_millis(), true, true), (TestType::IntegrationTest, integration.warn.as_millis() - 1, false, false), (TestType::IntegrationTest, integration.warn.as_millis(), true, false), (TestType::IntegrationTest, integration.critical.as_millis(), true, true), (TestType::DocTest, doc.warn.as_millis() - 1, false, false), (TestType::DocTest, doc.warn.as_millis(), true, false), (TestType::DocTest, doc.critical.as_millis(), true, true), ]; for (test_type, time, expected_warn, expected_critical) in test_vector.iter() { let test_desc = typed_test_desc(*test_type); let exec_time = test_exec_time(*time as u64); assert_eq!(options.is_warn(&test_desc, &exec_time), *expected_warn); assert_eq!(options.is_critical(&test_desc, &exec_time), *expected_critical); } } #[test] fn parse_ignored_flag() { let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()]; let opts = parse_opts(&args).unwrap().unwrap(); assert_eq!(opts.run_ignored, RunIgnored::Only); } #[test] fn parse_show_output_flag() { let args = vec!["progname".to_string(), "filter".to_string(), "--show-output".to_string()]; let opts = parse_opts(&args).unwrap().unwrap(); assert!(opts.options.display_output); } #[test] fn parse_include_ignored_flag() { let args = vec![ "progname".to_string(), "filter".to_string(), "-Zunstable-options".to_string(), "--include-ignored".to_string(), ]; let opts = parse_opts(&args).unwrap().unwrap(); assert_eq!(opts.run_ignored, RunIgnored::Yes); } #[test] pub fn filter_for_ignored_option() { // When we run ignored tests the test filter should filter out all the // unignored tests and flip the ignore flag on the rest to false let mut opts = TestOpts::new(); opts.run_tests = true; 
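    // The fixture from one_ignored_one_unignored_test() registers test "1" as ignored
    // and test "2" as not ignored.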
opts.run_ignored = RunIgnored::Only; let tests = one_ignored_one_unignored_test(); let filtered = filter_tests(&opts, tests); assert_eq!(filtered.len(), 1); assert_eq!(filtered[0].desc.name.to_string(), "1"); assert!(!filtered[0].desc.ignore); } #[test] pub fn run_include_ignored_option() { // When we run with "--include-ignored", the ignore flag should be set to false on // all tests and no test should be filtered out let mut opts = TestOpts::new(); opts.run_tests = true; opts.run_ignored = RunIgnored::Yes; let tests = one_ignored_one_unignored_test(); let filtered = filter_tests(&opts, tests); assert_eq!(filtered.len(), 2); assert!(!filtered[0].desc.ignore); assert!(!filtered[1].desc.ignore); } #[test] pub fn exclude_should_panic_option() { let mut opts = TestOpts::new(); opts.run_tests = true; opts.exclude_should_panic = true; let mut tests = one_ignored_one_unignored_test(); tests.push(TestDescAndFn { desc: TestDesc { name: StaticTestName("3"), ignore: false, should_panic: ShouldPanic::Yes, allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(move || {})), }); let filtered = filter_tests(&opts, tests); assert_eq!(filtered.len(), 2); assert!(filtered.iter().all(|test| test.desc.should_panic == ShouldPanic::No)); } #[test] pub fn exact_filter_match() { fn tests() -> Vec<TestDescAndFn> { vec!["base", "base::test", "base::test1", "base::test2"] .into_iter() .map(|name| TestDescAndFn { desc: TestDesc { name: StaticTestName(name), ignore: false, should_panic: ShouldPanic::No, allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(move || {})), }) .collect() } let substr = filter_tests(&TestOpts { filters: vec!["base".into()], ..TestOpts::new() }, tests()); assert_eq!(substr.len(), 4); let substr = filter_tests(&TestOpts { filters: vec!["bas".into()], ..TestOpts::new() }, tests()); assert_eq!(substr.len(), 4); let substr = filter_tests(&TestOpts { filters: vec!["::test".into()], ..TestOpts::new() }, tests()); assert_eq!(substr.len(), 3); let substr = filter_tests(&TestOpts { filters: vec!["base::test".into()], ..TestOpts::new() }, tests()); assert_eq!(substr.len(), 3); let substr = filter_tests( &TestOpts { filters: vec!["test1".into(), "test2".into()], ..TestOpts::new() }, tests(), ); assert_eq!(substr.len(), 2); let exact = filter_tests( &TestOpts { filters: vec!["base".into()], filter_exact: true, ..TestOpts::new() }, tests(), ); assert_eq!(exact.len(), 1); let exact = filter_tests( &TestOpts { filters: vec!["bas".into()], filter_exact: true, ..TestOpts::new() }, tests(), ); assert_eq!(exact.len(), 0); let exact = filter_tests( &TestOpts { filters: vec!["::test".into()], filter_exact: true, ..TestOpts::new() }, tests(), ); assert_eq!(exact.len(), 0); let exact = filter_tests( &TestOpts { filters: vec!["base::test".into()], filter_exact: true, ..TestOpts::new() }, tests(), ); assert_eq!(exact.len(), 1); let exact = filter_tests( &TestOpts { filters: vec!["base".into(), "base::test".into()], filter_exact: true, ..TestOpts::new() }, tests(), ); assert_eq!(exact.len(), 2); } #[test] pub fn sort_tests() { let mut opts = TestOpts::new(); opts.run_tests = true; let names = vec![ "sha1::test".to_string(), "isize::test_to_str".to_string(), "isize::test_pow".to_string(), "test::do_not_run_ignored_tests".to_string(), "test::ignored_tests_result_in_ignored".to_string(), "test::first_free_arg_should_be_a_filter".to_string(), "test::parse_ignored_flag".to_string(), "test::parse_include_ignored_flag".to_string(), "test::filter_for_ignored_option".to_string(), 
"test::run_include_ignored_option".to_string(), "test::sort_tests".to_string(), ]; let tests = { fn testfn() {} let mut tests = Vec::new(); for name in &names { let test = TestDescAndFn { desc: TestDesc { name: DynTestName((*name).clone()), ignore: false, should_panic: ShouldPanic::No, allow_fail: false, test_type: TestType::Unknown, }, testfn: DynTestFn(Box::new(testfn)), }; tests.push(test); } tests }; let filtered = filter_tests(&opts, tests); let expected = vec![ "isize::test_pow".to_string(), "isize::test_to_str".to_string(), "sha1::test".to_string(), "test::do_not_run_ignored_tests".to_string(), "test::filter_for_ignored_option".to_string(), "test::first_free_arg_should_be_a_filter".to_string(), "test::ignored_tests_result_in_ignored".to_string(), "test::parse_ignored_flag".to_string(), "test::parse_include_ignored_flag".to_string(), "test::run_include_ignored_option".to_string(), "test::sort_tests".to_string(), ]; for (a, b) in expected.iter().zip(filtered) { assert_eq!(*a, b.desc.name.to_string()); } } #[test] pub fn test_metricmap_compare() { let mut m1 = MetricMap::new(); let mut m2 = MetricMap::new(); m1.insert_metric("in-both-noise", 1000.0, 200.0); m2.insert_metric("in-both-noise", 1100.0, 200.0); m1.insert_metric("in-first-noise", 1000.0, 2.0); m2.insert_metric("in-second-noise", 1000.0, 2.0); m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0); m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0); m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0); m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0); m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0); m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0); m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0); m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0); } #[test] pub fn test_bench_once_no_iter() { fn f(_: &mut Bencher) {} bench::run_once(f); } #[test] pub fn test_bench_once_iter() { fn f(b: &mut Bencher) { b.iter(|| {}) } bench::run_once(f); } #[test] pub fn test_bench_no_iter() { fn f(_: &mut Bencher) {} let (tx, rx) = channel(); let desc = TestDesc { name: StaticTestName("f"), ignore: false, should_panic: ShouldPanic::No, allow_fail: false, test_type: TestType::Unknown, }; crate::bench::benchmark(desc, tx, true, f); rx.recv().unwrap(); } #[test] pub fn test_bench_iter() { fn f(b: &mut Bencher) { b.iter(|| {}) } let (tx, rx) = channel(); let desc = TestDesc { name: StaticTestName("f"), ignore: false, should_panic: ShouldPanic::No, allow_fail: false, test_type: TestType::Unknown, }; crate::bench::benchmark(desc, tx, true, f); rx.recv().unwrap(); } #[test] fn should_sort_failures_before_printing_them() { let test_a = TestDesc { name: StaticTestName("a"), ignore: false, should_panic: ShouldPanic::No, allow_fail: false, test_type: TestType::Unknown, }; let test_b = TestDesc { name: StaticTestName("b"), ignore: false, should_panic: ShouldPanic::No, allow_fail: false, test_type: TestType::Unknown, }; let mut out = PrettyFormatter::new(OutputLocation::Raw(Vec::new()), false, 10, false, None); let st = console::ConsoleTestState { log_out: None, total: 0, passed: 0, failed: 0, ignored: 0, allowed_fail: 0, filtered_out: 0, measured: 0, exec_time: None, metrics: MetricMap::new(), failures: vec![(test_b, Vec::new()), (test_a, Vec::new())], options: Options::new(), not_failures: Vec::new(), time_failures: Vec::new(), }; out.write_failures(&st).unwrap(); let s = match out.output_location() { 
&OutputLocation::Raw(ref m) => String::from_utf8_lossy(&m[..]), &OutputLocation::Pretty(_) => unreachable!(), }; let apos = s.find("a").unwrap(); let bpos = s.find("b").unwrap(); assert!(apos < bpos); } tester-0.9.1/src/time.rs000064400000000000000000000171711046102023000132430ustar 00000000000000//! Module `time` contains everything related to measuring the execution time of //! unit tests. //! The purposes of this module: //! - Check whether a test has timed out. //! - Provide helpers for the `report-time` and `measure-time` options. //! - Provide newtypes for execution times. use std::env; use std::fmt; use std::str::FromStr; use std::time::{Duration, Instant}; use super::types::{TestDesc, TestType}; pub const TEST_WARN_TIMEOUT_S: u64 = 60; /// This small module contains constants used by the `report-time` option. /// These constant values are used if the corresponding environment variables are not set. /// /// To override the values for unit tests, set the `RUST_TEST_TIME_UNIT` environment variable; /// to override the values for integration tests, set `RUST_TEST_TIME_INTEGRATION`; /// to override the values for doctests, set `RUST_TEST_TIME_DOCTEST`. /// /// Example of the expected format: `RUST_TEST_TIME_xxx=100,200`, where 100 is the /// warn time and 200 is the critical time, both in milliseconds. pub mod time_constants { use super::TEST_WARN_TIMEOUT_S; use std::time::Duration; /// Environment variable for overriding the default threshold for unit tests. pub const UNIT_ENV_NAME: &str = "RUST_TEST_TIME_UNIT"; // Unit tests are supposed to be really quick. pub const UNIT_WARN: Duration = Duration::from_millis(50); pub const UNIT_CRITICAL: Duration = Duration::from_millis(100); /// Environment variable for overriding the default threshold for integration tests. pub const INTEGRATION_ENV_NAME: &str = "RUST_TEST_TIME_INTEGRATION"; // Integration tests may have a lot of work, so they can take longer to execute. pub const INTEGRATION_WARN: Duration = Duration::from_millis(500); pub const INTEGRATION_CRITICAL: Duration = Duration::from_millis(1000); /// Environment variable for overriding the default threshold for doctests. pub const DOCTEST_ENV_NAME: &str = "RUST_TEST_TIME_DOCTEST"; // Doctests are similar to integration tests, because they can include a lot of // initialization code. pub const DOCTEST_WARN: Duration = INTEGRATION_WARN; pub const DOCTEST_CRITICAL: Duration = INTEGRATION_CRITICAL; // Do not assume anything about unknown tests; base their limits on the // `TEST_WARN_TIMEOUT_S` constant. pub const UNKNOWN_WARN: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S); pub const UNKNOWN_CRITICAL: Duration = Duration::from_secs(TEST_WARN_TIMEOUT_S * 2); } /// Returns an `Instant` denoting when the test should be considered /// timed out. pub fn get_default_test_timeout() -> Instant { Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S) } /// The measured execution time of a unit test. #[derive(Debug, Clone, PartialEq)] pub struct TestExecTime(pub Duration); impl fmt::Display for TestExecTime { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:.3}s", self.0.as_secs_f64()) } } /// The measured execution time of the whole test suite. #[derive(Debug, Clone, Default, PartialEq)] pub struct TestSuiteExecTime(pub Duration); impl fmt::Display for TestSuiteExecTime { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{:.2}s", self.0.as_secs_f64()) } } /// Structure denoting time limits for test execution. 
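/// For example, a variable such as `RUST_TEST_TIME_UNIT=50,100` (values in milliseconds; shown here only as an illustration, and matching the built-in unit-test defaults above) is parsed by `from_env_var` below into a warn threshold of 50 ms and a critical threshold of 100 ms.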
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] pub struct TimeThreshold { pub warn: Duration, pub critical: Duration, } impl TimeThreshold { /// Creates a new `TimeThreshold` instance with provided durations. pub fn new(warn: Duration, critical: Duration) -> Self { Self { warn, critical } } /// Attempts to create a `TimeThreshold` instance with values obtained /// from the environment variable, and returns `None` if the variable /// is not set. /// The environment variable format is expected to match `\d+,\d+`. /// /// # Panics /// /// Panics if the variable with the provided name is set but contains an inappropriate /// value. pub fn from_env_var(env_var_name: &str) -> Option<Self> { let durations_str = env::var(env_var_name).ok()?; // Split string into 2 substrings by comma and try to parse numbers. let mut durations = durations_str.splitn(2, ',').map(|v| { u64::from_str(v).unwrap_or_else(|_| { panic!( "Duration value in variable {} is expected to be a number, but got {}", env_var_name, v ) }) }); // Callback to be called if the environment variable has unexpected structure. let panic_on_incorrect_value = || { panic!( "Duration variable {} expected to have 2 numbers separated by comma, but got {}", env_var_name, durations_str ); }; let (warn, critical) = ( durations.next().unwrap_or_else(panic_on_incorrect_value), durations.next().unwrap_or_else(panic_on_incorrect_value), ); if warn > critical { panic!("Test execution warn time should be less or equal to the critical time"); } Some(Self::new(Duration::from_millis(warn), Duration::from_millis(critical))) } } /// Structure with parameters for calculating test execution time. #[derive(Copy, Clone, Debug, Default, PartialEq, Eq)] pub struct TestTimeOptions { /// Denotes if the test critical execution time limit excess should be considered /// a test failure. 
pub error_on_excess: bool, pub colored: bool, pub unit_threshold: TimeThreshold, pub integration_threshold: TimeThreshold, pub doctest_threshold: TimeThreshold, } impl TestTimeOptions { pub fn new_from_env(error_on_excess: bool, colored: bool) -> Self { let unit_threshold = TimeThreshold::from_env_var(time_constants::UNIT_ENV_NAME) .unwrap_or_else(Self::default_unit); let integration_threshold = TimeThreshold::from_env_var(time_constants::INTEGRATION_ENV_NAME) .unwrap_or_else(Self::default_integration); let doctest_threshold = TimeThreshold::from_env_var(time_constants::DOCTEST_ENV_NAME) .unwrap_or_else(Self::default_doctest); Self { error_on_excess, colored, unit_threshold, integration_threshold, doctest_threshold } } pub fn is_warn(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool { exec_time.0 >= self.warn_time(test) } pub fn is_critical(&self, test: &TestDesc, exec_time: &TestExecTime) -> bool { exec_time.0 >= self.critical_time(test) } fn warn_time(&self, test: &TestDesc) -> Duration { match test.test_type { TestType::UnitTest => self.unit_threshold.warn, TestType::IntegrationTest => self.integration_threshold.warn, TestType::DocTest => self.doctest_threshold.warn, TestType::Unknown => time_constants::UNKNOWN_WARN, } } fn critical_time(&self, test: &TestDesc) -> Duration { match test.test_type { TestType::UnitTest => self.unit_threshold.critical, TestType::IntegrationTest => self.integration_threshold.critical, TestType::DocTest => self.doctest_threshold.critical, TestType::Unknown => time_constants::UNKNOWN_CRITICAL, } } fn default_unit() -> TimeThreshold { TimeThreshold::new(time_constants::UNIT_WARN, time_constants::UNIT_CRITICAL) } fn default_integration() -> TimeThreshold { TimeThreshold::new(time_constants::INTEGRATION_WARN, time_constants::INTEGRATION_CRITICAL) } fn default_doctest() -> TimeThreshold { TimeThreshold::new(time_constants::DOCTEST_WARN, time_constants::DOCTEST_CRITICAL) } } tester-0.9.1/src/types.rs000064400000000000000000000101751046102023000134460ustar 00000000000000//! Common types used by `libtest`. use std::borrow::Cow; use std::fmt; use super::bench::Bencher; use super::options; pub use NamePadding::*; pub use TestFn::*; pub use TestName::*; /// Type of the test according to the [rust book](https://doc.rust-lang.org/cargo/guide/tests.html) /// conventions. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] pub enum TestType { /// Unit-tests are expected to be in the `src` folder of the crate. UnitTest, /// Integration-style tests are expected to be in the `tests` folder of the crate. IntegrationTest, /// Doctests are created by the `librustdoc` manually, so it's a different type of test. DocTest, /// Tests for the sources that don't follow the project layout convention /// (e.g. tests in raw `main.rs` compiled by calling `rustc --test` directly). Unknown, } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum NamePadding { PadNone, PadOnRight, } // The name of a test. By convention this follows the rules for rust // paths; i.e., it should be a series of identifiers separated by double // colons. This way if some test runner wants to arrange the tests // hierarchically it may. 
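// For example, a dynamically registered name such as `DynTestName("net::tcp::bind_smoke".to_string())` (a hypothetical name, not one used in this crate) would let such a runner group the test under `net::tcp`.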
#[derive(Clone, PartialEq, Eq, Hash, Debug)] pub enum TestName { StaticTestName(&'static str), DynTestName(String), AlignedTestName(Cow<'static, str>, NamePadding), } impl TestName { pub fn as_slice(&self) -> &str { match *self { StaticTestName(s) => s, DynTestName(ref s) => s, AlignedTestName(ref s, _) => &*s, } } pub fn padding(&self) -> NamePadding { match self { &AlignedTestName(_, p) => p, _ => PadNone, } } pub fn with_padding(&self, padding: NamePadding) -> TestName { let name = match *self { TestName::StaticTestName(name) => Cow::Borrowed(name), TestName::DynTestName(ref name) => Cow::Owned(name.clone()), TestName::AlignedTestName(ref name, _) => name.clone(), }; TestName::AlignedTestName(name, padding) } } impl fmt::Display for TestName { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fmt::Display::fmt(self.as_slice(), f) } } /// Represents a benchmark function. pub trait TDynBenchFn: Send { fn run(&self, harness: &mut Bencher); } // A function that runs a test. If the function returns successfully, // the test succeeds; if the function panics then the test fails. We // may need to come up with a more clever definition of test in order // to support isolation of tests into threads. pub enum TestFn { StaticTestFn(fn()), StaticBenchFn(fn(&mut Bencher)), DynTestFn(Box<dyn FnOnce() + Send>), DynBenchFn(Box<dyn TDynBenchFn>), } impl TestFn { pub fn padding(&self) -> NamePadding { match *self { StaticTestFn(..) => PadNone, StaticBenchFn(..) => PadOnRight, DynTestFn(..) => PadNone, DynBenchFn(..) => PadOnRight, } } } impl fmt::Debug for TestFn { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str(match *self { StaticTestFn(..) => "StaticTestFn(..)", StaticBenchFn(..) => "StaticBenchFn(..)", DynTestFn(..) => "DynTestFn(..)", DynBenchFn(..) => "DynBenchFn(..)", }) } } // The definition of a single test. A test runner will run a list of // these. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct TestDesc { pub name: TestName, pub ignore: bool, pub should_panic: options::ShouldPanic, pub allow_fail: bool, pub test_type: TestType, } impl TestDesc { pub fn padded_name(&self, column_count: usize, align: NamePadding) -> String { let mut name = String::from(self.name.as_slice()); let fill = column_count.saturating_sub(name.len()); let pad = " ".repeat(fill); match align { PadNone => name, PadOnRight => { name.push_str(&pad); name } } } } #[derive(Debug)] pub struct TestDescAndFn { pub desc: TestDesc, pub testfn: TestFn, }
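// A minimal sketch of how the types above fit together, mirroring the tests module earlier
// in this crate. It assumes the same items that module uses (`TestOpts`, `filter_tests`,
// `ShouldPanic`, `StaticTestName`, `DynTestFn`); the test name and body are hypothetical and
// serve only as an illustration, not as part of the crate's API surface.
fn example_registration() {
    // Describe a single test: not ignored, no panic expected, no layout information known.
    let desc = TestDesc {
        name: StaticTestName("example::smoke"), // hypothetical test name
        ignore: false,
        should_panic: ShouldPanic::No,
        allow_fail: false,
        test_type: TestType::Unknown,
    };
    // Pair the description with a boxed test body, exactly as the tests module does.
    let test = TestDescAndFn { desc, testfn: DynTestFn(Box::new(|| assert_eq!(1 + 1, 2))) };
    // Substring filtering keeps the test because "example" occurs in its name
    // (see `exact_filter_match` above for the exact-match variant).
    let opts = TestOpts { filters: vec!["example".into()], ..TestOpts::new() };
    let kept = filter_tests(&opts, vec![test]);
    assert_eq!(kept.len(), 1);
}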