benchmark-ips-2.14.0/.autotest
# -*- ruby -*-

require 'autotest/restart'

# Autotest.add_hook :initialize do |at|
#   at.extra_files << "../some/external/dependency.rb"
#
#   at.libs << ":../some/external"
#
#   at.add_exception 'vendor'
#
#   at.add_mapping(/dependency.rb/) do |f, _|
#     at.files_matching(/test_.*rb$/)
#   end
#
#   %w(TestA TestB).each do |klass|
#     at.extra_class_map[klass] = "test/test_misc.rb"
#   end
# end

# Autotest.add_hook :run_command do |at|
#   system "rake build"
# end

benchmark-ips-2.14.0/.github/dependabot.yml
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"

benchmark-ips-2.14.0/.github/workflows/ci.yml
name: CI
on: [push, pull_request]
jobs:
  test:
    strategy:
      fail-fast: false
      matrix:
        ruby: [2.3, 2.4, 2.5, 2.6, 2.7, '3.0', 3.1, 3.2, head, jruby, truffleruby]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: ruby/setup-ruby@v1
        with:
          ruby-version: ${{ matrix.ruby }}
          bundler-cache: true
      - run: bundle exec rake

benchmark-ips-2.14.0/.gitignore
log
tmp
pkg
doc
Gemfile.lock
vendor

benchmark-ips-2.14.0/Gemfile
source 'https://rubygems.org'

gem 'rake', '>= 10.5'
gem 'minitest', :group => :test

if RUBY_VERSION < "1.9"
  gem 'json'
end

benchmark-ips-2.14.0/History.md
### 2.14.0 / 2024-09-08

* Feature
  * Adds Benchmark::IPS.quick_compare.
  * Adds absolute duration of each iteration to compare output.

### 2.13.0 / 2023-12-12

* Feature
  * Prints ruby version at top of report.

### 2.12.0 / 2023-03-08

* Feature
  * Adds MultiReport and ability report to a stream rather than a string.

### 2.11.0 / 2023-02-15

* Feature
  * Adds .json! method to the ips block argument, allowing you to print the
    output as JSON to a file or STDOUT.

### 2.10.0 / 2022-02-17

* Feature
  * Adds :order option to compare, with new `:baseline` order which compares
    all variations against the first option benchmarked.

### 2.9.3 / 2022-01-25

* Bug fix
  * All warmups and benchmarks must run at least once

### 2.9.2 / 2021-10-10

* Bug fix
  * Fix a problem with certain configs of quiet mode

### 2.9.1 / 2021-05-24

* Bug fix
  * Include all files in gem

### 2.9.0 / 2021-05-21

* Features
  * Suite can now be set via an accessor
  * Default SHARE_URL is now `ips.fastruby.io`, operated by Ombu Labs.

### 2.8.4 / 2020-12-03

* Bug fix
  * Fixed hold! when results file does not exist.

### 2.8.3 / 2020-08-28

* Bug fix
  * Fixed inaccuracy caused by integer overflows.

### 2.8.2 / 2020-05-04

* Bug fix
  * Fixed problems with Manifest.txt.
  * Empty interim results files are ignored.

### 2.8.0 / 2020-05-01

* Feature
  * Allow running with empty ips block.
  * Added save!
    method for saving interim results.
  * Run more than just 1 cycle during warmup to reduce overhead.
  * Optimized Job::Entry hot-path for fairer results on JRuby/TruffleRuby.
* Bug fix
  * Removed the warmup section if set to 0.
  * Added some RDoc docs.
  * Added some examples in examples/

### 2.7.2 / 2016-08-18

* 1 bug fix:
  * Restore old accessors. Fixes #76

### 2.7.1 / 2016-08-08

Add missing files

### 2.7.0 / 2016-08-05

* 1 minor features:
  * Add support for confidence intervals
* 1 bug fixes:
  * Cleanup a few coding patterns
* 2 doc fixes:
  * Add infos about benchark.fyi to Readme
  * Remove ancient releases
* 3 merged PRs:
  * Merge pull request #65 from kbrock/fixup_inject
  * Merge pull request #67 from benoittgt/master
  * Merge pull request #69 from chrisseaton/kalibera-confidence-intervals

### MISSING 2.6.0 and 2.6.1

### 2.5.0 / 2016-02-14

* 1 minor feature:
  * Add iterations option.
* 1 bug fixes:
  * Don't tell people something is slower if it's within the error.
* 2 merged PRs:
  * Merge pull request #58 from chrisseaton/iterations
  * Merge pull request #60 from chrisseaton/significance

### 2.4.1 / 2016-02-12

* 1 bug fix:
  * Add missing files to gem

### 2.4.0 / 2016-02-12

* 1 minor features
  * Add support for hold! and independent invocations.
* 6 bug fixes
  * Separate messages for warming up and calculating.
  * Tighten timing loop.
  * Pass simple types into Job#create_report
  * More concise sorting
  * Fix runtime comparison
  * Use runtime if ips is not available
* 5 doc fixes
  * Fix typo unsed --> used
  * Better document Report::Entry
  * Fix some typos in docs
  * Don't calculate mean 2 times
  * Add more tolerance to tests
* 13 merged PRs
  * Merge pull request #44 from kbrock/job_extract
  * Merge pull request #45 from kbrock/runtime_only
  * Merge pull request #47 from kbrock/use_avg
  * Merge pull request #46 from kbrock/report_stdout
  * Merge pull request #48 from bquorning/fix-label-for-runtime-comparison
  * Merge pull request #50 from tjschuck/fix_typo
  * Merge pull request #51 from bquorning/all-reports-respond-to-ips
  * Merge pull request #52 from kbrock/document_reports
  * Merge pull request #53 from kbrock/interface_create_report
  * Merge pull request #54 from PragTob/patch-2
  * Merge pull request #55 from chrisseaton/messages
  * Merge pull request #56 from chrisseaton/independence
  * Merge pull request #57 from chrisseaton/tighten-loop

### 2.3.0 / 2015-07-20

* 2 minor features:
  * Support keyword arguments
  * Allow any datatype for labels (use #to_s conversion)
* 1 doc/test changes:
  * Newer Travis for 1.8.7, ree, and 2.2.2
* 3 PRs merged:
  * Merge pull request #41 from kbrock/kwargs-support
  * Merge pull request #42 from kbrock/newer_travis
  * Merge pull request #43 from kbrock/non_to_s_labels

### 2.2.0 / 2015-05-09

* 1 minor features:
  * Fix quiet mode
  * Allow passing a custom suite via config
  * Silent a job if a suite was passed and is quiet
  * Export report to json file.
  * Accept symbol as report's argument.
* 2 doc fixes:
  * Squish duplicate `to` in README
  * Update copyright to 2015.
    [ci skip]
* 9 PRs merged:
  * Merge pull request #37 from splattael/patch-1
  * Merge pull request #36 from kirs/quiet-mode
  * Merge pull request #35 from JuanitoFatas/doc/suite
  * Merge pull request #34 from splattael/config-suite
  * Merge pull request #33 from splattael/suite-quiet
  * Merge pull request #32 from O-I/remove-gemfile-lock
  * Merge pull request #31 from JuanitoFatas/doc/bump-copyright-year
  * Merge pull request #29 from JuanitoFatas/feature/json-export
  * Merge pull request #26 from JuanitoFatas/feature/takes-symbol-as-report-parameter

### 2.1.1 / 2015-01-12

* 1 minor fix:
  * Don't send label through printf so that % work directly
* 1 documentation changes:
  * Use HEREDOC and wrap at 80 chars for example result description
* 1 usage fix:
  * Add gemspec for use via bundler git
* 1 PR merged:
  * Merge pull request #24 from zzak/simple-format-result-description

### 2.1.0 / 2014-11-10

* Documentation changes:
  * Many documentation fixes by Juanito Fatas!
  * Minor readme fix by Will Leinweber
* 2 minor features:
  * Displaying the total runtime for a job is suppressed unless interesting
  * Formatting of large values improved (human vs raw mode)
  * Contributed by Charles Oliver Nutter

### 2.0.0 / 2014-06-18

* The 'Davy Stevenson' release!
* Codename: Springtime Hummingbird Dance
* Big API refactoring so the internal bits are easier to use
* Bump to 2.0 because return types changed to make the API better
* Contributors added:
  * Davy Stevenson
  * Juanito Fatas
  * Benoit Daloze
  * Matias
  * Tony Arcieri
  * Vipul A M
  * Zachary Scott
  * schneems (Richard Schneeman)

### 1.0.0 / 2012-03-23

* 1 major enhancement
  * Birthday!

benchmark-ips-2.14.0/LICENSE
Copyright (c) 2015 Evan Phoenix

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the 'Software'), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

benchmark-ips-2.14.0/README.md
# benchmark-ips

* rdoc :: http://rubydoc.info/gems/benchmark-ips
* home :: https://github.com/evanphx/benchmark-ips

[![Gem Version](https://badge.fury.io/rb/benchmark-ips.svg)](http://badge.fury.io/rb/benchmark-ips)
[![Build Status](https://secure.travis-ci.org/evanphx/benchmark-ips.svg)](http://travis-ci.org/evanphx/benchmark-ips)
[![Inline docs](http://inch-ci.org/github/evanphx/benchmark-ips.svg)](http://inch-ci.org/github/evanphx/benchmark-ips)

## DESCRIPTION:

An iterations per second enhancement to Benchmark.

## FEATURES/PROBLEMS:

* benchmark/ips - benchmarks a block's iterations/second.
  For short snippets of code, ips automatically figures out how many times
  to run the code to get interesting data. No more guessing at random
  iteration counts!

## SYNOPSIS:

```ruby
require 'benchmark/ips'

Benchmark.ips do |x|
  # Configure the number of seconds used during
  # the warmup phase (default 2) and calculation phase (default 5)
  x.config(warmup: 2, time: 5)

  # Typical mode, runs the block as many times as it can
  x.report("addition") { 1 + 2 }

  # To reduce overhead, the number of iterations is passed in
  # and the block must run the code the specific number of times.
  # Used for when the workload is very small and any overhead
  # introduces incorrectable errors.
  x.report("addition2") do |times|
    i = 0
    while i < times
      i += 1
      1 + 2
    end
  end

  # To reduce overhead even more, grafts the code given into
  # the loop that performs the iterations internally to reduce
  # overhead. Typically not needed, use the |times| form instead.
  x.report("addition3", "1 + 2")

  # Really long labels should be formatted correctly
  x.report("addition-test-long-label") { 1 + 2 }

  # Compare the iterations per second of the various reports!
  x.compare!
end
```

This will generate the following report:

```
Warming up --------------------------------------
            addition     3.572M i/100ms
           addition2     3.672M i/100ms
           addition3     3.677M i/100ms
addition-test-long-label
                         3.511M i/100ms
Calculating -------------------------------------
            addition     36.209M (± 2.8%) i/s   (27.62 ns/i) -    182.253M in   5.037433s
           addition2     36.552M (± 7.8%) i/s   (27.36 ns/i) -    183.541M in   5.069987s
           addition3     36.639M (± 4.8%) i/s   (27.29 ns/i) -    182.994M in   5.009234s
addition-test-long-label
                         36.164M (± 5.8%) i/s   (27.65 ns/i) -    181.312M in   5.038364s

Comparison:
           addition2: 36558904.5 i/s
           addition3: 36359284.0 i/s - same-ish: difference falls within error
addition-test-long-label: 36135428.8 i/s - same-ish: difference falls within error
            addition: 34666931.3 i/s - same-ish: difference falls within error
```

Benchmark/ips will report the number of iterations per second for a given
block of code. When analyzing the results, notice the percent of
[standard deviation](http://en.wikipedia.org/wiki/Standard\_deviation), which
tells us how spread out our measurements are from the average. A high
standard deviation could indicate the results having too much variability.

One benefit to using this method is that benchmark-ips automatically
determines the data points for testing our code, so we can focus on the
results instead of guessing iteration counts as we do with the traditional
Benchmark library.

You can also use `ips_quick` to save a few lines of code:

```ruby
Benchmark.ips_quick(:upcase, :downcase, on: "hello") # runs a suite comparing "hello".upcase and "hello".downcase

def first; MyJob.perform(1); end
def second; MyJobOptimized.perform(1); end
Benchmark.ips_quick(:first, :second) # compares :first and :second
```

This adds a very small amount of overhead, which may be significant
(i.e. ips_quick will understate the difference) if you're microbenchmarking
things that can do over 1 million iterations per second. In that case, you're
better off using the full format.

### Custom Suite

Pass a custom suite to disable garbage collection during benchmark:

```ruby
require 'benchmark/ips'

# Enable and start GC before each job run. Disable GC afterwards.
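# A custom suite is any object that responds to the hooks benchmark-ips
# invokes around each job -- warming, running, warmup_stats and add_report --
# as this class does.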
#
# Inspired by https://www.omniref.com/ruby/2.2.1/symbols/Benchmark/bm?#annotation=4095926&line=182
class GCSuite
  def warming(*)
    run_gc
  end

  def running(*)
    run_gc
  end

  def warmup_stats(*)
  end

  def add_report(*)
  end

  private

  def run_gc
    GC.enable
    GC.start
    GC.disable
  end
end

suite = GCSuite.new

Benchmark.ips do |x|
  x.config(:suite => suite)
  x.report("job1") { ... }
  x.report("job2") { ... }
end
```

### Independent benchmarking

If you are comparing multiple implementations of a piece of code you may want
to benchmark them in separate invocations of Ruby so that the measurements
are independent of each other. You can do this with the `hold!` command.

```ruby
Benchmark.ips do |x|
  # Hold results between multiple invocations of Ruby
  x.hold! 'filename'
end
```

This will run only one benchmark each time you run the command, storing
results in the specified file. The file is deleted when all results have been
gathered and the report is shown.

Alternatively, if you prefer a different approach, the `save!` command is
available. Examples for [hold!](examples/hold.rb) and [save!](examples/save.rb)
are available in the `examples/` directory.

### Multiple iterations

In some cases you may want to run multiple iterations of the warmup and
calculation stages and take only the last result for comparison. This is
useful if you are benchmarking with an implementation of Ruby that optimizes
using tracing or on-stack-replacement, because to those implementations the
calculation phase may appear as new, unoptimized code.

You can do this with the `iterations` option, which by default is `1`. The
total time spent will then be `iterations * warmup + iterations * time`
seconds.

```ruby
Benchmark.ips do |x|
  x.config(:iterations => 3)

  # or

  x.iterations = 3
end
```

### Online sharing

If you want to quickly share your benchmark result with others, run your
benchmark with the `SHARE=1` argument. For example:
`SHARE=1 ruby my_benchmark.rb`. The result will be sent to
[benchmark.fyi](https://ips.fastruby.io/) and benchmark-ips will display the
link to share the benchmark's result.

If you want to run your own instance of
[benchmark.fyi](https://github.com/evanphx/benchmark.fyi) and share results
to that instance, you can do this:
`SHARE_URL=https://ips.example.com ruby my_benchmark.rb`

### Advanced Statistics

By default, the margin of error shown is plus-minus one standard deviation.
If a more advanced statistical test is wanted, a bootstrap confidence
interval can be calculated instead.

A bootstrap confidence interval has the advantages of arguably being more
mathematically sound for this application than a standard deviation, it
additionally produces an error for relative slowdowns, which the standard
deviation does not, and it is arguably more intuitive and actionable.

When a bootstrap confidence interval is used, the median of the interval is
used rather than the mean of the samples, which is what you get with the
default standard deviation.

The bootstrap confidence interval used is the one described by Tomas
Kalibera. Note that for this technique to be valid your benchmark should have
reached a non-periodic steady state with statistically independent samples
(it should have warmed up) by the time measurements start.

Using a bootstrap confidence interval requires that the 'kalibera' gem is
installed separately. This gem is not a formal dependency, as by default it
is not needed.
```
gem install kalibera
```

```ruby
Benchmark.ips do |x|
  # The default is :stats => :sd, which doesn't have a configurable confidence
  x.config(:stats => :bootstrap, :confidence => 95)

  # or

  x.stats = :bootstrap
  x.confidence = 95 # confidence is 95% by default, so it can be omitted
end
```

### Output as JSON

You can generate output in JSON. If you want to write JSON to a file, pass a
filename to the `json!` method:

```ruby
Benchmark.ips do |x|
  x.report("some report") { }
  x.json! 'filename.json'
end
```

If you want to write JSON to STDOUT, pass `STDOUT` to the `json!` method and
set `quiet = true` before `json!`:

```ruby
Benchmark.ips do |x|
  x.report("some report") { }
  x.quiet = true
  x.json! STDOUT
end
```

This is useful when the output from `benchmark-ips` becomes an input of other
tools via stdin.

## REQUIREMENTS:

* None!

## INSTALL:

    $ gem install benchmark-ips

## DEVELOPERS:

After checking out the source, run:

    $ rake newb

This task will install any missing dependencies, run the tests/specs, and
generate the RDoc.

benchmark-ips-2.14.0/Rakefile
# -*- ruby -*-

require "bundler/setup"
require "rake/testtask"
require "rubygems/package_task"
require "bundler/gem_tasks"

gemspec = Gem::Specification.load("benchmark-ips.gemspec")
Gem::PackageTask.new(gemspec).define

Rake::TestTask.new(:test)

task default: :test

# vim: syntax=ruby

benchmark-ips-2.14.0/benchmark-ips.gemspec
# -*- encoding: utf-8 -*-
# stub: benchmark-ips 2.1.0 ruby lib

d = File.read(File.expand_path("../lib/benchmark/ips.rb", __FILE__))
if d =~ /VERSION = "(\d+\.\d+\.\d+)"/
  version = $1
else
  version = "0.0.1"
end

Gem::Specification.new do |s|
  s.name = "benchmark-ips"
  s.version = version

  s.required_rubygems_version = Gem::Requirement.new(">= 0") if s.respond_to? :required_rubygems_version=
  s.require_paths = ["lib"]
  s.authors = ["Evan Phoenix"]
  s.date = "2015-01-12"
  s.description = "An iterations per second enhancement to Benchmark."
  s.email = ["evan@phx.io"]
  s.extra_rdoc_files = ["History.md", "LICENSE", "README.md"]
  s.files = `git ls-files -- examples lib`.split("\n") + %w[History.md LICENSE README.md]
  s.homepage = "https://github.com/evanphx/benchmark-ips"
  s.licenses = ["MIT"]
  s.rdoc_options = ["--main", "README.md"]
  s.rubygems_version = "2.2.2"
  s.summary = "An iterations per second enhancement to Benchmark."

  if s.respond_to? :specification_version then
    s.specification_version = 4

    if Gem::Version.new(Gem::VERSION) >= Gem::Version.new('1.2.0') then
      s.add_development_dependency(%q<minitest>, ["~> 5.4"])
      s.add_development_dependency(%q<rdoc>, ["~> 4.0"])
    else
      s.add_dependency(%q<minitest>, ["~> 5.4"])
      s.add_dependency(%q<rdoc>, ["~> 4.0"])
    end
  else
    s.add_dependency(%q<minitest>, ["~> 5.4"])
    s.add_dependency(%q<rdoc>, ["~> 4.0"])
  end
end

benchmark-ips-2.14.0/examples/advanced.rb
#!/usr/bin/env ruby

require 'benchmark/ips'

Benchmark.ips do |x|
  # Use bootstrap confidence intervals
  x.stats = :bootstrap

  # Set confidence to 95%
  x.confidence = 95

  # Run multiple iterations for better warmup
  x.iterations = 3

  x.report("mul") { 2 * 2 * 2 * 2 * 2 * 2 * 2 * 2 }
  x.report("pow") { 2 ** 8 }

  x.compare!
end

benchmark-ips-2.14.0/examples/hold.rb
#!/usr/bin/env ruby

# example to explain hold!
# usage: https://github.com/evanphx/benchmark-ips/issues/85
# The hold! feature expects to be run twice, generally with different Rubys.
# hold! can also be used to compare module changes which impact the run time
#
# RUN_1: ruby examples/hold.rb
# Warming up --------------------------------------
#              without   172.168k i/100ms
# Calculating -------------------------------------
#              without      2.656M (± 3.3%) i/s -     13.429M in   5.062098s
#
# RUN_2: WITH_MODULE=true ruby examples/hold.rb
# Warming up --------------------------------------
#                 with    92.087k i/100ms
# Calculating -------------------------------------
#                 with      1.158M (± 1.4%) i/s -      5.801M in   5.010084s
#
# Comparison:
#              without:  2464721.3 i/s
#                 with:  1158179.6 i/s - 2.13x slower

require 'benchmark/ips'

Benchmark.ips do |x|
  x.report('without') do
    'Bruce'.inspect
  end

  if ENV['WITH_MODULE'] == 'true'
    class String
      def inspect
        result = %w[Bruce Wayne is Batman]
        result.join(' ')
      end
    end
  end

  x.report('with') do
    'Bruce'.inspect
  end

  x.hold! 'temp_results'
  x.compare!
end

benchmark-ips-2.14.0/examples/quick.rb
#!/usr/bin/env ruby

require 'benchmark/ips'

def add
  1 + 1
end

def sub
  2 - 1
end

Benchmark.ips_quick(:add, :sub, warmup: 1, time: 1)

h = {}
Benchmark.ips_quick(:size, :empty?, on: h)

benchmark-ips-2.14.0/examples/save.rb
#!/usr/bin/env ruby

# example to explain save!
# The save! feature expects to be run twice, generally with different Rubys.
# save! can also be used to compare module changes which impact the run time
#
# If you're comparing ruby versions, just use the version in the label
#
#   x.report("ruby #{RUBY_VERSION}") { 'Bruce'.inspect }
#
# Or use a hash
#
#   x.report("version" => RUBY_VERSION, "method" => 'bruce') { 'Bruce'.inspect }
#
# RUN_1: SAVE_FILE='run1.out' ruby examples/save.rb
# Warming up --------------------------------------
#              without   172.168k i/100ms
# Calculating -------------------------------------
#              without      2.656M (± 3.3%) i/s -     13.429M in   5.062098s
#
# RUN_2: SAVE_FILE='run1.out' WITH_MODULE=true ruby examples/save.rb
# Warming up --------------------------------------
#                 with    92.087k i/100ms
# Calculating -------------------------------------
#                 with      1.158M (± 1.4%) i/s -      5.801M in   5.010084s
#
# Comparison:
#              without:  2464721.3 i/s
#                 with:  1158179.6 i/s - 2.13x slower
#
# CLEANUP: rm run1.out

require 'benchmark/ips'

Benchmark.ips do |x|
  x.report(ENV['WITH_MODULE'] == 'true' ? 'with' : 'without') do
    'Bruce'.inspect
  end

  if ENV['WITH_MODULE'] == 'true'
    class String
      def inspect
        result = %w[Bruce Wayne is Batman]
        result.join(' ')
      end
    end
  end

  x.save! ENV['SAVE_FILE'] if ENV['SAVE_FILE']
  x.compare!
end

benchmark-ips-2.14.0/examples/simple.rb
#!/usr/bin/env ruby

require 'benchmark/ips'

Benchmark.ips do |x|
  # Configure the number of seconds used during
  # the warmup phase and calculation phase
  x.config(:time => 5, :warmup => 2)

  # These parameters can also be configured this way
  x.time = 5
  x.warmup = 2

  # Typical mode, runs the block as many times as it can
  x.report("addition") { 1 + 2 }

  # To reduce overhead, the number of iterations is passed in
  # and the block must run the code the specific number of times.
  # Used for when the workload is very small and any overhead
  # introduces incorrectable errors.
x.report(:addition2) do |times| i = 0 while i < times 1 + 2 i += 1 end end # To reduce overhead even more, grafts the code given into # the loop that performs the iterations internally to reduce # overhead. Typically not needed, use the |times| form instead. x.report("addition3", "1 + 2") # Really long labels should be formatted correctly x.report("addition-test-long-label") { 1 + 2 } x.compare! end puts <<-EOD Typical results will show addition2 & addition3 to be the most performant, and they should perform reasonably similarly. You should see addition and addition-test-long-label to perform very similarly to each other (as they are running the same test, just with different labels), and they should both run in the neighborhood of 3.5 times slower than addition2 and addition3." EOD benchmark-ips-2.14.0/lib/000077500000000000000000000000001466724223400151015ustar00rootroot00000000000000benchmark-ips-2.14.0/lib/benchmark/000077500000000000000000000000001466724223400170335ustar00rootroot00000000000000benchmark-ips-2.14.0/lib/benchmark/compare.rb000066400000000000000000000071241466724223400210120ustar00rootroot00000000000000# encoding: utf-8 module Benchmark # Functionality of performaing comparison between reports. # # Usage: # # Add +x.compare!+ to perform comparison between reports. # # Example: # > Benchmark.ips do |x| # x.report('Reduce using tag') { [*1..10].reduce(:+) } # x.report('Reduce using to_proc') { [*1..10].reduce(&:+) } # x.compare! # end # # Calculating ------------------------------------- # Reduce using tag 19216 i/100ms # Reduce using to_proc 17437 i/100ms # ------------------------------------------------- # Reduce using tag 278950.0 (±8.5%) i/s - 1402768 in 5.065112s # Reduce using to_proc 247295.4 (±8.0%) i/s - 1238027 in 5.037299s # # Comparison: # Reduce using tag: 278950.0 i/s # Reduce using to_proc: 247295.4 i/s - 1.13x slower # # Besides regular Calculating report, this will also indicates which one is slower. # # +x.compare!+ also takes an +order: :baseline+ option. # # Example: # > Benchmark.ips do |x| # x.report('Reduce using block') { [*1..10].reduce { |sum, n| sum + n } } # x.report('Reduce using tag') { [*1..10].reduce(:+) } # x.report('Reduce using to_proc') { [*1..10].reduce(&:+) } # x.compare!(order: :baseline) # end # # Calculating ------------------------------------- # Reduce using block 886.202k (± 2.2%) i/s - 4.521M in 5.103774s # Reduce using tag 1.821M (± 1.6%) i/s - 9.111M in 5.004183s # Reduce using to_proc 895.948k (± 1.6%) i/s - 4.528M in 5.055368s # # Comparison: # Reduce using block: 886202.5 i/s # Reduce using tag: 1821055.0 i/s - 2.05x (± 0.00) faster # Reduce using to_proc: 895948.1 i/s - same-ish: difference falls within error # # The first report is considered the baseline against which other reports are compared. module Compare # Compare between reports, prints out facts of each report: # runtime, comparative speed difference. # @param entries [Array] Reports to compare. 
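    # @param order [Symbol] +:fastest+ (default) compares everything against
    #   the fastest entry; +:baseline+ compares everything against the first
    #   entry reported.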
def compare(*entries, order: :fastest) return if entries.size < 2 case order when :baseline baseline = entries.shift sorted = entries.sort_by{ |e| e.stats.central_tendency }.reverse when :fastest sorted = entries.sort_by{ |e| e.stats.central_tendency }.reverse baseline = sorted.shift else raise ArgumentError, "Unknown order: #{order.inspect}" end $stdout.puts "\nComparison:" $stdout.printf "%20s: %10.1f i/s\n", baseline.label.to_s, baseline.stats.central_tendency sorted.each do |report| name = report.label.to_s $stdout.printf "%20s: %10.1f i/s - ", name, report.stats.central_tendency if report.stats.overlaps?(baseline.stats) $stdout.print "same-ish: difference falls within error" elsif report.stats.central_tendency > baseline.stats.central_tendency speedup, error = report.stats.speedup(baseline.stats) $stdout.printf "%.2fx ", speedup if error $stdout.printf " (± %.2f)", error end $stdout.print " faster" else slowdown, error = report.stats.slowdown(baseline.stats) $stdout.printf "%.2fx ", slowdown if error $stdout.printf " (± %.2f)", error end $stdout.print " slower" end $stdout.puts end footer = baseline.stats.footer $stdout.puts footer.rjust(40) if footer $stdout.puts end end extend Benchmark::Compare end benchmark-ips-2.14.0/lib/benchmark/ips.rb000066400000000000000000000147671466724223400201720ustar00rootroot00000000000000# encoding: utf-8 require 'benchmark/timing' require 'benchmark/compare' require 'benchmark/ips/stats/stats_metric' require 'benchmark/ips/stats/sd' require 'benchmark/ips/stats/bootstrap' require 'benchmark/ips/report' require 'benchmark/ips/job/entry' require 'benchmark/ips/job/stream_report' require 'benchmark/ips/job/multi_report' require 'benchmark/ips/job' # Performance benchmarking library module Benchmark # Benchmark in iterations per second, no more guessing! # # See Benchmark.ips for documentation on using this gem~ # # @see {https://github.com/evanphx/benchmark-ips} module IPS # Benchmark-ips Gem version. VERSION = "2.14.0" # CODENAME of current version. CODENAME = "Akagi" # Measure code in block, each code's benchmarked result will display in # iteration per second with standard deviation in given time. # @param time [Integer] Specify how long should benchmark your code in seconds. # @param warmup [Integer] Specify how long should Warmup time run in seconds. # @return [Report] def ips(*args) if args[0].is_a?(Hash) time, warmup, quiet = args[0].values_at(:time, :warmup, :quiet) else time, warmup, quiet = args end sync, $stdout.sync = $stdout.sync, true job = Job.new job_opts = {} job_opts[:time] = time unless time.nil? job_opts[:warmup] = warmup unless warmup.nil? job_opts[:quiet] = quiet unless quiet.nil? job.config job_opts yield job job.load_held_results job.run if job.run_single? && job.all_results_have_been_run? job.clear_held_results else job.save_held_results puts '', 'Pausing here -- run Ruby again to measure the next benchmark...' if job.run_single? end $stdout.sync = sync job.run_comparison job.generate_json report = job.full_report if ENV['SHARE'] || ENV['SHARE_URL'] require 'benchmark/ips/share' share = Share.new report, job share.share end report end # Quickly compare multiple methods on the same object. # @param methods [Symbol...] A list of method names (as symbols) to compare. # @param receiver [Object] The object on which to call the methods. Defaults to Kernel. # @param opts [Hash] Additional options for customizing the benchmark. # @option opts [Integer] :warmup The number of seconds to warm up the benchmark. 
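    # @option opts [Boolean] :quiet Suppress the streamed report (passed through to #ips).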
# @option opts [Integer] :time The number of seconds to run the benchmark. # # @example Compare String#upcase and String#downcase # ips_quick(:upcase, :downcase, on: "hello") # # @example Compare two methods you just defined, with a custom warmup. # def add; 1+1; end # def sub; 2-1; end # ips_quick(:add, :sub, warmup: 10) def ips_quick(*methods, on: Kernel, **opts) ips(opts) do |x| x.compare! methods.each do |name| x.report(name) do |iter| iter.times { on.__send__ name } end end end end # Set options for running the benchmarks. # :format => [:human, :raw] # :human format narrows precision and scales results for readability # :raw format displays 6 places of precision and exact iteration counts def self.options @options ||= {:format => :human} end module Helpers SUFFIXES = ['', 'k', 'M', 'B', 'T', 'Q'].freeze def scale(value) scale = (Math.log10(value) / 3).to_i scale = 0 if scale < 0 || scale >= SUFFIXES.size suffix = SUFFIXES[scale] scaled_value = value.to_f / (1000 ** scale) "%10.3f#{suffix}" % scaled_value end module_function :scale def humanize_duration(duration_ns) if duration_ns < 1000 "%.2f ns" % duration_ns elsif duration_ns < 1_000_000 "%.2f μs" % (duration_ns / 1000) elsif duration_ns < 1_000_000_000 "%.2f ms" % (duration_ns / 1_000_000) else "%.2f s" % (duration_ns / 1_000_000_000) end end module_function :humanize_duration end end extend Benchmark::IPS # make ips/ips_quick available as module-level method ## # :singleton-method: ips # # require 'benchmark/ips' # # Benchmark.ips do |x| # # Configure the number of seconds used during # # the warmup phase (default 2) and calculation phase (default 5) # x.config(:time => 5, :warmup => 2) # # # These parameters can also be configured this way # x.time = 5 # x.warmup = 2 # # # Typical mode, runs the block as many times as it can # x.report("addition") { 1 + 2 } # # # To reduce overhead, the number of iterations is passed in # # and the block must run the code the specific number of times. # # Used for when the workload is very small and any overhead # # introduces incorrectable errors. # x.report("addition2") do |times| # i = 0 # while i < times # 1 + 2 # i += 1 # end # end # # # To reduce overhead even more, grafts the code given into # # the loop that performs the iterations internally to reduce # # overhead. Typically not needed, use the |times| form instead. # x.report("addition3", "1 + 2") # # # Really long labels should be formatted correctly # x.report("addition-test-long-label") { 1 + 2 } # # # Compare the iterations per second of the various reports! # x.compare! # end # # This will generate the following report: # # Calculating ------------------------------------- # addition 71.254k i/100ms # addition2 68.658k i/100ms # addition3 83.079k i/100ms # addition-test-long-label # 70.129k i/100ms # ------------------------------------------------- # addition 4.955M (± 8.7%) i/s - 24.155M # addition2 24.011M (± 9.5%) i/s - 114.246M # addition3 23.958M (±10.1%) i/s - 115.064M # addition-test-long-label # 5.014M (± 9.1%) i/s - 24.545M # # Comparison: # addition2: 24011974.8 i/s # addition3: 23958619.8 i/s - 1.00x slower # addition-test-long-label: 5014756.0 i/s - 4.79x slower # addition: 4955278.9 i/s - 4.85x slower # # See also Benchmark::IPS end benchmark-ips-2.14.0/lib/benchmark/ips/000077500000000000000000000000001466724223400176265ustar00rootroot00000000000000benchmark-ips-2.14.0/lib/benchmark/ips/job.rb000066400000000000000000000274431466724223400207370ustar00rootroot00000000000000module Benchmark module IPS # Benchmark jobs. 
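    # Jobs are normally created and run for you by Benchmark.ips, e.g.:
    #
    #   Benchmark.ips do |x|
    #     x.report("label") { code_under_test }
    #   end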
class Job # Microseconds per 100 millisecond. MICROSECONDS_PER_100MS = 100_000 # Microseconds per second. MICROSECONDS_PER_SECOND = Timing::MICROSECONDS_PER_SECOND # The percentage of the expected runtime to allow # before reporting a weird runtime MAX_TIME_SKEW = 0.05 POW_2_30 = 1 << 30 # Two-element arrays, consisting of label and block pairs. # @return [Array] list of entries attr_reader :list # Determining whether to run comparison utility. # @return [Boolean] true if needs to run compare. attr_reader :compare # Determining whether to hold results between Ruby invocations # @return [Boolean] attr_accessor :hold # Report object containing information about the run. # @return [Report] the report object. attr_reader :full_report # Storing Iterations in time period. # @return [Hash] attr_reader :timing # Warmup time setter and getter (in seconds). # @return [Integer] attr_accessor :warmup # Calculation time setter and getter (in seconds). # @return [Integer] attr_accessor :time # Warmup and calculation iterations. # @return [Integer] attr_accessor :iterations # Statistics model. # @return [Object] attr_accessor :stats # Confidence. # @return [Integer] attr_accessor :confidence # Silence output # @return [Boolean] def quiet @out.quiet? end # Suite # @return [Benchmark::IPS::MultiReport] def suite @out end # Instantiate the Benchmark::IPS::Job. def initialize opts={} @list = [] @run_single = false @json_path = false @compare = false @compare_order = :fastest @held_path = nil @held_results = nil @timing = Hash.new 1 # default to 1 in case warmup isn't run @full_report = Report.new # Default warmup and calculation time in seconds. @warmup = 2 @time = 5 @iterations = 1 # Default statistical model @stats = :sd @confidence = 95 @out = MultiReport.new(StreamReport.new) end # Job configuration options, set +@warmup+ and +@time+. # @option opts [Integer] :warmup Warmup time. # @option opts [Integer] :time Calculation time. # @option iterations [Integer] :time Warmup and calculation iterations. def config opts @warmup = opts[:warmup] if opts[:warmup] @time = opts[:time] if opts[:time] @iterations = opts[:iterations] if opts[:iterations] @stats = opts[:stats] if opts[:stats] @confidence = opts[:confidence] if opts[:confidence] self.quiet = opts[:quiet] if opts.key?(:quiet) self.suite = opts[:suite] if opts[:suite] end def quiet=(val) if val # remove instances of StreamReport @out.quiet! else # ensure there is an instance of StreamReport @out << StreamReport.new if @out.quiet? end end def suite=(suite) @out << suite end # Return true if job needs to be compared. # @return [Boolean] Need to compare? def compare? @compare end # Run comparison utility. def compare!(order: :fastest) @compare = true @compare_order = order end # Return true if results are held while multiple Ruby invocations # @return [Boolean] Need to hold results between multiple Ruby invocations? def hold? !!@held_path end # Hold after each iteration. # @param held_path [String] File name to store hold file. def hold!(held_path) @held_path = held_path @run_single = true end # Save interim results. Similar to hold, but all reports are run # The report label must change for each invocation. # One way to achieve this is to include the version in the label. # @param held_path [String] File name to store hold file. def save!(held_path) @held_path = held_path @run_single = false end # Return true if items are to be run one at a time. # For the traditional hold, this is true # @return [Boolean] Run just a single item? def run_single? 
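        # true when hold! was used; save! leaves this false so that all
        # items run in every invocation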
@run_single end # Return true if job needs to generate json. # @return [Boolean] Need to generate json? def json? !!@json_path end # Generate json to given path, defaults to "data.json". def json!(path="data.json") @json_path = path end # Registers the given label and block pair in the job list. # @param label [String] Label of benchmarked code. # @param str [String] Code to be benchmarked. # @param blk [Proc] Code to be benchmarked. # @raise [ArgumentError] Raises if str and blk are both present. # @raise [ArgumentError] Raises if str and blk are both absent. def item(label="", str=nil, &blk) # :yield: if blk and str raise ArgumentError, "specify a block and a str, but not both" end action = str || blk raise ArgumentError, "no block or string" unless action @list.push Entry.new(label, action) self end alias_method :report, :item # Calculate the cycles needed to run for approx 100ms, # given the number of iterations to run the given time. # @param [Float] time_msec Each iteration's time in ms. # @param [Integer] iters Iterations. # @return [Integer] Cycles per 100ms. def cycles_per_100ms time_msec, iters cycles = ((MICROSECONDS_PER_100MS / time_msec) * iters).to_i cycles <= 0 ? 1 : cycles end # Calculate the time difference of before and after in microseconds. # @param [Time] before time. # @param [Time] after time. # @return [Float] Time difference of before and after. def time_us before, after (after.to_f - before.to_f) * MICROSECONDS_PER_SECOND end # Calculate the iterations per second given the number # of cycles run and the time in microseconds that elapsed. # @param [Integer] cycles Cycles. # @param [Integer] time_us Time in microsecond. # @return [Float] Iteration per second. def iterations_per_sec cycles, time_us MICROSECONDS_PER_SECOND * (cycles.to_f / time_us.to_f) end def load_held_results return unless @held_path && File.exist?(@held_path) && !File.zero?(@held_path) require "json" @held_results = {} JSON.load(IO.read(@held_path)).each do |result| @held_results[result['item']] = result create_report(result['item'], result['measured_us'], result['iter'], create_stats(result['samples']), result['cycles']) end end def save_held_results return unless @held_path require "json" data = full_report.entries.map { |e| { 'item' => e.label, 'measured_us' => e.microseconds, 'iter' => e.iterations, 'samples' => e.samples, 'cycles' => e.measurement_cycle } } IO.write(@held_path, JSON.generate(data) << "\n") end def all_results_have_been_run? @full_report.entries.size == @list.size end def clear_held_results File.delete @held_path if File.exist?(@held_path) end def run if @warmup && @warmup != 0 then @out.start_warming @iterations.times do run_warmup end end @out.start_running @iterations.times do |n| run_benchmark end @out.footer end # Run warmup. def run_warmup @list.each do |item| next if run_single? && @held_results && @held_results.key?(item.label) @out.warming item.label, @warmup Timing.clean_env # Run for up to half of the configured warmup time with an increasing # number of cycles to reduce overhead and improve accuracy. # This also avoids running with a constant number of cycles, which a # JIT might speculate on and then have to recompile in #run_benchmark. 
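        # The cycle count therefore doubles each pass (1, 2, 4, 8, ...) until
        # the next run would overshoot half of the warmup budget.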
before = Timing.now target = Timing.add_second before, @warmup / 2.0 cycles = 1 begin t0 = Timing.now item.call_times cycles t1 = Timing.now warmup_iter = cycles warmup_time_us = Timing.time_us(t0, t1) # If the number of cycles would go outside the 32-bit signed integers range # then exit the loop to avoid overflows and start the 100ms warmup runs break if cycles >= POW_2_30 cycles *= 2 end while Timing.now + warmup_time_us * 2 < target cycles = cycles_per_100ms warmup_time_us, warmup_iter @timing[item] = cycles # Run for the remaining of warmup in a similar way as #run_benchmark. target = Timing.add_second before, @warmup while Timing.now + MICROSECONDS_PER_100MS < target item.call_times cycles end @out.warmup_stats warmup_time_us, @timing[item] break if run_single? end end # Run calculation. def run_benchmark @list.each do |item| next if run_single? && @held_results && @held_results.key?(item.label) @out.running item.label, @time Timing.clean_env iter = 0 measurements_us = [] # Running this number of cycles should take around 100ms. cycles = @timing[item] target = Timing.add_second Timing.now, @time begin before = Timing.now item.call_times cycles after = Timing.now # If for some reason the timing said this took no time (O_o) # then ignore the iteration entirely and start another. iter_us = Timing.time_us before, after next if iter_us <= 0.0 iter += cycles measurements_us << iter_us end while Timing.now < target final_time = before measured_us = measurements_us.inject(:+) samples = measurements_us.map { |time_us| iterations_per_sec cycles, time_us } rep = create_report(item.label, measured_us, iter, create_stats(samples), cycles) if (final_time - target).abs >= (@time.to_f * MAX_TIME_SKEW) rep.show_total_time! end @out.add_report rep, caller(1).first break if run_single? end end def create_stats(samples) case @stats when :sd Stats::SD.new(samples) when :bootstrap Stats::Bootstrap.new(samples, @confidence) else raise "unknown stats #{@stats}" end end # Run comparison of entries in +@full_report+. def run_comparison @full_report.run_comparison(@compare_order) if compare? end # Generate json from +@full_report+. def generate_json @full_report.generate_json @json_path if json? end # Create report by add entry to +@full_report+. # @param label [String] Report item label. # @param measured_us [Integer] Measured time in microsecond. # @param iter [Integer] Iterations. # @param samples [Array] Sampled iterations per second. # @param cycles [Integer] Number of Cycles. # @return [Report::Entry] Entry with data. def create_report(label, measured_us, iter, samples, cycles) @full_report.add_entry label, measured_us, iter, samples, cycles end end end end benchmark-ips-2.14.0/lib/benchmark/ips/job/000077500000000000000000000000001466724223400204005ustar00rootroot00000000000000benchmark-ips-2.14.0/lib/benchmark/ips/job/entry.rb000066400000000000000000000052661466724223400220770ustar00rootroot00000000000000module Benchmark module IPS # Benchmark jobs. class Job # Entries in Benchmark Jobs. class Entry # Instantiate the Benchmark::IPS::Job::Entry. # @param label [#to_s] Label of Benchmarked code. # @param action [String, Proc] Code to be benchmarked. # @raise [ArgumentError] Raises when action is not String or not responding to +call+. def initialize(label, action) @label = label # We define #call_times on the singleton class of each Entry instance. # That way, there is no polymorphism for `@action.call` inside #call_times. if action.kind_of? 
String compile_string action @action = self else unless action.respond_to? :call raise ArgumentError, "invalid action, must respond to #call" end @action = action if action.respond_to? :arity and action.arity > 0 compile_block_with_manual_loop else compile_block end end end # The label of benchmarking action. # @return [#to_s] Label of action. attr_reader :label # The benchmarking action. # @return [String, Proc] Code to be called, could be String / Proc. attr_reader :action # Call action by given times. # @param times [Integer] Times to call +@action+. # @return [Integer] Number of times the +@action+ has been called. def call_times(times) raise '#call_times should be redefined per Benchmark::IPS::Job::Entry instance' end def compile_block m = (class << self; self; end) code = <<-CODE def call_times(times) act = @action i = 0 while i < times act.call i += 1 end end CODE m.class_eval code end def compile_block_with_manual_loop m = (class << self; self; end) code = <<-CODE def call_times(times) @action.call(times) end CODE m.class_eval code end # Compile code into +call_times+ method. # @param str [String] Code to be compiled. # @return [Symbol] :call_times. def compile_string(str) m = (class << self; self; end) code = <<-CODE def call_times(__total); __i = 0 while __i < __total #{str}; __i += 1 end end CODE m.class_eval code end end end end end benchmark-ips-2.14.0/lib/benchmark/ips/job/multi_report.rb000066400000000000000000000032061466724223400234530ustar00rootroot00000000000000module Benchmark module IPS class Job class MultiReport # @returns out [Array] list of reports to send output attr_accessor :out def empty? @out.empty? end def quiet? @out.none? { |rpt| rpt.kind_of?(StreamReport) } end def quiet! @out.delete_if { |rpt| rpt.kind_of?(StreamReport) } end # @param report [StreamReport] report to accept input? 
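        #   Accepts another MultiReport, an Enumerable of reports, or a
        #   single report object such as a custom suite.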
def <<(report) if report.kind_of?(MultiReport) self << report.out elsif report.kind_of?(Enumerable) @out += report elsif report @out << report end end # @param out [Array] list of reports to send output def initialize(out = nil) @out = [] self << out end def start_warming @out.each { |o| o.start_warming if o.respond_to?(:start_warming) } end def warming(label, warmup) @out.each { |o| o.warming(label, warmup) } end def warmup_stats(warmup_time_us, timing) @out.each { |o| o.warmup_stats(warmup_time_us, timing) } end def start_running @out.each { |o| o.start_running if o.respond_to?(:start_running) } end def running(label, warmup) @out.each { |o| o.running(label, warmup) } end def add_report(item, caller) @out.each { |o| o.add_report(item, caller) } end def footer @out.each { |o| o.footer if o.respond_to?(:footer) } end end end end end benchmark-ips-2.14.0/lib/benchmark/ips/job/stream_report.rb000066400000000000000000000031061466724223400236130ustar00rootroot00000000000000module Benchmark module IPS class Job class StreamReport def initialize(stream = $stdout) @last_item = nil @out = stream end def start_warming @out.puts RUBY_DESCRIPTION @out.puts "Warming up --------------------------------------" end def start_running @out.puts "Calculating -------------------------------------" end def warming(label, _warmup) @out.print rjust(label) end def warmup_stats(_warmup_time_us, timing) case format when :human @out.printf "%s i/100ms\n", Helpers.scale(timing) else @out.printf "%10d i/100ms\n", timing end end alias_method :running, :warming def add_report(item, caller) @out.puts " #{item.body}" @last_item = item end def footer return unless @last_item footer = @last_item.stats.footer @out.puts footer.rjust(40) if footer end private # @return [Symbol] format used for benchmarking def format Benchmark::IPS.options[:format] end # Add padding to label's right if label's length < 20, # Otherwise add a new line and 20 whitespaces. # @return [String] Right justified label. def rjust(label) label = label.to_s if label.size > 20 "#{label}\n#{' ' * 20}" else label.rjust(20) end end end end end end benchmark-ips-2.14.0/lib/benchmark/ips/report.rb000066400000000000000000000145271466724223400214770ustar00rootroot00000000000000# encoding: utf-8 module Benchmark module IPS # Report contains benchmarking entries. # Perform operations like add new entry, run comparison between entries. class Report # Represents benchmarking code data for Report. class Entry # Instantiate the Benchmark::IPS::Report::Entry. # @param [#to_s] label Label of entry. # @param [Integer] us Measured time in microsecond. # @param [Integer] iters Iterations. # @param [Object] stats Statistics. # @param [Integer] cycles Number of Cycles. def initialize(label, us, iters, stats, cycles) @label = label @microseconds = us @iterations = iters @stats = stats @measurement_cycle = cycles @show_total_time = false end # Label of entry. # @return [String] the label of entry. attr_reader :label # Measured time in microsecond. # @return [Integer] number of microseconds. attr_reader :microseconds # Number of Iterations. # @return [Integer] number of iterations. attr_reader :iterations # Statistical summary of samples. # @return [Object] statisical summary. attr_reader :stats # LEGACY: Iterations per second. # @return [Float] number of iterations per second. def ips @stats.central_tendency end # LEGACY: Standard deviation of iteration per second. # @return [Float] standard deviation of iteration per second. 
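        #   Equivalent to +stats.error+; kept for backwards compatibility.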
def ips_sd @stats.error end def samples @stats.samples end # Number of Cycles. # @return [Integer] number of cycles. attr_reader :measurement_cycle # Control if the total time the job took is reported. # Typically this value is not significant because it's very # close to the expected time, so it's suppressed by default. def show_total_time! @show_total_time = true end # Return entry's microseconds in seconds. # @return [Float] +@microseconds+ in seconds. def seconds @microseconds.to_f / 1_000_000.0 end # Return entry's standard deviation of iteration per second in percentage. # @return [Float] +@ips_sd+ in percentage. def error_percentage @stats.error_percentage end alias_method :runtime, :seconds # Return Entry body text with left padding. # Body text contains information of iteration per second with # percentage of standard deviation, iterations in runtime. # @return [String] Left justified body. def body per_iter = (" (%s/i)" % Helpers.humanize_duration(1_000_000_000 / @stats.central_tendency)).rjust(15) case Benchmark::IPS.options[:format] when :human left = ("%s (±%4.1f%%) i/s" % [Helpers.scale(@stats.central_tendency), @stats.error_percentage]).ljust(20) iters = Helpers.scale(@iterations) if @show_total_time left + per_iter + (" - %s in %10.6fs" % [iters, runtime]) else left + per_iter + (" - %s" % iters) end else left = ("%10.1f (±%.1f%%) i/s" % [@stats.central_tendency, @stats.error_percentage]).ljust(20) if @show_total_time left + per_iter + (" - %10d in %10.6fs" % [@iterations, runtime]) else left + per_iter + (" - %10d" % @iterations) end end end # Return header with padding if +@label+ is < length of 20. # @return [String] Right justified header (+@label+). def header @label.to_s.rjust(20) end # Return string representation of Entry object. # @return [String] Header and body. def to_s "#{header} #{body}" end # Print entry to current standard output ($stdout). def display $stdout.puts to_s end end # End of Entry # class Report # Entry to represent each benchmarked code in Report. # @return [Array] Entries in Report. attr_reader :entries # Instantiate the Report. def initialize @entries = [] @data = nil end # Add entry to report. # @param label [String] Entry label. # @param microseconds [Integer] Measured time in microsecond. # @param iters [Integer] Iterations. # @param stats [Object] Statistical results. # @param measurement_cycle [Integer] Number of cycles. # @return [Report::Entry] Last added entry. def add_entry label, microseconds, iters, stats, measurement_cycle entry = Entry.new(label, microseconds, iters, stats, measurement_cycle) @entries.delete_if { |e| e.label == label } @entries << entry entry end # Entries data in array for generate json. # Each entry is a hash, consists of: # name: Entry#label # ips: Entry#ips # stddev: Entry#ips_sd # microseconds: Entry#microseconds # iterations: Entry#iterations # cycles: Entry#measurement_cycles # @return [Array] Array of hashes def data @data ||= @entries.collect do |entry| { :name => entry.label, :central_tendency => entry.stats.central_tendency, :ips => entry.stats.central_tendency, # for backwards compatibility :error => entry.stats.error, :stddev => entry.stats.error, # for backwards compatibility :microseconds => entry.microseconds, :iterations => entry.iterations, :cycles => entry.measurement_cycle, } end end # Run comparison of entries. def run_comparison(order) Benchmark.compare(*@entries, order: order) end # Generate json from Report#data to given path. # @param path [String] path to generate json. 
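      #   Also accepts an IO (e.g. $stdout), in which case the JSON is
      #   written directly to that stream.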
def generate_json(path) require "json" if path.respond_to?(:write) # STDOUT path.write JSON.pretty_generate(data) else File.open path, "w" do |f| f.write JSON.pretty_generate(data) end end end end end end benchmark-ips-2.14.0/lib/benchmark/ips/share.rb000066400000000000000000000020361466724223400212560ustar00rootroot00000000000000# frozen_string_literal: true require 'net/http' require 'net/https' require 'json' module Benchmark module IPS class Share DEFAULT_URL = "https://ips.fastruby.io" def initialize(report, job) @report = report @job = job end def share base = (ENV['SHARE_URL'] || DEFAULT_URL) url = URI(File.join(base, "reports")) req = Net::HTTP::Post.new(url) data = { "entries" => @report.data, "options" => { "compare" => @job.compare? } } req.body = JSON.generate(data) http = Net::HTTP.new(url.hostname, url.port) if url.scheme == "https" http.use_ssl = true http.ssl_version = :TLSv1_2 end res = http.start do |h| h.request req end if Net::HTTPOK === res data = JSON.parse res.body puts "Shared at: #{File.join(base, data["id"])}" else puts "Error sharing report" end end end end end benchmark-ips-2.14.0/lib/benchmark/ips/stats/000077500000000000000000000000001466724223400207645ustar00rootroot00000000000000benchmark-ips-2.14.0/lib/benchmark/ips/stats/bootstrap.rb000066400000000000000000000035771466724223400233420ustar00rootroot00000000000000module Benchmark module IPS module Stats class Bootstrap include StatsMetric attr_reader :data, :error, :samples def initialize(samples, confidence) dependencies @iterations = 10_000 @confidence = (confidence / 100.0).to_s @samples = samples @data = Kalibera::Data.new({[0] => samples}, [1, samples.size]) interval = @data.bootstrap_confidence_interval(@iterations, @confidence) @median = interval.median @error = interval.error end # Average stat value # @return [Float] central_tendency def central_tendency @median end # Determines how much slower this stat is than the baseline stat # if this average is lower than the faster baseline, higher average is better (e.g. ips) (calculate accordingly) # @param baseline [SD|Bootstrap] faster baseline # @returns [Array] the slowdown and the error (not calculated for standard deviation) def slowdown(baseline) low, slowdown, high = baseline.data.bootstrap_quotient(@data, @iterations, @confidence) error = Timing.mean([slowdown - low, high - slowdown]) [slowdown, error] end def speedup(baseline) baseline.slowdown(self) end def footer "with #{(@confidence.to_f * 100).round(1)}% confidence" end def dependencies require 'kalibera' rescue LoadError puts puts "Can't load the kalibera gem - this is required to use the :bootstrap stats options." puts "It's optional, so we don't formally depend on it and it isn't installed along with benchmark-ips." puts "You probably want to do something like 'gem install kalibera' to fix this." abort end end end end end benchmark-ips-2.14.0/lib/benchmark/ips/stats/sd.rb000066400000000000000000000022651466724223400217240ustar00rootroot00000000000000module Benchmark module IPS module Stats class SD include StatsMetric attr_reader :error, :samples def initialize(samples) @samples = samples @mean = Timing.mean(samples) @error = Timing.stddev(samples, @mean).round end # Average stat value # @return [Float] central_tendency def central_tendency @mean end # Determines how much slower this stat is than the baseline stat # if this average is lower than the faster baseline, higher average is better (e.g. 
ips) (calculate accordingly) # @param baseline [SD|Bootstrap] faster baseline # @returns [Array] the slowdown and the error (not calculated for standard deviation) def slowdown(baseline) if baseline.central_tendency > central_tendency [baseline.central_tendency.to_f / central_tendency, nil] else [central_tendency.to_f / baseline.central_tendency, nil] end end def speedup(baseline) baseline.slowdown(self) end def footer nil end end end end end benchmark-ips-2.14.0/lib/benchmark/ips/stats/stats_metric.rb000066400000000000000000000012201466724223400240050ustar00rootroot00000000000000module Benchmark module IPS module Stats module StatsMetric # Return entry's standard deviation of iteration per second in percentage. # @return [Float] +@ips_sd+ in percentage. def error_percentage 100.0 * (error.to_f / central_tendency) end def overlaps?(baseline) baseline_low = baseline.central_tendency - baseline.error baseline_high = baseline.central_tendency + baseline.error my_high = central_tendency + error my_low = central_tendency - error my_high > baseline_low && my_low < baseline_high end end end end end benchmark-ips-2.14.0/lib/benchmark/timing.rb000066400000000000000000000043301466724223400206470ustar00rootroot00000000000000module Benchmark # Perform calculations on Timing results. module Timing # Microseconds per second. MICROSECONDS_PER_SECOND = 1_000_000 # Calculate (arithmetic) mean of given samples. # @param [Array] samples Samples to calculate mean. # @return [Float] Mean of given samples. def self.mean(samples) sum = samples.inject(:+) sum / samples.size end # Calculate variance of given samples. # @param [Float] m Optional mean (Expected value). # @return [Float] Variance of given samples. def self.variance(samples, m=nil) m ||= mean(samples) total = samples.inject(0) { |acc, i| acc + ((i - m) ** 2) } total / samples.size end # Calculate standard deviation of given samples. # @param [Array] samples Samples to calculate standard deviation. # @param [Float] m Optional mean (Expected value). # @return [Float] standard deviation of given samples. def self.stddev(samples, m=nil) Math.sqrt variance(samples, m) end # Recycle used objects by starting Garbage Collector. def self.clean_env # rbx if GC.respond_to? 
      if GC.respond_to? :run
        GC.run(true)
      else
        GC.start
      end
    end

    # Use a monotonic clock if available, otherwise use Time
    begin
      Process.clock_gettime Process::CLOCK_MONOTONIC, :float_microsecond

      # Get an object that represents now and can be converted to microseconds
      def self.now
        Process.clock_gettime Process::CLOCK_MONOTONIC, :float_microsecond
      end

      # Add one second to the time representation
      def self.add_second(t, s)
        t + (s * MICROSECONDS_PER_SECOND)
      end

      # Return the number of microseconds between the 2 moments
      def self.time_us(before, after)
        after - before
      end
    rescue NameError
      # Get an object that represents now and can be converted to microseconds
      def self.now
        Time.now
      end

      # Add one second to the time representation
      def self.add_second(t, s)
        t + s
      end

      # Return the number of microseconds between the 2 moments
      def self.time_us(before, after)
        (after.to_f - before.to_f) * MICROSECONDS_PER_SECOND
      end
    end
  end
end

benchmark-ips-2.14.0/test/000077500000000000000000000000001466724223400153125ustar00rootroot00000000000000
benchmark-ips-2.14.0/test/test_benchmark_ips.rb000066400000000000000000000160431466724223400215070ustar00rootroot00000000000000
require "minitest/autorun"
require "benchmark/ips"
require "stringio"
require "tmpdir"
require "tempfile" # used by test_json_output (Tempfile)

class TestBenchmarkIPS < Minitest::Test
  def setup
    @old_stdout = $stdout
    $stdout = StringIO.new
  end

  def teardown
    $stdout = @old_stdout
  end

  def test_kwargs
    Benchmark.ips(:time => 0.001, :warmup => 0.001, :quiet => false) do |x|
      x.report("sleep 0.25") { sleep(0.25) }
    end

    assert $stdout.string.size > 0
  end

  def test_warmup0
    $stdout = @old_stdout
    out, err = capture_io do
      Benchmark.ips(:time => 1, :warmup => 0, :quiet => false) do |x|
        x.report("sleep 0.25") { sleep(0.25) }
      end
    end

    refute_match(/Warming up -+/, out)
    assert_empty err
  end

  def test_output
    Benchmark.ips(1) do |x|
      x.warmup = 0
      x.time = 0.1
      x.report("operation") { 100 * 100 }
    end

    assert $stdout.string.size > 0
  end

  def test_quiet
    Benchmark.ips(nil, nil, true) do |x|
      x.config(:warmup => 0.001, :time => 0.001)
      x.report("operation") { 100 * 100 }
    end

    assert $stdout.string.size.zero?

    Benchmark.ips(:quiet => true) do |x|
      x.config(:warmup => 0.001, :time => 0.001)
      x.report("operation") { 100 * 100 }
    end

    assert $stdout.string.size.zero?

    Benchmark.ips do |x|
      x.config(:warmup => 0.001, :time => 0.001)
      x.quiet = true
      x.report("operation") { 100 * 100 }
    end

    assert $stdout.string.size.zero?
  end

  def test_quiet_option_override
    Benchmark.ips(quiet: true) do |x|
      x.config(:warmup => 0.001, :time => 0.001)
      x.quiet = false
      x.report("operation") { 100 * 100 }
    end

    assert $stdout.string.size > 0

    $stdout.truncate(0)

    Benchmark.ips(quiet: true) do |x|
      x.config(quiet: false, warmup: 0.001, time: 0.001)
      x.report("operation") { 100 * 100 }
    end

    assert $stdout.string.size > 0

    $stdout.truncate(0)

    Benchmark.ips(quiet: true, warmup: 0.001, time: 0.001) do |x|
      # Calling config should not override the quiet option when it isn't specified
      x.config({})
      x.report("operation") { 100 * 100 }
    end

    assert $stdout.string.size.zero?
  end

  def test_ips
    report = Benchmark.ips do |x|
      x.config(:time => 1, :warmup => 1)
      x.report("sleep 0.25") { sleep(0.25) }
      x.report("sleep 0.05") { sleep(0.05) }
      x.compare!
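      # sleep(0.25) fits ~4 iterations and sleep(0.05) ~20 iterations into
      # the 1s measurement window; the assertions below check both via
      # iterations and ips.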
    end

    rep1 = report.entries[0]
    rep2 = report.entries[1]

    assert_equal "sleep 0.25", rep1.label
    assert_equal 4, rep1.iterations
    assert_in_delta 4.0, rep1.ips, 0.2

    assert_equal "sleep 0.05", rep2.label
    assert_in_delta 20.0, rep2.iterations.to_f, 1.0
    assert_in_delta 20.0, rep2.ips, 2.0
  end

  def test_ips_alternate_config
    report = Benchmark.ips do |x|
      x.time = 1
      x.warmup = 1
      x.report("sleep 0.25") { sleep(0.25) }
    end

    rep = report.entries.first

    assert_equal "sleep 0.25", rep.label
    assert_equal 4, rep.iterations
    assert_in_delta 4.0, rep.ips, 0.4
  end

  def test_ips_old_config
    Benchmark.ips(1, 1) do |x|
      assert_equal 1, x.time
      assert_equal 1, x.warmup
      return
    end
  end

  def test_ips_defaults
    report = Benchmark.ips do |x|
      x.report("sleep 0.25") { sleep(0.25) }
    end

    rep = report.entries.first

    assert_equal "sleep 0.25", rep.label
    assert_equal 4*5, rep.iterations
    assert_in_delta 4.0, rep.ips, 0.2
  end

  def test_ips_report_using_symbol
    report = Benchmark.ips do |x|
      x.warmup = 0
      x.time = 0.1
      x.report(:sleep_a_quarter_second) { 1 + 1 }
    end

    rep = report.entries.first

    assert_equal :sleep_a_quarter_second, rep.label
  end

  def test_ips_default_data
    report = Benchmark.ips do |x|
      x.config(:warmup => 0.001, :time => 0.001)
      x.report("sleep 0.25") { sleep(0.25) }
    end

    all_data = report.data
    assert all_data
    assert_equal "sleep 0.25", all_data[0][:name]
    assert all_data[0][:ips]
    assert all_data[0][:stddev]
  end

  def test_ips_empty
    report = Benchmark.ips do |x|
      x.config(:warmup => 0.001, :time => 0.001)
    end

    all_data = report.data
    assert all_data
    assert_equal [], all_data
  end

  def test_json_output
    json_file = Tempfile.new("data.json")

    Benchmark.ips do |x|
      x.config(:warmup => 0.001, :time => 0.001)
      x.report("sleep 0.25") { sleep(0.25) }
      x.json! json_file.path
    end

    json_data = json_file.read
    assert json_data

    data = JSON.parse json_data
    assert data
    assert_equal 1, data.size
    assert_equal "sleep 0.25", data[0]["name"]
    assert data[0]["ips"]
    assert data[0]["stddev"]
  end

  def test_json_output_to_stdout
    Benchmark.ips do |x|
      x.config(:warmup => 0.001, :time => 0.001)
      x.report("sleep 0.25") { sleep(0.25) }
      x.quiet = true
      x.json! $stdout
    end

    assert $stdout.string.size > 0

    data = JSON.parse $stdout.string
    assert data
    assert_equal 1, data.size
    assert_equal "sleep 0.25", data[0]["name"]
    assert data[0]["ips"]
    assert data[0]["stddev"]
  end

  def test_hold!
    temp_file_name = Dir::Tmpname.create(["benchmark-ips", ".tmp"]) { }

    Benchmark.ips(:time => 0.001, :warmup => 0.001) do |x|
      x.report("operation") { 100 * 100 }
      x.report("operation2") { 100 * 100 }
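      # hold! persists interim results to the given file so the remaining
      # entries can be measured by separate process invocations; this test
      # only checks that the results file gets created.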
      x.hold! temp_file_name
    end

    assert File.exist?(temp_file_name)
    File.unlink(temp_file_name)
  end

  def test_small_warmup_and_time
    report = Benchmark.ips do |x|
      x.config(:warmup => 0.0000000001, :time => 0.001)
      x.report("addition") { 1 + 2 }
    end
    assert_operator report.entries[0].iterations, :>=, 1

    report = Benchmark.ips do |x|
      x.config(:warmup => 0, :time => 0.0000000001)
      x.report("addition") { 1 + 2 }
    end
    assert_equal 1, report.entries[0].iterations

    report = Benchmark.ips do |x|
      x.config(:warmup => 0.001, :time => 0.0000000001)
      x.report("addition") { 1 + 2 }
    end
    assert_operator report.entries[0].iterations, :>=, 1

    report = Benchmark.ips do |x|
      x.config(:warmup => 0.0000000001, :time => 0.0000000001)
      x.report("addition") { 1 + 2 }
    end
    assert_operator report.entries[0].iterations, :>=, 1

    report = Benchmark.ips do |x|
      x.config(:warmup => 0, :time => 0)
      x.report("addition") { 1 + 2 }
    end
    assert_equal 1, report.entries[0].iterations
  end

  def test_humanize_duration
    assert_equal Benchmark::IPS::Helpers.humanize_duration(0.000000001), "0.00 ns"
    assert_equal Benchmark::IPS::Helpers.humanize_duration(123.456789), "123.46 ns"
    assert_equal Benchmark::IPS::Helpers.humanize_duration(12345.67890123), "12.35 μs"
    assert_equal Benchmark::IPS::Helpers.humanize_duration(123456789.0123456789), "123.46 ms"
    assert_equal Benchmark::IPS::Helpers.humanize_duration(123456789012.3456789012), "123.46 s"
  end

  def test_quick
    Benchmark.ips_quick(:upcase, :downcase, on: "Hello World!", warmup: 0.001, time: 0.001)
    assert $stdout.string.size > 0
  end

  def test_quick_on_kernel
    Benchmark.ips_quick(:srand, :rand, warmup: 0.001, time: 0.001)
    assert $stdout.string.size > 0
  end
end

benchmark-ips-2.14.0/test/test_report.rb000066400000000000000000000074441466724223400202160ustar00rootroot00000000000000
require "minitest/autorun"
require "benchmark/ips"
require "stringio"

class TestReport < Minitest::Test
  class StdSuite
    attr_accessor :calls

    def initialize
      @calls = []
    end

    def warming(_a, _b) ; @calls << :warming ; end
    def warmup_stats(_a, _b) ; @calls << :warmup_stats ; end
    def running(_a, _b) ; @calls << :running ; end
    def add_report(_a, _b) ; @calls << :add_report ; end
  end

  class FullReport < StdSuite
    def start_warming ; @calls << :start_warming ; end
    def start_running ; @calls << :start_running ; end
    def footer ; @calls << :footer ; end
  end

  def setup
    @old_stdout = $stdout
    $stdout = StringIO.new
  end

  def teardown
    $stdout = @old_stdout
  end

  def test_ips_config_suite
    suite = StdSuite.new
    Benchmark.ips(0.1, 0.1) do |x|
      x.config(:suite => suite)
      x.report("job") {}
    end

    assert_equal [:warming, :warmup_stats, :running, :add_report], suite.calls
  end

  def test_ips_config_suite_by_accsr
    suite = StdSuite.new
    Benchmark.ips(0.1, 0.1) do |x|
      x.suite = suite
      x.report("job") {}
    end

    assert_equal [:warming, :warmup_stats, :running, :add_report], suite.calls
  end

  def test_quiet_false_default
    Benchmark.ips do |x|
      refute x.quiet
    end
  end

  def test_quiet_false_config
    Benchmark.ips(quiet: false) do |x|
      refute x.quiet
    end
  end

  def test_quiet_false_config_by_accsr
    Benchmark.ips do |x|
      x.quiet = false
      refute x.quiet
    end
  end

  def test_quiet_false_change
    Benchmark.ips(quiet: true) do |x|
      x.quiet = false
      refute x.quiet
    end
  end

  def test_quiet_false
    $stdout = @old_stdout
    out, err = capture_io do
      Benchmark.ips(:time => 1, :warmup => 0, :quiet => false) do |x|
        x.report("sleep 0.25") { sleep(0.25) }
      end
    end

    assert_match(/Calculating -+/, out)
    assert_empty err
  end

  # all reports are run after the block is fully defined,
  # so the last value wins for all tests
  def test_quiet_false_change_mind
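    # restore the real stdout so the report output is visible to capture_io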
    $stdout = @old_stdout
    # all reports are run after the block is defined, so changing the value
    # mid-block does not matter: the last value wins for all
    out, err = capture_io do
      Benchmark.ips(:time => 1, :warmup => 0, :quiet => true) do |x|
        x.report("sleep 0.25") { sleep(0.25) }
        x.quiet = false
      end
    end

    assert_match(/Calculating -+/, out)
    assert_empty err
  end

  def test_quiet_true_config
    Benchmark.ips(:quiet => true) do |x|
      assert x.quiet
    end
  end

  def test_quiet_true_by_accsr
    Benchmark.ips do |x|
      x.quiet = true
      assert x.quiet
    end
  end

  def test_quiet_true_change
    Benchmark.ips(:quiet => false) do |x|
      x.quiet = true
      assert x.quiet
    end
  end

  def test_quiet_true
    $stdout = @old_stdout
    out, err = capture_io do
      Benchmark.ips(:time => 1, :warmup => 0, :quiet => true) do |x|
        x.report("sleep 0.25") { sleep(0.25) }
      end
    end

    refute_match(/Calculating -+/, out)
    assert_empty err
  end

  # all reports are run after the block is fully defined,
  # so the last value wins for all tests
  def test_quiet_true_change_mind
    $stdout = @old_stdout
    out, err = capture_io do
      Benchmark.ips(:time => 1, :warmup => 0, :quiet => false) do |x|
        x.report("sleep 0.25") { sleep(0.25) }
        x.quiet = true
      end
    end

    refute_match(/Calculating -+/, out)
    assert_empty err
  end

  def test_multi_report
    suite = Benchmark::IPS::Job::MultiReport.new
    suite << StdSuite.new
    suite << StdSuite.new

    Benchmark.ips(:time => 0.1, :warmup => 0.1, :quiet => true) do |x|
      x.suite = suite
      x.report("job") {}
    end

    suite.out.each do |rpt|
      assert_equal [:warming, :warmup_stats, :running, :add_report], rpt.calls
    end
  end
end
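# Usage sketch (not part of the test suite; based only on the MultiReport
# behaviour exercised in test_multi_report above — StdSuite stands in for
# any object implementing the suite callbacks):
#
#   logger = StdSuite.new
#   fanout = Benchmark::IPS::Job::MultiReport.new
#   fanout << logger
#
#   Benchmark.ips(:time => 0.1, :warmup => 0.1, :quiet => true) do |x|
#     x.suite = fanout
#     x.report("job") { 100 * 100 }
#   end
#
#   logger.calls # => [:warming, :warmup_stats, :running, :add_report]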