bayestestR/0000755000176200001440000000000013620704270012400 5ustar liggesusersbayestestR/NAMESPACE0000644000176200001440000002473613620150257013633 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(as.data.frame,density) S3method(as.double,bayesfactor_inclusion) S3method(as.double,bayesfactor_models) S3method(as.double,bayesfactor_parameters) S3method(as.double,bayesfactor_restricted) S3method(as.double,map_estimate) S3method(as.double,mhdior) S3method(as.double,p_direction) S3method(as.double,p_map) S3method(as.double,p_rope) S3method(as.double,p_significance) S3method(as.double,rope) S3method(as.numeric,bayesfactor_inclusion) S3method(as.numeric,bayesfactor_models) S3method(as.numeric,bayesfactor_parameters) S3method(as.numeric,bayesfactor_restricted) S3method(as.numeric,map_estimate) S3method(as.numeric,mhdior) S3method(as.numeric,p_direction) S3method(as.numeric,p_map) S3method(as.numeric,p_significance) S3method(bayesfactor_inclusion,BFBayesFactor) S3method(bayesfactor_inclusion,bayesfactor_models) S3method(bayesfactor_models,BFBayesFactor) S3method(bayesfactor_models,brmsfit) S3method(bayesfactor_models,default) S3method(bayesfactor_models,stanreg) S3method(bayesfactor_parameters,bayesfactor_models) S3method(bayesfactor_parameters,brmsfit) S3method(bayesfactor_parameters,data.frame) S3method(bayesfactor_parameters,emmGrid) S3method(bayesfactor_parameters,numeric) S3method(bayesfactor_parameters,sim) S3method(bayesfactor_parameters,sim.merMod) S3method(bayesfactor_parameters,stanreg) S3method(bayesfactor_restricted,brmsfit) S3method(bayesfactor_restricted,data.frame) S3method(bayesfactor_restricted,emmGrid) S3method(bayesfactor_restricted,stanreg) S3method(check_prior,brmsfit) S3method(check_prior,stanreg) S3method(ci,BFBayesFactor) S3method(ci,MCMCglmm) S3method(ci,brmsfit) S3method(ci,data.frame) S3method(ci,emmGrid) S3method(ci,mcmc) S3method(ci,numeric) S3method(ci,sim) S3method(ci,sim.merMod) S3method(ci,stanreg) 
S3method(describe_posterior,BFBayesFactor) S3method(describe_posterior,MCMCglmm) S3method(describe_posterior,brmsfit) S3method(describe_posterior,data.frame) S3method(describe_posterior,double) S3method(describe_posterior,emmGrid) S3method(describe_posterior,mcmc) S3method(describe_posterior,numeric) S3method(describe_posterior,sim) S3method(describe_posterior,sim.merMod) S3method(describe_posterior,stanmvreg) S3method(describe_posterior,stanreg) S3method(describe_prior,BFBayesFactor) S3method(describe_prior,brmsfit) S3method(describe_prior,stanreg) S3method(diagnostic_posterior,BFBayesFactor) S3method(diagnostic_posterior,brmsfit) S3method(diagnostic_posterior,data.frame) S3method(diagnostic_posterior,numeric) S3method(diagnostic_posterior,stanreg) S3method(effective_sample,MCMCglmm) S3method(effective_sample,brmsfit) S3method(effective_sample,stanreg) S3method(equivalence_test,BFBayesFactor) S3method(equivalence_test,brmsfit) S3method(equivalence_test,data.frame) S3method(equivalence_test,default) S3method(equivalence_test,emmGrid) S3method(equivalence_test,mcmc) S3method(equivalence_test,numeric) S3method(equivalence_test,sim) S3method(equivalence_test,sim.merMod) S3method(equivalence_test,stanreg) S3method(estimate_density,MCMCglmm) S3method(estimate_density,brmsfit) S3method(estimate_density,data.frame) S3method(estimate_density,emmGrid) S3method(estimate_density,mcmc) S3method(estimate_density,numeric) S3method(estimate_density,stanreg) S3method(eti,BFBayesFactor) S3method(eti,MCMCglmm) S3method(eti,brmsfit) S3method(eti,data.frame) S3method(eti,emmGrid) S3method(eti,mcmc) S3method(eti,numeric) S3method(eti,sim) S3method(eti,sim.merMod) S3method(eti,stanreg) S3method(hdi,BFBayesFactor) S3method(hdi,MCMCglmm) S3method(hdi,brmsfit) S3method(hdi,data.frame) S3method(hdi,emmGrid) S3method(hdi,mcmc) S3method(hdi,numeric) S3method(hdi,sim) S3method(hdi,sim.merMod) S3method(hdi,stanreg) S3method(map_estimate,brmsfit) S3method(map_estimate,numeric) 
S3method(map_estimate,stanreg) S3method(mcse,brmsfit) S3method(mcse,stanreg) S3method(mhdior,BFBayesFactor) S3method(mhdior,brmsfit) S3method(mhdior,data.frame) S3method(mhdior,emmGrid) S3method(mhdior,numeric) S3method(mhdior,stanreg) S3method(p_direction,BFBayesFactor) S3method(p_direction,MCMCglmm) S3method(p_direction,brmsfit) S3method(p_direction,data.frame) S3method(p_direction,emmGrid) S3method(p_direction,mcmc) S3method(p_direction,numeric) S3method(p_direction,sim) S3method(p_direction,sim.merMod) S3method(p_direction,stanreg) S3method(p_map,BFBayesFactor) S3method(p_map,MCMCglmm) S3method(p_map,brmsfit) S3method(p_map,data.frame) S3method(p_map,emmGrid) S3method(p_map,mcmc) S3method(p_map,numeric) S3method(p_map,sim) S3method(p_map,sim.merMod) S3method(p_map,stanreg) S3method(p_rope,BFBayesFactor) S3method(p_rope,MCMCglmm) S3method(p_rope,brmsfit) S3method(p_rope,data.frame) S3method(p_rope,default) S3method(p_rope,emmGrid) S3method(p_rope,mcmc) S3method(p_rope,numeric) S3method(p_rope,sim) S3method(p_rope,sim.merMod) S3method(p_rope,stanreg) S3method(p_significance,BFBayesFactor) S3method(p_significance,MCMCglmm) S3method(p_significance,brmsfit) S3method(p_significance,data.frame) S3method(p_significance,emmGrid) S3method(p_significance,mcmc) S3method(p_significance,numeric) S3method(p_significance,stanreg) S3method(plot,bayesfactor_models) S3method(plot,bayesfactor_parameters) S3method(plot,bayestestR_eti) S3method(plot,bayestestR_hdi) S3method(plot,bayestestR_si) S3method(plot,equivalence_test) S3method(plot,estimate_density) S3method(plot,map_estimate) S3method(plot,overlap) S3method(plot,p_direction) S3method(plot,p_significance) S3method(plot,point_estimate) S3method(plot,rope) S3method(point_estimate,BFBayesFactor) S3method(point_estimate,MCMCglmm) S3method(point_estimate,brmsfit) S3method(point_estimate,data.frame) S3method(point_estimate,emmGrid) S3method(point_estimate,mcmc) S3method(point_estimate,numeric) S3method(point_estimate,sim) 
S3method(point_estimate,sim.merMod) S3method(point_estimate,stanreg) S3method(print,bayesfactor_inclusion) S3method(print,bayesfactor_models) S3method(print,bayesfactor_parameters) S3method(print,bayesfactor_restricted) S3method(print,bayestestR_ci) S3method(print,bayestestR_eti) S3method(print,bayestestR_hdi) S3method(print,bayestestR_si) S3method(print,describe_posterior) S3method(print,equivalence_test) S3method(print,map_estimate) S3method(print,mhdior) S3method(print,overlap) S3method(print,p_direction) S3method(print,p_map) S3method(print,p_rope) S3method(print,p_significance) S3method(print,point_estimate) S3method(print,rope) S3method(rope,BFBayesFactor) S3method(rope,MCMCglmm) S3method(rope,brmsfit) S3method(rope,data.frame) S3method(rope,default) S3method(rope,emmGrid) S3method(rope,mcmc) S3method(rope,numeric) S3method(rope,sim) S3method(rope,sim.merMod) S3method(rope,stanreg) S3method(rope_range,BFBayesFactor) S3method(rope_range,brmsfit) S3method(rope_range,default) S3method(rope_range,lm) S3method(rope_range,merMod) S3method(rope_range,stanreg) S3method(sensitivity_to_prior,stanreg) S3method(si,brmsfit) S3method(si,data.frame) S3method(si,emmGrid) S3method(si,numeric) S3method(si,stanreg) S3method(simulate_prior,brmsfit) S3method(simulate_prior,stanreg) S3method(update,bayesfactor_models) S3method(weighted_posteriors,BFBayesFactor) S3method(weighted_posteriors,brmsfit) S3method(weighted_posteriors,stanreg) export(area_under_curve) export(auc) export(bayesfactor) export(bayesfactor_inclusion) export(bayesfactor_models) export(bayesfactor_parameters) export(bayesfactor_pointull) export(bayesfactor_restricted) export(bayesfactor_rope) export(bayesian_as_frequentist) export(bf_inclusion) export(bf_models) export(bf_parameters) export(bf_pointull) export(bf_restricted) export(bf_rope) export(check_prior) export(ci) export(contr.bayes) export(convert_bayesian_as_frequentist) export(convert_p_to_pd) export(convert_pd_to_p) export(density_at) 
export(describe_posterior) export(describe_prior) export(diagnostic_posterior) export(distribution) export(distribution_beta) export(distribution_binomial) export(distribution_cauchy) export(distribution_chisquared) export(distribution_custom) export(distribution_gamma) export(distribution_mixture_normal) export(distribution_normal) export(distribution_poisson) export(distribution_student) export(distribution_tweedie) export(distribution_uniform) export(effective_sample) export(equivalence_test) export(estimate_density) export(eti) export(hdi) export(map_estimate) export(mcse) export(mhdior) export(overlap) export(p_direction) export(p_map) export(p_pointnull) export(p_rope) export(p_significance) export(p_to_pd) export(pd) export(pd_to_p) export(point_estimate) export(reshape_ci) export(rnorm_perfect) export(rope) export(rope_range) export(sensitivity_to_prior) export(si) export(simulate_correlation) export(simulate_prior) export(simulate_ttest) export(weighted_posteriors) importFrom(graphics,plot) importFrom(graphics,polygon) importFrom(insight,clean_parameters) importFrom(insight,find_algorithm) importFrom(insight,find_formula) importFrom(insight,find_parameters) importFrom(insight,format_table) importFrom(insight,get_parameters) importFrom(insight,get_priors) importFrom(insight,get_response) importFrom(insight,is_model_supported) importFrom(insight,is_multivariate) importFrom(insight,model_info) importFrom(insight,print_color) importFrom(methods,is) importFrom(stats,BIC) importFrom(stats,approx) importFrom(stats,approxfun) importFrom(stats,as.formula) importFrom(stats,cor) importFrom(stats,cor.test) importFrom(stats,density) importFrom(stats,getCall) importFrom(stats,glm) importFrom(stats,integrate) importFrom(stats,lm) importFrom(stats,mad) importFrom(stats,median) importFrom(stats,na.omit) importFrom(stats,predict) importFrom(stats,qbeta) importFrom(stats,qbinom) importFrom(stats,qcauchy) importFrom(stats,qchisq) importFrom(stats,qgamma) 
importFrom(stats,qnorm) importFrom(stats,qpois) importFrom(stats,qt) importFrom(stats,quantile) importFrom(stats,qunif) importFrom(stats,rbeta) importFrom(stats,rbinom) importFrom(stats,rcauchy) importFrom(stats,rchisq) importFrom(stats,reshape) importFrom(stats,rgamma) importFrom(stats,rnorm) importFrom(stats,rpois) importFrom(stats,rt) importFrom(stats,runif) importFrom(stats,sd) importFrom(stats,setNames) importFrom(stats,splinefun) importFrom(stats,terms) importFrom(stats,terms.formula) importFrom(stats,update) importFrom(utils,capture.output) importFrom(utils,install.packages) importFrom(utils,stack) bayestestR/README.md0000644000176200001440000005117713607554753013710 0ustar liggesusers # bayestestR [![CRAN](http://www.r-pkg.org/badges/version/bayestestR)](https://cran.r-project.org/package=bayestestR) [![downloads](http://cranlogs.r-pkg.org/badges/bayestestR)](https://cran.r-project.org/package=bayestestR) [![Build Status](https://travis-ci.org/easystats/bayestestR.svg?branch=master)](https://travis-ci.org/easystats/bayestestR) [![codecov](https://codecov.io/gh/easystats/bayestestR/branch/master/graph/badge.svg)](https://codecov.io/gh/easystats/bayestestR) [![DOI](https://joss.theoj.org/papers/10.21105/joss.01541/status.svg)](https://doi.org/10.21105/joss.01541) ***Become a Bayesian master you will*** Existing R packages allow users to easily fit a large variety of models and extract and visualize the posterior draws. However, most of these packages only return a limited set of indices (e.g., point-estimates and CIs). **bayestestR** provides a comprehensive and consistent set of functions to analyze and describe posterior distributions generated by a variety of models objects, including popular modeling packages such as **rstanarm**, **brms** or **BayesFactor**. You can reference the package and its documentation as follows: - Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). 
*bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. [10.21105/joss.01541](https://doi.org/10.21105/joss.01541) - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) ## Installation Run the following: ``` r install.packages("bayestestR") ``` ## Documentation [![Documentation](https://img.shields.io/badge/documentation-bayestestR-orange.svg?colorB=E91E63)](https://easystats.github.io/bayestestR/) [![Blog](https://img.shields.io/badge/blog-easystats-orange.svg?colorB=FF9800)](https://easystats.github.io/blog/posts/) [![Features](https://img.shields.io/badge/features-bayestestR-orange.svg?colorB=2196F3)](https://easystats.github.io/bayestestR/reference/index.html) Click on the buttons above to access the package [**documentation**](https://easystats.github.io/bayestestR/) and the [**easystats blog**](https://easystats.github.io/blog/posts/), and check-out these vignettes: #### Tutorials - [Get Started with Bayesian Analysis](https://easystats.github.io/bayestestR/articles/bayestestR.html) - [Example 1: Initiation to Bayesian models](https://easystats.github.io/bayestestR/articles/example1.html) - [Example 2: Confirmation of Bayesian skills](https://easystats.github.io/bayestestR/articles/example2.html) - [Example 3: Become a Bayesian master](https://easystats.github.io/bayestestR/articles/example3.html) #### Articles - [Credible Intervals (CI)](https://easystats.github.io/bayestestR/articles/credible_interval.html) - [Probability of Direction (pd)](https://easystats.github.io/bayestestR/articles/probability_of_direction.html) - [Region of Practical Equivalence (ROPE)](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html) - [Bayes 
Factors (BF)](https://easystats.github.io/bayestestR/articles/bayes_factors.html) - [Comparison of Point-Estimates](https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html) - [Comparison of Indices of Effect Existence](https://easystats.github.io/bayestestR/articles/indicesExistenceComparison.html) - [Reporting Guidelines](https://easystats.github.io/bayestestR/articles/guidelines.html) # Features In the Bayesian framework, parameters are estimated in a probabilistic fashion as *distributions*. These distributions can be summarised and described by **reporting 4 types of indices**: - [**Centrality**](https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html) - `mean()`, `median()` or [**`map_estimate()`**](https://easystats.github.io/bayestestR/reference/map_estimate.html) for an estimation of the mode. - [**`point_estimate()`**](https://easystats.github.io/bayestestR/reference/point_estimate.html) can be used to get them at once and can be run directly on models. - [**Uncertainty**](https://easystats.github.io/bayestestR/articles/credible_interval.html) - [**`hdi()`**](https://easystats.github.io/bayestestR/reference/hdi.html) for *Highest Density Intervals (HDI)* or [**`eti()`**](https://easystats.github.io/bayestestR/reference/eti.html) for *Equal-Tailed Intervals (ETI)*. - [**`ci()`**](https://easystats.github.io/bayestestR/reference/ci.html) can be used as a general method for Confidence and Credible Intervals (CI). - [**Effect Existence**](https://easystats.github.io/bayestestR/articles/indicesExistenceComparison.html): whether an effect is different from 0. 
- [**`p_direction()`**](https://easystats.github.io/bayestestR/reference/p_direction.html) for a Bayesian equivalent of the frequentist *p*-value (see [Makowski et al., 2019](https://doi.org/10.3389/fpsyg.2019.02767)) - [**`p_pointnull()`**](https://easystats.github.io/bayestestR/reference/p_map.html) represents the odds of null hypothesis (*h0 = 0*) compared to the most likely hypothesis (the MAP). - [**`bf_pointnull()`**](https://easystats.github.io/bayestestR/reference/bayesfactor_parameters.html) for a classic *Bayes Factor (BF)* assessing the likelihood of effect presence against its absence (*h0 = 0*). - [**Effect Significance**](https://easystats.github.io/bayestestR/articles/indicesExistenceComparison.html): whether the effect size can be considered as non-negligible. - [**`p_rope()`**](https://easystats.github.io/bayestestR/reference/p_rope.html) is the probability of the effect falling inside a [*Region of Practical Equivalence (ROPE)*](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html). - [**`bf_rope()`**](https://easystats.github.io/bayestestR/reference/bayesfactor_parameters.html) computes a Bayes factor against the null as defined by a region (the ROPE). - [**`p_significance()`**](https://easystats.github.io/bayestestR/reference/p_significance.html) that combines a region of equivalence with the probability of direction. [**`describe_posterior()`**](https://easystats.github.io/bayestestR/reference/describe_posterior.html) is the master function with which you can compute all of the indices cited below at once. ``` r describe_posterior( rnorm(1000), centrality = "median", test = c("p_direction", "p_significance") ) ## Parameter Median CI CI_low CI_high pd ps ## 1 Posterior -0.047 89 -1.7 1.4 0.52 0.48 ``` `describe_posterior()` works for many objects, including more complex *brmsfit*-models. 
For better readability, the output is separated by model components: ``` r zinb <- read.csv("http://stats.idre.ucla.edu/stat/data/fish.csv") set.seed(123) model <- brm( bf( count ~ child + camper + (1 | persons), zi ~ child + camper + (1 | persons) ), data = zinb, family = zero_inflated_poisson(), chains = 1, iter = 500 ) describe_posterior( model, effects = "all", component = "all", test = c("p_direction", "p_significance"), centrality = "all" ) ``` ## # Description of Posterior Distributions ## ## # Fixed Effects (Conditional Model) ## ## Parameter | Median | Mean | MAP | CI | CI_low | CI_high | pd | ps | ESS | Rhat ## ------------------------------------------------------------------------------------------ ## Intercept | 1.319 | 1.186 | 1.450 | 89 | 0.049 | 2.275 | 0.940 | 0.920 | 78 | 1.005 ## child | -1.162 | -1.162 | -1.175 | 89 | -1.320 | -0.980 | 1.000 | 1.000 | 172 | 0.996 ## camper | 0.727 | 0.731 | 0.737 | 89 | 0.587 | 0.858 | 1.000 | 1.000 | 233 | 0.996 ## ## # Fixed Effects (Zero-Inflated Model) ## ## Parameter | Median | Mean | MAP | CI | CI_low | CI_high | pd | ps | ESS | Rhat ## ------------------------------------------------------------------------------------------ ## Intercept | -0.778 | -0.731 | -0.890 | 89 | -1.893 | 0.218 | 0.876 | 0.840 | 92 | 1.004 ## child | 1.888 | 1.882 | 1.906 | 89 | 1.302 | 2.304 | 1.000 | 1.000 | 72 | 1.015 ## camper | -0.840 | -0.838 | -0.778 | 89 | -1.337 | -0.231 | 0.992 | 0.988 | 182 | 0.998 ## ## # Random Effects (Conditional Model) ## ## Parameter | Median | Mean | MAP | CI | CI_low | CI_high | pd | ps | ESS | Rhat ## ------------------------------------------------------------------------------------------ ## persons 1 | -1.315 | -1.233 | -1.397 | 89 | -2.555 | -0.031 | 0.940 | 0.924 | 80 | 1.004 ## persons 2 | -0.380 | -0.264 | -0.542 | 89 | -1.451 | 1.008 | 0.660 | 0.632 | 78 | 1.006 ## persons 3 | 0.307 | 0.438 | 0.136 | 89 | -0.728 | 1.588 | 0.708 | 0.644 | 77 | 1.003 ## persons 4 | 1.207 | 1.331 | 1.030 | 
89 | 0.290 | 2.537 | 0.960 | 0.960 | 78 | 1.004 ## ## # Random Effects (Zero-Inflated Model) ## ## Parameter | Median | Mean | MAP | CI | CI_low | CI_high | pd | ps | ESS | Rhat ## ------------------------------------------------------------------------------------------ ## persons 1 | 1.355 | 1.319 | 1.366 | 89 | 0.368 | 2.659 | 0.956 | 0.952 | 91 | 1.005 ## persons 2 | 0.382 | 0.357 | 0.509 | 89 | -0.726 | 1.488 | 0.724 | 0.668 | 99 | 1.000 ## persons 3 | -0.117 | -0.142 | -0.103 | 89 | -1.162 | 1.128 | 0.580 | 0.512 | 94 | 0.997 ## persons 4 | -1.166 | -1.270 | -1.024 | 89 | -2.462 | -0.061 | 0.972 | 0.960 | 113 | 0.997 *bayestestR* also includes [**many other features**](https://easystats.github.io/bayestestR/reference/index.html) useful for your Bayesian analyses. Here are some more examples: ## Point-estimates ``` r library(bayestestR) posterior <- distribution_gamma(10000, 1.5) # Generate a skewed distribution centrality <- point_estimate(posterior) # Get indices of centrality centrality ## # Point Estimates ## ## Median | Mean | MAP ## -------------------- ## 1.18 | 1.50 | 0.51 ``` As for other [**easystats**](https://github.com/easystats) packages, `plot()` methods are available from the [**see**](http://easystats.github.io/see) package for many functions: ![](man/figures/unnamed-chunk-8-1.png) While the **median** and the **mean** are available through base R functions, [**`map_estimate()`**](https://easystats.github.io/bayestestR/reference/map_estimate.html) in *bayestestR* can be used to directly find the **Highest Maximum A Posteriori (MAP)** estimate of a posterior, *i.e.*, the value associated with the highest probability density (the “peak” of the posterior distribution). In other words, it is an estimation of the *mode* for continuous parameters. 
## Uncertainty (CI) [**`hdi()`**](https://easystats.github.io/bayestestR/reference/hdi.html) computes the **Highest Density Interval (HDI)** of a posterior distribution, i.e., the interval within which all points have a higher probability density than points outside the interval. The HDI can be used in the context of Bayesian posterior characterisation as **Credible Interval (CI)**. Unlike equal-tailed intervals (see [`eti()`](https://easystats.github.io/bayestestR/reference/eti.html)) that typically exclude 2.5% from each tail of the distribution, the HDI is *not* equal-tailed and therefore always includes the mode(s) of posterior distributions. By default, `hdi()` returns the 89% intervals (`ci = 0.89`), deemed to be more stable than, for instance, 95% intervals. An effective sample size of at least 10,000 is recommended if 95% intervals should be computed (Kruschke, 2015). Moreover, 89 indicates the arbitrariness of interval limits - its only remarkable property is being the highest prime number that does not exceed the already unstable 95% threshold (McElreath, 2018). ``` r posterior <- distribution_chisquared(100, 3) hdi(posterior, ci = .89) ## # Highest Density Interval ## ## 89% HDI ## ------------ ## [0.11, 6.05] eti(posterior, ci = .89) ## # Equal-Tailed Interval ## ## 89% ETI ## ------------ ## [0.42, 7.27] ``` ![](man/figures/unnamed-chunk-10-1.png) ## Existence and Significance Testing ### Probability of Direction (*pd*) [**`p_direction()`**](https://easystats.github.io/bayestestR/reference/p_direction.html) computes the **Probability of Direction** (***p*d**, also known as the Maximum Probability of Effect - *MPE*). It varies between 50% and 100% (*i.e.*, `0.5` and `1`) and can be interpreted as the probability (expressed in percentage) that a parameter (described by its posterior distribution) is strictly positive or negative (whichever is the most probable). 
It is mathematically defined as the proportion of the posterior distribution that is of the median’s sign. Although differently expressed, this index is fairly similar (*i.e.*, is strongly correlated) to the frequentist ***p*-value**. **Relationship with the p-value**: In most cases, it seems that the *pd* corresponds to the frequentist one-sided *p*-value through the formula `p-value = (1-pd/100)` and to the two-sided *p*-value (the most commonly reported) through the formula `p-value = 2*(1-pd/100)`. Thus, a `pd` of `95%`, `97.5%`, `99.5%` and `99.95%` corresponds approximately to a two-sided *p*-value of respectively `.1`, `.05`, `.01` and `.001`. See the [*reporting guidelines*](https://easystats.github.io/bayestestR/articles/guidelines.html). ``` r posterior <- distribution_normal(100, 0.4, 0.2) p_direction(posterior) ## pd = 98.00% ``` ![](man/figures/unnamed-chunk-12-1.png) ### ROPE [**`rope()`**](https://easystats.github.io/bayestestR/reference/rope.html) computes the proportion (in percentage) of the HDI (default to the 89% HDI) of a posterior distribution that lies within a region of practical equivalence. Statistically, the probability of a posterior distribution of being different from 0 does not make much sense (the probability of it being different from a single point being infinite). Therefore, the idea underlying ROPE is to let the user define an area around the null value enclosing values that are *equivalent to the null* value for practical purposes (Kruschke & Liddell, 2018). Kruschke suggests that such null value could be set, by default, to the -0.1 to 0.1 range of a standardized parameter (negligible effect size according to Cohen, 1988). This could be generalized: For instance, for linear models, the ROPE could be set as `0 +/- .1 * sd(y)`. This ROPE range can be automatically computed for models using the [rope\_range](https://easystats.github.io/bayestestR/reference/rope_range.html) function. 
Kruschke suggests using the proportion of the 95% (or 90%, considered more stable) HDI that falls within the ROPE as an index for “null-hypothesis” testing (as understood under the Bayesian framework, see [equivalence\_test](https://easystats.github.io/bayestestR/reference/equivalence_test.html)). ``` r posterior <- distribution_normal(100, 0.4, 0.2) rope(posterior, range = c(-0.1, 0.1)) ## # Proportion of samples inside the ROPE [-0.10, 0.10]: ## ## inside ROPE ## ----------- ## 1.11 % ``` ![](man/figures/unnamed-chunk-14-1.png) ### Bayes Factor [**`bayesfactor_parameters()`**](https://easystats.github.io/bayestestR/reference/bayesfactor_parameters.html) computes Bayes factors against the null (either a point or an interval), based on prior and posterior samples of a single parameter. This Bayes factor indicates the degree by which the mass of the posterior distribution has shifted further away from or closer to the null value(s) (relative to the prior distribution), thus indicating if the null value has become less or more likely given the observed data. When the null is an interval, the Bayes factor is computed by comparing the prior and posterior odds of the parameter falling within or outside the null; When the null is a point, a Savage-Dickey density ratio is computed, which is also an approximation of a Bayes factor comparing the marginal likelihoods of the model against a model in which the tested parameter has been restricted to the point null (Wagenmakers, Lodewyckx, Kuriyal, & Grasman, 2010). 
``` r prior <- rnorm(1000, mean = 0, sd = 1) posterior <- rnorm(1000, mean = 1, sd = 0.7) bayesfactor_parameters(posterior, prior, direction = "two-sided", null = 0) ## # Bayes Factor (Savage-Dickey density ratio) ## ## BF ## ---- ## 2.03 ## ## * Evidence Against The Null: [0] ``` ![](man/figures/unnamed-chunk-16-1.png) *The lollipops represent the density of a point-null on the prior distribution (the blue lollipop on the dotted distribution) and on the posterior distribution (the red lollipop on the yellow distribution). The ratio between the two - the Savage-Dickey ratio - indicates the degree by which the mass of the parameter distribution has shifted away from or closer to the null.* For more info, see [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html). ## Utilities ### Find ROPE’s appropriate range [**`rope_range()`**](https://easystats.github.io/bayestestR/reference/rope_range.html): This function attempts to automatically find suitable “default” values for the Region Of Practical Equivalence (ROPE). Kruschke (2018) suggests that such null value could be set, by default, to a range from `-0.1` to `0.1` of a standardized parameter (negligible effect size according to Cohen, 1988), which can be generalised for linear models to `-0.1 * sd(y), 0.1 * sd(y)`. For logistic models, the parameters expressed in log odds ratio can be converted to standardized difference through the formula `sqrt(3)/pi`, resulting in a range of `-0.05` to `0.05`. ``` r rope_range(model) ``` ### Density Estimation [**`estimate_density()`**](https://easystats.github.io/bayestestR/reference/estimate_density.html): This function is a wrapper over different methods of density estimation. By default, it uses the base R `density` function but with a different default smoothing bandwidth (`"SJ"`) than the legacy default implemented in the base R `density` function (`"nrd0"`). 
However, Deng & Wickham suggest that `method = "KernSmooth"` is the fastest and the most accurate. ### Perfect Distributions [**`distribution()`**](https://easystats.github.io/bayestestR/reference/distribution.html): Generate a sample of size n with near-perfect distributions. ``` r distribution(n = 10) ## [1] -1.28 -0.88 -0.59 -0.34 -0.11 0.11 0.34 0.59 0.88 1.28 ``` ### Probability of a Value [**`density_at()`**](https://easystats.github.io/bayestestR/reference/density_at.html): Compute the density at a given point of a distribution. ``` r density_at(rnorm(1000, 1, 1), 1) ## [1] 0.39 ``` # References
Kruschke, J. K. (2015). *Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan* (2. ed). Amsterdam: Elsevier, Academic Press.
Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. *Advances in Methods and Practices in Psychological Science*, *1*(2), 270–280.
Kruschke, J. K., & Liddell, T. M. (2018). The Bayesian new statistics: Hypothesis testing, estimation, meta-analysis, and power analysis from a Bayesian perspective. *Psychonomic Bulletin & Review*, *25*(1), 178–206.
McElreath, R. (2018). *Statistical rethinking*.
Wagenmakers, E.-J., Lodewyckx, T., Kuriyal, H., & Grasman, R. (2010). Bayesian hypothesis testing for psychologists: A tutorial on the Savage-Dickey method. *Cognitive Psychology*, *60*(3), 158–189.
bayestestR/man/0000755000176200001440000000000013616666250013165 5ustar liggesusersbayestestR/man/overlap.Rd0000644000176200001440000000236513571067532015130 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/overlap.R \name{overlap} \alias{overlap} \title{Overlap Coefficient} \usage{ overlap( x, y, method_density = "kernel", method_auc = "trapezoid", precision = 2^10, extend = TRUE, extend_scale = 0.1, ... ) } \arguments{ \item{x}{Vector of x values.} \item{y}{Vector of x values.} \item{method_density}{Density estimation method. See \code{\link{estimate_density}}.} \item{method_auc}{Area Under the Curve (AUC) estimation method. See \code{\link{area_under_curve}}.} \item{precision}{Number of points of density data. See the \code{n} parameter in \link[=density]{density}.} \item{extend}{Extend the range of the x axis by a factor of \code{extend_scale}.} \item{extend_scale}{Ratio of range by which to extend the x axis. A value of \code{0.1} means that the x axis will be extended by \code{1/10} of the range of the data.} \item{...}{Currently not used.} } \description{ A method to calculate the overlap coefficient between two empirical distributions (that can be used as a measure of similarity between two samples). 
} \examples{ library(bayestestR) x <- distribution_normal(1000, 2, 0.5) y <- distribution_normal(1000, 0, 1) overlap(x, y) plot(overlap(x, y)) } bayestestR/man/dot-prior_new_location.Rd0000644000176200001440000000050113620150257020116 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sensitivity_to_prior.R \name{.prior_new_location} \alias{.prior_new_location} \title{Set a new location for a prior} \usage{ .prior_new_location(prior, sign, magnitude = 10) } \description{ Set a new location for a prior } \keyword{internal} bayestestR/man/p_significance.Rd0000644000176200001440000000722613616544116016420 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_significance.R \name{p_significance} \alias{p_significance} \alias{p_significance.numeric} \alias{p_significance.emmGrid} \alias{p_significance.stanreg} \alias{p_significance.brmsfit} \title{Practical Significance (ps)} \usage{ p_significance(x, ...) \method{p_significance}{numeric}(x, threshold = "default", ...) \method{p_significance}{emmGrid}(x, threshold = "default", ...) \method{p_significance}{stanreg}( x, threshold = "default", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{p_significance}{brmsfit}( x, threshold = "default", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{threshold}{The threshold value that separates significant from negligible effect. If \code{"default"}, the range is set to \code{0.1} if input is a vector, and based on \code{\link[=rope_range]{rope_range()}} if a Bayesian model is provided.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. 
May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{verbose}{Toggle off warnings.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \value{ Values between 0 and 1 corresponding to the probability of practical significance (ps). } \description{ Compute the probability of \strong{Practical Significance} (\strong{\emph{ps}}), which can be conceptualized as a unidirectional equivalence test. It returns the probability that effect is above a given threshold corresponding to a negligible effect in the median's direction. Mathematically, it is defined as the proportion of the posterior distribution of the median sign above the threshold. } \details{ \code{p_significance()} returns the proportion of a probability distribution (\code{x}) that is outside a certain range (the negligible effect, or ROPE, see argument \code{threshold}). If there are values of the distribution both below and above the ROPE, \code{p_significance()} returns the higher probability of a value being outside the ROPE. Typically, this value should be larger than 0.5 to indicate practical significance. However, if the range of the negligible effect is rather large compared to the range of the probability distribution \code{x}, \code{p_significance()} will be less than 0.5, which indicates no clear practical significance. 
} \examples{ library(bayestestR) # Simulate a posterior distribution of mean 1 and SD 1 # ---------------------------------------------------- posterior <- rnorm(1000, mean = 1, sd = 1) p_significance(posterior) # Simulate a dataframe of posterior distributions # ----------------------------------------------- df <- data.frame(replicate(4, rnorm(100))) p_significance(df) \dontrun{ # rstanarm models # ----------------------------------------------- if (require("rstanarm")) { model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars, chains = 2, refresh = 0 ) p_significance(model) } } } bayestestR/man/bayesfactor_parameters.Rd0000644000176200001440000002360213616544116020200 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor_parameters.R \name{bayesfactor_parameters} \alias{bayesfactor_parameters} \alias{bayesfactor_pointull} \alias{bayesfactor_rope} \alias{bf_parameters} \alias{bf_pointull} \alias{bf_rope} \alias{bayesfactor_parameters.numeric} \alias{bayesfactor_parameters.stanreg} \alias{bayesfactor_parameters.brmsfit} \alias{bayesfactor_parameters.emmGrid} \alias{bayesfactor_parameters.data.frame} \title{Bayes Factors (BF) for a Single Parameter} \usage{ bayesfactor_parameters( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) bayesfactor_pointull( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) bayesfactor_rope( posterior, prior = NULL, direction = "two-sided", null = rope_range(posterior), verbose = TRUE, ... ) bf_parameters( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) bf_pointull( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) bf_rope( posterior, prior = NULL, direction = "two-sided", null = rope_range(posterior), verbose = TRUE, ... ) \method{bayesfactor_parameters}{numeric}( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... 
) \method{bayesfactor_parameters}{stanreg}( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) \method{bayesfactor_parameters}{brmsfit}( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) \method{bayesfactor_parameters}{emmGrid}( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) \method{bayesfactor_parameters}{data.frame}( posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ... ) } \arguments{ \item{posterior}{A numerical vector, \code{stanreg} / \code{brmsfit} object, \code{emmGrid} or a data frame - representing a posterior distribution(s) (see 'Details').} \item{prior}{An object representing a prior distribution (see 'Details').} \item{direction}{Test type (see 'Details'). One of \code{0}, \code{"two-sided"} (default, two tailed), \code{-1}, \code{"left"} (left tailed) or \code{1}, \code{"right"} (right tailed).} \item{null}{Value of the null, either a scalar (for point-null) or a range (for an interval-null).} \item{verbose}{Toggle off warnings.} \item{...}{Currently not used.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. 
Use \code{parameters} to select specific parameters for the output.} } \value{ A data frame containing the Bayes factor representing evidence \emph{against} the null. } \description{ This method computes Bayes factors against the null (either a point or an interval), based on prior and posterior samples of a single parameter. This Bayes factor indicates the degree by which the mass of the posterior distribution has shifted further away from or closer to the null value(s) (relative to the prior distribution), thus indicating if the null value has become less or more likely given the observed data. \cr \cr When the null is an interval, the Bayes factor is computed by comparing the prior and posterior odds of the parameter falling within or outside the null interval (Morey & Rouder, 2011; Liao et al., 2020); When the null is a point, a Savage-Dickey density ratio is computed, which is also an approximation of a Bayes factor comparing the marginal likelihoods of the model against a model in which the tested parameter has been restricted to the point null (Wagenmakers et al., 2010; Heck, 2019). \cr \cr Note that the \code{logspline} package is used for estimating densities and probabilities, and must be installed for the function to work. \cr \cr \code{bayesfactor_pointnull()} and \code{bayesfactor_rope()} are wrappers around \code{bayesfactor_parameters} with different defaults for the null to be tested against (a point and a range, respectively). Aliases of the main functions are prefixed with \code{bf_*}, like \code{bf_parameters()} or \code{bf_pointnull()}. \cr \cr \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.} } \details{ This method is used to compute Bayes factors based on prior and posterior distributions. 
\cr\cr For the computation of Bayes factors, the model priors must be proper priors (at the very least they should be \emph{not flat}, and it is preferable that they be \emph{informative}); As the priors for the alternative get wider, the likelihood of the null value(s) increases, to the extreme that for completely flat priors the null is infinitely more favorable than the alternative (this is called \emph{the Jeffreys-Lindley-Bartlett paradox}). Thus, you should only ever try (or want) to compute a Bayes factor when you have an informed prior. \cr\cr (Note that by default, \code{brms::brm()} uses flat priors for fixed-effects; See example below.) \subsection{Setting the correct \code{prior}}{ It is important to provide the correct \code{prior} for meaningful results. \itemize{ \item When \code{posterior} is a numerical vector, \code{prior} should also be a numerical vector. \item When \code{posterior} is a \code{data.frame}, \code{prior} should also be a \code{data.frame}, with matching column order. \item When \code{posterior} is a \code{stanreg} or \code{brmsfit} model: \itemize{ \item \code{prior} can be set to \code{NULL}, in which case prior samples are drawn internally. \item \code{prior} can also be a model equivalent to \code{posterior} but with samples from the priors \emph{only}. } \item When \code{posterior} is an \code{emmGrid} object: \itemize{ \item \code{prior} should be the \code{stanreg} or \code{brmsfit} model used to create the \code{emmGrid} objects. \item \code{prior} can also be an \code{emmGrid} object equivalent to \code{posterior} but created with a model of prior samples \emph{only}. } }} \subsection{One-sided Tests (setting an order restriction)}{ One sided tests (controlled by \code{direction}) are conducted by restricting the prior and posterior of the non-null values (the "alternative") to one side of the null only (\cite{Morey & Wagenmakers, 2014}). 
For example, if we have a prior hypothesis that the parameter should be positive, the alternative will be restricted to the region to the right of the null (point or interval). } \subsection{Interpreting Bayes Factors}{ A Bayes factor greater than 1 can be interpreted as evidence against the null, at which one convention is that a Bayes factor greater than 3 can be considered as "substantial" evidence against the null (and vice versa, a Bayes factor smaller than 1/3 indicates substantial evidence in favor of the null-model) (\cite{Wetzels et al. 2011}). } } \examples{ library(bayestestR) prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) bayesfactor_parameters(posterior, prior) \dontrun{ # rstanarm models # --------------- if (require("rstanarm") && require("emmeans")) { contrasts(sleep$group) <- contr.bayes # see vignette stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep) bayesfactor_parameters(stan_model) bayesfactor_parameters(stan_model, null = rope_range(stan_model)) # emmGrid objects # --------------- group_diff <- pairs(emmeans(stan_model, ~group)) bayesfactor_parameters(group_diff, prior = stan_model) } # brms models # ----------- if (require("brms")) { contrasts(sleep$group) <- contr.bayes # see vignette my_custom_priors <- set_prior("student_t(3, 0, 1)", class = "b") + set_prior("student_t(3, 0, 1)", class = "sd", group = "ID") brms_model <- brm(extra ~ group + (1 | ID), data = sleep, prior = my_custom_priors ) bayesfactor_parameters(brms_model) } } } \references{ \itemize{ \item Wagenmakers, E. J., Lodewyckx, T., Kuriyal, H., and Grasman, R. (2010). Bayesian hypothesis testing for psychologists: A tutorial on the Savage-Dickey method. Cognitive psychology, 60(3), 158-189. \item Heck, D. W. (2019). A caveat on the Savage–Dickey density ratio: The case of computing Bayes factors for regression parameters. British Journal of Mathematical and Statistical Psychology, 72(2), 316-333. 
\item Morey, R. D., & Wagenmakers, E. J. (2014). Simple relation between Bayesian order-restricted and point-null hypothesis tests. Statistics & Probability Letters, 92, 121-124. \item Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. Psychological methods, 16(4), 406. \item Liao, J. G., Midya, V., & Berg, A. (2020). Connecting and contrasting the Bayes factor and a modified ROPE procedure for testing interval null hypotheses. The American Statistician, 1-19. \item Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and Wagenmakers, E.-J. (2011). Statistical Evidence in Experimental Psychology: An Empirical Comparison Using 855 t Tests. Perspectives on Psychological Science, 6(3), 291–298. \doi{10.1177/1745691611406923} } } \author{ Mattan S. Ben-Shachar } bayestestR/man/dot-select_nums.Rd0000644000176200001440000000036513620150257016553 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.select_nums} \alias{.select_nums} \title{select numerics columns} \usage{ .select_nums(x) } \description{ select numerics columns } \keyword{internal} bayestestR/man/bayesfactor.Rd0000644000176200001440000000554313616544116015761 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor.R \name{bayesfactor} \alias{bayesfactor} \title{Bayes Factors (BF)} \usage{ bayesfactor( ..., prior = NULL, direction = "two-sided", null = 0, hypothesis = NULL, effects = c("fixed", "random", "all"), verbose = TRUE, denominator = 1, match_models = FALSE, prior_odds = NULL ) } \arguments{ \item{...}{A numeric vector, model object(s), or the output from \code{bayesfactor_models}.} \item{prior}{An object representing a prior distribution (see 'Details').} \item{direction}{Test type (see 'Details'). 
One of \code{0}, \code{"two-sided"} (default, two tailed), \code{-1}, \code{"left"} (left tailed) or \code{1}, \code{"right"} (right tailed).} \item{null}{Value of the null, either a scalar (for point-null) or a range (for an interval-null).} \item{hypothesis}{A character vector specifying the restrictions as logical conditions (see examples below).} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{verbose}{Toggle off warnings.} \item{denominator}{Either an integer indicating which of the models to use as the denominator, or a model to be used as a denominator. Ignored for \code{BFBayesFactor}.} \item{match_models}{See details.} \item{prior_odds}{Optional vector of prior odds for the models. See \code{BayesFactor::priorOdds<-}.} } \value{ Some type of Bayes factor, depending on the input. See \code{\link{bayesfactor_parameters}}, \code{\link{bayesfactor_models}} or \code{\link{bayesfactor_inclusion}} } \description{ This function computes the Bayes factors (BFs) that are appropriate to the input. For vectors or single models, it will compute \code{\link[=bayesfactor_parameters]{BFs for single parameters}}, or if \code{hypothesis} is specified, \code{\link[=bayesfactor_restricted]{BFs for restricted models}}. For multiple models, it will return the BF corresponding to \code{\link[=bayesfactor_models]{comparison between models}} and if a model comparison is passed, it will compute the \code{\link[=bayesfactor_inclusion]{inclusion BF}}. \cr\cr For a complete overview of these functions, read the \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{Bayes factor vignette}. 
} \examples{ library(bayestestR) # Vectors prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) bayesfactor(posterior, prior = prior) \dontrun{ # rstanarm models # --------------- if (require("rstanarm")) { model <- stan_lmer(extra ~ group + (1 | ID), data = sleep) bayesfactor(model) } } # Frequentist models # --------------- m0 <- lm(extra ~ 1, data = sleep) m1 <- lm(extra ~ group, data = sleep) m2 <- lm(extra ~ group + ID, data = sleep) comparison <- bayesfactor(m0, m1, m2) comparison bayesfactor(comparison) } bayestestR/man/mhdior.Rd0000644000176200001440000001056213616544116014736 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mhdior.R \name{mhdior} \alias{mhdior} \alias{mhdior.numeric} \alias{mhdior.data.frame} \alias{mhdior.emmGrid} \alias{mhdior.BFBayesFactor} \alias{mhdior.stanreg} \alias{mhdior.brmsfit} \title{Maximum HDI level inside/outside ROPE (MHDIOR)} \usage{ mhdior(x, ...) \method{mhdior}{numeric}(x, range = "default", precision = 0.1, ...) \method{mhdior}{data.frame}(x, range = "default", precision = 0.1, ...) \method{mhdior}{emmGrid}(x, range = "default", precision = 0.1, ...) \method{mhdior}{BFBayesFactor}(x, range = "default", precision = 0.1, ...) \method{mhdior}{stanreg}( x, range = "default", precision = 0.1, effects = c("fixed", "random", "all"), parameters = NULL, ... ) \method{mhdior}{brmsfit}( x, range = "default", precision = 0.1, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{range}{ROPE's lower and higher bounds. Should be a vector of length two (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. 
If \code{"default"}, the range is set to \code{c(-0.1, 0.1)} if input is a vector, and based on \code{\link[=rope_range]{rope_range()}} if a Bayesian model is provided.} \item{precision}{The precision by which to explore the ROPE space (in percentage). Lower values increase the precision of the returned p value but can be quite computationally costly.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \description{ The MHDIOR (pronounced 'em-eich-dior') is an exploratory and non-validated index representing the maximum percentage of \link[=hdi]{HDI} that does not contain (or is entirely contained, in which case the value is prefixed with a negative sign), in the negligible values space defined by the \link[=rope]{ROPE}. It differs from the ROPE percentage, \emph{i.e.}, from the proportion of a given CI in the ROPE, as it represents the maximum CI values needed to reach a ROPE proportion of 0\% or 100\%. Whether the index reflects the ROPE reaching 0\% or 100\% is indicated through the sign: a negative sign is added to indicate that the probability corresponds to the probability of a not significant effect (a percentage in ROPE of 100\%). For instance, a MHDIOR of 97\% means that there is a probability of .97 that a parameter (described by its posterior distribution) is outside the ROPE. 
In other words, the 97\% HDI is the maximum HDI level for which the percentage in ROPE is 0\%. On the contrary, a ROPE-based p of -97\% indicates that there is a probability of .97 that the parameter is inside the ROPE (percentage in ROPE of 100\%). A value close to 0\% would indicate that the mode of the distribution falls perfectly at the edge of the ROPE, in which case the percentage of HDI needed to be on either side of the ROPE becomes infinitely small. Negative values do not refer to negative values \emph{per se}, simply indicating that the value corresponds to non-significance rather than significance. } \examples{ \dontrun{ library(bayestestR) # precision = 1 is used to speed up examples... mhdior( x = rnorm(1000, mean = 1, sd = 1), range = c(-0.1, 0.1), precision = 1 ) df <- data.frame(replicate(4, rnorm(100))) mhdior(df, precision = 1) if (require("rstanarm")) { model <- stan_glm( mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0 ) mhdior(model, precision = 1) } if (require("emmeans")) { mhdior(emtrends(model, ~1, "wt")) } if (require("brms")) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) mhdior(model) } if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) mhdior(bf) } } } bayestestR/man/ci.Rd0000644000176200001440000001117513616544116014050 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci.R \name{ci} \alias{ci} \alias{ci.numeric} \alias{ci.data.frame} \alias{ci.emmGrid} \alias{ci.sim.merMod} \alias{ci.sim} \alias{ci.stanreg} \alias{ci.brmsfit} \alias{ci.BFBayesFactor} \alias{ci.MCMCglmm} \title{Confidence/Credible/Compatibility Interval (CI)} \usage{ ci(x, ...) \method{ci}{numeric}(x, ci = 0.89, method = "ETI", verbose = TRUE, BF = 1, ...) \method{ci}{data.frame}(x, ci = 0.89, method = "ETI", verbose = TRUE, BF = 1, ...) \method{ci}{emmGrid}(x, ci = 0.89, method = "ETI", verbose = TRUE, BF = 1, ...) 
\method{ci}{sim.merMod}( x, ci = 0.89, method = "ETI", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{ci}{sim}(x, ci = 0.89, method = "ETI", parameters = NULL, verbose = TRUE, ...) \method{ci}{stanreg}( x, ci = 0.89, method = "ETI", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, BF = 1, ... ) \method{ci}{brmsfit}( x, ci = 0.89, method = "ETI", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, BF = 1, ... ) \method{ci}{BFBayesFactor}(x, ci = 0.89, method = "ETI", verbose = TRUE, BF = 1, ...) \method{ci}{MCMCglmm}(x, ci = 0.89, method = "ETI", verbose = TRUE, ...) } \arguments{ \item{x}{A \code{stanreg} or \code{brmsfit} model, or a vector representing a posterior distribution.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{.89} (89\%) for Bayesian models and \code{.95} (95\%) for frequentist models.} \item{method}{Can be \link[=eti]{'ETI'} (default), \link[=hdi]{'HDI'} or \link[=si]{'SI'}.} \item{verbose}{Toggle off warnings.} \item{BF}{The amount of support required to be included in the support interval.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. 
Only applies to \pkg{brms}-models.} } \value{ A data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. } } \description{ Compute Confidence/Credible/Compatibility Intervals (CI) or Support Intervals (SI) for Bayesian and frequentist models. The Documentation is accessible for: } \details{ \itemize{ \item \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{Bayesian models} \item \href{https://easystats.github.io/parameters/reference/ci.merMod.html}{Frequentist models} } } \note{ When it comes to interpretation, we recommend thinking of the CI in terms of an "uncertainty" or "compatibility" interval, the latter being defined as \dQuote{Given any value in the interval and the background assumptions, the data should not seem very surprising} (\cite{Gelman & Greenland 2019}). 
} \examples{ library(bayestestR) posterior <- rnorm(1000) ci(posterior, method = "ETI") ci(posterior, method = "HDI") df <- data.frame(replicate(4, rnorm(100))) ci(df, method = "ETI", ci = c(.80, .89, .95)) ci(df, method = "HDI", ci = c(.80, .89, .95)) \dontrun{ if (require("rstanarm")) { model <- stan_glm(mpg ~ wt, data = mtcars, chains = 2, iter = 200, refresh = 0) ci(model, method = "ETI", ci = c(.80, .89)) ci(model, method = "HDI", ci = c(.80, .89)) ci(model, method = "SI") } if (require("brms")) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) ci(model, method = "ETI") ci(model, method = "HDI") ci(model, method = "SI") } if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) ci(bf, method = "ETI") ci(bf, method = "HDI") } if (require("emmeans")) { model <- emtrends(model, ~1, "wt") ci(model, method = "ETI") ci(model, method = "HDI") ci(model, method = "SI") } } } \references{ Gelman A, Greenland S. Are confidence intervals better termed "uncertainty intervals"? BMJ 2019;l5381. \doi{10.1136/bmj.l5381} } bayestestR/man/pd_to_p.Rd0000644000176200001440000000164013571067531015076 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_pd_to_p.R \name{pd_to_p} \alias{pd_to_p} \alias{p_to_pd} \alias{convert_p_to_pd} \alias{convert_pd_to_p} \title{Convert between Probability of Direction (pd) and p-value.} \usage{ pd_to_p(pd, direction = "two-sided", ...) p_to_pd(p, direction = "two-sided", ...) convert_p_to_pd(p, direction = "two-sided", ...) convert_pd_to_p(pd, direction = "two-sided", ...) } \arguments{ \item{pd}{A Probability of Direction (pd) value (between 0 and 1).} \item{direction}{What type of p-value is requested or provided. Can be \code{"two-sided"} (default, two tailed) or \code{"one-sided"} (one tailed).} \item{...}{Arguments passed to or from other methods.} \item{p}{A p-value.} } \description{ Enables a conversion between the Probability of Direction (pd) and p-value. 
} \examples{ pd_to_p(pd = 0.95) pd_to_p(pd = 0.95, direction = "one-sided") } bayestestR/man/update.bayesfactor_models.Rd0000644000176200001440000000204313616544116020575 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/update.bayesfactor_models.R \name{update.bayesfactor_models} \alias{update.bayesfactor_models} \title{Update bayesfactor_models} \usage{ \method{update}{bayesfactor_models}(object, subset = NULL, reference = NULL, ...) } \arguments{ \item{object}{A \code{\link{bayesfactor_models}} object.} \item{subset}{Vector of model indices to keep or remove.} \item{reference}{Index of model to rereference to, or \code{"top"} to reference to the best model, or \code{"bottom"} to reference to the worst model.} \item{...}{Currently not used.} } \description{ Update bayesfactor_models } \examples{ \dontrun{ library(lme4) lmer1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) lmer2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) lmer3 <- lmer( Sepal.Length ~ Petal.Length + (Petal.Length | Species) + (1 | Petal.Width), data = iris ) m <- bayesfactor_models(lmer1, lmer2, lmer3, denominator = 1) m update(m, reference = "bottom") } } bayestestR/man/point_estimate.Rd0000644000176200001440000000704513603652205016475 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/point_estimate.R \name{point_estimate} \alias{point_estimate} \alias{point_estimate.stanreg} \alias{point_estimate.brmsfit} \alias{point_estimate.BFBayesFactor} \title{Point-estimates of posterior distributions} \usage{ point_estimate(x, centrality = "all", dispersion = FALSE, ...) \method{point_estimate}{stanreg}( x, centrality = "all", dispersion = FALSE, effects = c("fixed", "random", "all"), parameters = NULL, ... 
) \method{point_estimate}{brmsfit}( x, centrality = "all", dispersion = FALSE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) \method{point_estimate}{BFBayesFactor}(x, centrality = "all", dispersion = FALSE, ...) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg}, \code{brmsfit} or a \code{BayesFactor} model.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively).} \item{...}{Additional arguments to be passed to or from methods.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \description{ Compute various point-estimates, such as the mean, the median or the MAP, to describe posterior distributions. 
} \examples{ library(bayestestR) point_estimate(rnorm(1000)) point_estimate(rnorm(1000), centrality = "all", dispersion = TRUE) point_estimate(rnorm(1000), centrality = c("median", "MAP")) df <- data.frame(replicate(4, rnorm(100))) point_estimate(df, centrality = "all", dispersion = TRUE) point_estimate(df, centrality = c("median", "MAP")) \dontrun{ # rstanarm models # ----------------------------------------------- library(rstanarm) model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) point_estimate(model, centrality = "all", dispersion = TRUE) point_estimate(model, centrality = c("median", "MAP")) # emmeans estimates # ----------------------------------------------- library(emmeans) point_estimate(emtrends(model, ~1, "wt"), centrality = c("median", "MAP")) # brms models # ----------------------------------------------- library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) point_estimate(model, centrality = "all", dispersion = TRUE) point_estimate(model, centrality = c("median", "MAP")) # BayesFactor objects # ----------------------------------------------- library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) point_estimate(bf, centrality = "all", dispersion = TRUE) point_estimate(bf, centrality = c("median", "MAP")) } } \references{ \href{https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html}{Vignette In-Depth 1: Comparison of Point-Estimates} } bayestestR/man/area_under_curve.Rd0000644000176200001440000000277213571067531016772 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/area_under_curve.R \name{area_under_curve} \alias{area_under_curve} \alias{auc} \title{Area under the Curve (AUC)} \usage{ area_under_curve(x, y, method = c("trapezoid", "step", "spline"), ...) auc(x, y, method = c("trapezoid", "step", "spline"), ...) } \arguments{ \item{x}{Vector of x values.} \item{y}{Vector of y values.} \item{method}{Method to compute the Area Under the Curve (AUC). 
Can be \code{"trapezoid"} (default), \code{"step"} or \code{"spline"}. If "trapezoid", the curve is formed by connecting all points by a direct line (composite trapezoid rule). If "step" is chosen then a stepwise connection of two points is used. For calculating the area under a spline interpolation the splinefun function is used in combination with integrate.} \item{...}{Arguments passed to or from other methods.} } \description{ Based on the DescTools \code{AUC} function. It can calculate the area under the curve with a naive algorithm or a more elaborated spline approach. The curve must be given by vectors of xy-coordinates. This function can handle unsorted x values (by sorting x) and ties for the x values (by ignoring duplicates). } \examples{ library(bayestestR) posterior <- distribution_normal(1000) dens <- estimate_density(posterior) dens <- dens[dens$x > 0, ] x <- dens$x y <- dens$y area_under_curve(x, y, method = "trapezoid") area_under_curve(x, y, method = "step") area_under_curve(x, y, method = "spline") } \seealso{ DescTools } bayestestR/man/p_map.Rd0000644000176200001440000000775313616544116014560 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_map.R \name{p_map} \alias{p_map} \alias{p_pointnull} \alias{p_map.stanreg} \alias{p_map.brmsfit} \title{Bayesian p-value based on the density at the Maximum A Posteriori (MAP)} \usage{ p_map(x, precision = 2^10, method = "kernel", ...) p_pointnull(x, precision = 2^10, method = "kernel", ...) \method{p_map}{stanreg}( x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), parameters = NULL, ... ) \method{p_map}{brmsfit}( x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. 
Can also be a \code{stanreg}, \code{brmsfit} or a \code{BayesFactor} model.} \item{precision}{Number of points of density data. See the \code{n} parameter in \link[=density]{density}.} \item{method}{Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}.} \item{...}{Currently not used.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \description{ Compute a Bayesian equivalent of the \emph{p}-value, related to the odds that a parameter (described by its posterior distribution) has against the null hypothesis (\emph{h0}) using Mills' (2014, 2017) \emph{Objective Bayesian Hypothesis Testing} framework. It corresponds to the density value at 0 divided by the density at the Maximum A Posteriori (MAP). } \details{ Note that this method is sensitive to the density estimation \code{method} (see the section in the examples below). \subsection{Strengths and Limitations}{ \strong{Strengths:} Straightforward computation. Objective property of the posterior distribution. \cr \cr \strong{Limitations:} Limited information favoring the null hypothesis. Relies on density approximation. Indirect relationship between mathematical definition and interpretation. Only suitable for weak / very diffused priors. 
} } \examples{ library(bayestestR) p_map(rnorm(1000, 0, 1)) p_map(rnorm(1000, 10, 1)) \dontrun{ library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) p_map(model) library(emmeans) p_map(emtrends(model, ~1, "wt")) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) p_map(model) library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) p_map(bf) } \donttest{ # --------------------------------------- # Robustness to density estimation method set.seed(333) data <- data.frame() for (iteration in 1:250) { x <- rnorm(1000, 1, 1) result <- data.frame( "Kernel" = p_map(x, method = "kernel"), "KernSmooth" = p_map(x, method = "KernSmooth"), "logspline" = p_map(x, method = "logspline") ) data <- rbind(data, result) } data$KernSmooth <- data$Kernel - data$KernSmooth data$logspline <- data$Kernel - data$logspline summary(data$KernSmooth) summary(data$logspline) boxplot(data[c("KernSmooth", "logspline")]) } } \references{ \itemize{ \item Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} \item Mills, J. A. (2018). Objective Bayesian Precise Hypothesis Testing. University of Cincinnati. } } \seealso{ \href{https://www.youtube.com/watch?v=Ip8Ci5KUVRc}{Jeff Mill's talk} } bayestestR/man/equivalence_test.Rd0000644000176200001440000001724513571067531017022 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/equivalence_test.R \name{equivalence_test} \alias{equivalence_test} \alias{equivalence_test.default} \alias{equivalence_test.numeric} \alias{equivalence_test.data.frame} \alias{equivalence_test.emmGrid} \alias{equivalence_test.BFBayesFactor} \alias{equivalence_test.stanreg} \alias{equivalence_test.brmsfit} \title{Test for Practical Equivalence} \usage{ equivalence_test(x, ...) \method{equivalence_test}{default}(x, ...) 
\method{equivalence_test}{numeric}(x, range = "default", ci = 0.89, verbose = TRUE, ...) \method{equivalence_test}{data.frame}(x, range = "default", ci = 0.89, verbose = TRUE, ...) \method{equivalence_test}{emmGrid}(x, range = "default", ci = 0.89, verbose = TRUE, ...) \method{equivalence_test}{BFBayesFactor}(x, range = "default", ci = 0.89, verbose = TRUE, ...) \method{equivalence_test}{stanreg}( x, range = "default", ci = 0.89, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{equivalence_test}{brmsfit}( x, range = "default", ci = 0.89, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{range}{ROPE's lower and higher bounds. Should be a vector of length two (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. If \code{"default"}, the range is set to \code{c(-0.1, 0.1)} if input is a vector, and based on \code{\link[=rope_range]{rope_range()}} if a Bayesian model is provided.} \item{ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{verbose}{Toggle off warnings.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. 
Only applies to \pkg{brms}-models.} } \value{ A data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the HDI. \item \code{ROPE_low}, \code{ROPE_high} The limits of the ROPE. These values are identical for all parameters. \item \code{ROPE_Percentage} The proportion of the HDI that lies inside the ROPE. \item \code{ROPE_Equivalence} The "test result", as character. Either "rejected", "accepted" or "undecided". \item \code{HDI_low} , \code{HDI_high} The lower and upper HDI limits for the parameters. } } \description{ Perform a \strong{Test for Practical Equivalence} for Bayesian and frequentist models. } \details{ Documentation is accessible for: \itemize{ \item \href{https://easystats.github.io/bayestestR/reference/equivalence_test.html}{Bayesian models} \item \href{https://easystats.github.io/parameters/reference/equivalence_test.lm.html}{Frequentist models} } For Bayesian models, the \strong{Test for Practical Equivalence} is based on the \emph{"HDI+ROPE decision rule"} (\cite{Kruschke, 2014, 2018}) to check whether parameter values should be accepted or rejected against an explicitly formulated "null hypothesis" (i.e., a ROPE). In other words, it checks the percentage of the 89\% \link[=hdi]{HDI} that is within the null region (the ROPE). If this percentage is sufficiently low, the null hypothesis is rejected. If this percentage is sufficiently high, the null hypothesis is accepted. Using the \link[=rope]{ROPE} and the \link[=hdi]{HDI}, \cite{Kruschke (2018)} suggests using the percentage of the 95\% (or 89\%, considered more stable) HDI that falls within the ROPE as a decision rule. If the HDI is completely outside the ROPE, the "null hypothesis" for this parameter is "rejected". 
If the ROPE completely covers the HDI, i.e., all most credible values of a parameter are inside the region of practical equivalence, the null hypothesis is accepted. Else, it’s undecided whether to accept or reject the null hypothesis. If the full ROPE is used (i.e., 100\% of the HDI), then the null hypothesis is rejected or accepted if the percentage of the posterior within the ROPE is smaller than 2.5\% or greater than 97.5\%. Desirable results are low proportions inside the ROPE (the closer to zero the better). \cr \cr Some attention is required for finding suitable values for the ROPE limits (argument \code{range}). See 'Details' in \code{\link[=rope_range]{rope_range()}} for further information. \cr \cr \strong{Multicollinearity: Non-independent covariates} \cr \cr When parameters show strong correlations, i.e. when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. In such cases, the test for practical equivalence may have inappropriate results. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are the results of the "undecided" parameters, which may either move further towards "rejection" or away from it (\cite{Kruschke 2014, 340f}). \cr \cr \code{equivalence_test()} performs a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. An even more sophisticated check is the projection predictive variable selection (\cite{Piironen and Vehtari 2017}). } \note{ There is a \code{print()}-method with a \code{digits}-argument to control the amount of digits in the output, and there is a \code{plot()}-method to visualize the results from the equivalence-test (for models only). 
} \examples{ library(bayestestR) equivalence_test(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) equivalence_test(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1)) equivalence_test(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1)) equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99)) # print more digits test <- equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99)) print(test, digits = 4) \dontrun{ library(rstanarm) model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) equivalence_test(model) equivalence_test(model, ci = c(.50, 1)) # plot result test <- equivalence_test(model) plot(test) library(emmeans) equivalence_test(emtrends(model, ~1, "wt")) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) equivalence_test(model) equivalence_test(model, ci = c(.50, .99)) library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) equivalence_test(bf) equivalence_test(bf, ci = c(.50, .99)) } } \references{ \itemize{ \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304} \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press \item Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735. 
\doi{10.1007/s11222-016-9649-y} } } bayestestR/man/convert_bayesian_as_frequentist.Rd0000644000176200001440000000243313616544116022121 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_bayesian_to_frequentist.R \name{convert_bayesian_as_frequentist} \alias{convert_bayesian_as_frequentist} \alias{bayesian_as_frequentist} \title{Convert (refit) a Bayesian model to frequentist} \usage{ convert_bayesian_as_frequentist(model, data = NULL) bayesian_as_frequentist(model, data = NULL) } \arguments{ \item{model}{A Bayesian model.} \item{data}{Data used by the model. If \code{NULL}, will try to extract it from the model.} } \description{ Refit Bayesian model as frequentist. Can be useful for comparisons. } \examples{ \donttest{ # Rstanarm ---------------------- if (require("rstanarm")) { # Simple regressions model <- stan_glm(Sepal.Length ~ Petal.Length * Species, data = iris, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) model <- stan_glm(vs ~ mpg, family = "binomial", data = mtcars, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) # Mixed models model <- stan_glmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) model <- stan_glmer(vs ~ mpg + (1 | cyl), family = "binomial", data = mtcars, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) } } } bayestestR/man/as.data.frame.density.Rd0000644000176200001440000000057713620150257017535 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate_density.R \name{as.data.frame.density} \alias{as.data.frame.density} \title{Coerce to a Data Frame} \usage{ \method{as.data.frame}{density}(x, ...) 
} \arguments{ \item{x}{any \R object.} \item{...}{additional arguments to be passed to or from methods.} } \description{ Coerce to a Data Frame } bayestestR/man/contr.bayes.Rd0000644000176200001440000000427213571067531015705 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/contr.bayes.R \name{contr.bayes} \alias{contr.bayes} \title{Orthonormal Contrast Matrices for Bayesian Estimation} \usage{ contr.bayes(n, contrasts = TRUE) } \arguments{ \item{n}{a vector of levels for a factor, or the number of levels.} \item{contrasts}{logical indicating whether contrasts should be computed.} } \value{ A \code{matrix} with n rows and k columns, with k=n-1 if contrasts is \code{TRUE} and k=n if contrasts is \code{FALSE}. } \description{ Returns a design or model matrix of orthonormal contrasts such that the marginal prior on all effects is identical. Implementation from Singmann \& Gronau's \href{https://github.com/bayesstuff/bfrms/}{\code{bfrms}}, following the description in Rouder, Morey, Speckman, \& Province (2012, p. 363). } \details{ Though using this factor coding scheme might obscure the interpretation of parameters, it is essential for correct estimation of Bayes factors for contrasts and multi-level order restrictions. See info on specifying correct priors for factors with more than 2 levels in \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}. } \examples{ \dontrun{ contr.bayes(2) # Q_2 in Rouder et al. (2012, p. 363) # [,1] # [1,] -0.7071068 # [2,] 0.7071068 contr.bayes(5) # equivalent to Q_5 in Rouder et al. (2012, p. 
363) # [,1] [,2] [,3] [,4] # [1,] 0.0000000 0.8944272 0.0000000 0.0000000 # [2,] 0.0000000 -0.2236068 -0.5000000 0.7071068 # [3,] 0.7071068 -0.2236068 -0.1666667 -0.4714045 # [4,] -0.7071068 -0.2236068 -0.1666667 -0.4714045 # [5,] 0.0000000 -0.2236068 0.8333333 0.2357023 ## check decomposition Q3 <- contr.bayes(3) Q3 \%*\% t(Q3) # [,1] [,2] [,3] # [1,] 0.6666667 -0.3333333 -0.3333333 # [2,] -0.3333333 0.6666667 -0.3333333 # [3,] -0.3333333 -0.3333333 0.6666667 ## 2/3 on diagonal and -1/3 on off-diagonal elements } } \references{ Rouder, J. N., Morey, R. D., Speckman, P. L., \& Province, J. M. (2012). Default Bayes factors for ANOVA designs. *Journal of Mathematical Psychology*, 56(5), 356-374. https://doi.org/10.1016/j.jmp.2012.08.001 } bayestestR/man/si.Rd0000644000176200001440000001362413610203766014066 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/si.R \name{si} \alias{si} \alias{si.numeric} \alias{si.stanreg} \alias{si.brmsfit} \alias{si.emmGrid} \alias{si.data.frame} \title{Compute Support Intervals} \usage{ si(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) \method{si}{numeric}(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) \method{si}{stanreg}( posterior, prior = NULL, BF = 1, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) \method{si}{brmsfit}( posterior, prior = NULL, BF = 1, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) \method{si}{emmGrid}(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) \method{si}{data.frame}(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) 
} \arguments{ \item{posterior}{A numerical vector, \code{stanreg} / \code{brmsfit} object, \code{emmGrid} or a data frame - representing a posterior distribution(s) from (see 'Details').} \item{prior}{An object representing a prior distribution (see 'Details').} \item{BF}{The amount of support required to be included in the support interval.} \item{verbose}{Toggle off warnings.} \item{...}{Arguments passed to and from other methods.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ A data frame containing the lower and upper bounds of the SI. \cr Note that if the level of requested support is higher than observed in the data, the interval will be \code{[NA,NA]}. } \description{ A support interval contains only the values of the parameter that predict the observed data better than average, by some degree \emph{k}; these are values of the parameter that are associated with an updating factor greater or equal than \emph{k}. From the perspective of the Savage-Dickey Bayes factor, testing against a point null hypothesis for any value within the support interval will yield a Bayes factor smaller than \emph{1/k}. 
\cr \cr \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.} } \details{ This method is used to compute support intervals based on prior and posterior distributions. For the computation of support intervals, the model priors must be proper priors (at the very least they should be \emph{not flat}, and it is preferable that they be \emph{informative} - note that by default, \code{brms::brm()} uses flat priors for fixed-effects; see example below). \subsection{Setting the correct \code{prior}}{ It is important to provide the correct \code{prior} for meaningful results. \itemize{ \item When \code{posterior} is a numerical vector, \code{prior} should also be a numerical vector. \item When \code{posterior} is a \code{data.frame}, \code{prior} should also be a \code{data.frame}, with matching column order. \item When \code{posterior} is a \code{stanreg} or \code{brmsfit} model: \itemize{ \item \code{prior} can be set to \code{NULL}, in which case prior samples are drawn internally. \item \code{prior} can also be a model equivalent to \code{posterior} but with samples from the priors \emph{only}. } \item When \code{posterior} is an \code{emmGrid} object: \itemize{ \item \code{prior} should be the \code{stanreg} or \code{brmsfit} model used to create the \code{emmGrid} objects. \item \code{prior} can also be an \code{emmGrid} object equivalent to \code{posterior} but created with a model of priors samples \emph{only}. } }} \subsection{Choosing a value of \code{BF}}{ The choice of \code{BF} (the level of support) depends on what we want our interval to represent: \itemize{ \item A \code{BF} = 1 contains values whose credibility is not decreased by observing the data. \item A \code{BF} > 1 contains values that received more impressive support from the data. 
\item A \code{BF} < 1 contains values whose credibility has \emph{not} been impressively decreased by observing the data. Testing against values outside this interval will produce a Bayes factor larger than 1/\code{BF} in support of the alternative. E.g., if an SI (BF = 1/3) excludes 0, the Bayes factor against the point-null will be larger than 3. } } } \examples{ library(bayestestR) prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) si(posterior, prior) \dontrun{ # rstanarm models # --------------- library(rstanarm) contrasts(sleep$group) <- contr.bayes # see vignette stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep) si(stan_model) si(stan_model, BF = 3) # emmGrid objects # --------------- library(emmeans) group_diff <- pairs(emmeans(stan_model, ~group)) si(group_diff, prior = stan_model) # brms models # ----------- library(brms) contrasts(sleep$group) <- contr.bayes # see vignette my_custom_priors <- set_prior("student_t(3, 0, 1)", class = "b") + set_prior("student_t(3, 0, 1)", class = "sd", group = "ID") brms_model <- brm(extra ~ group + (1 | ID), data = sleep, prior = my_custom_priors ) si(brms_model) } } \references{ Wagenmakers, E., Gronau, Q. F., Dablander, F., & Etz, A. (2018, November 22). The Support Interval. 
\doi{10.31234/osf.io/zwnxb} } bayestestR/man/weighted_posteriors.Rd0000644000176200001440000001163113610210602017523 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/weighted_posteriors.R \name{weighted_posteriors} \alias{weighted_posteriors} \alias{weighted_posteriors.stanreg} \alias{weighted_posteriors.brmsfit} \alias{weighted_posteriors.BFBayesFactor} \title{Generate posterior distributions weighted across models} \usage{ weighted_posteriors(..., prior_odds = NULL, missing = 0, verbose = TRUE) \method{weighted_posteriors}{stanreg}( ..., prior_odds = NULL, missing = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL ) \method{weighted_posteriors}{brmsfit}( ..., prior_odds = NULL, missing = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL ) \method{weighted_posteriors}{BFBayesFactor}(..., prior_odds = NULL, missing = 0, verbose = TRUE) } \arguments{ \item{...}{Fitted models (see details), all fit on the same data, or a single \code{BFBayesFactor} object (see 'Details').} \item{prior_odds}{Optional vector of prior odds for the models. See \code{BayesFactor::priorOdds<-}.} \item{missing}{An optional numeric value to use if a model does not contain a parameter that appears in other models. Defaults to 0.} \item{verbose}{Toggle off warnings.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. 
Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ A data frame with posterior distributions (weighted across models). } \description{ Extract posterior samples of parameters, weighted across models. Weighting is done by comparing posterior model probabilities, via \code{\link{bayesfactor_models}}. } \details{ Note that across models some parameters might play different roles. For example, the parameter \code{A} plays a different role in the model \code{Y ~ A + B} (where it is a main effect) than it does in the model \code{Y ~ A + B + A:B} (where it is a simple effect). In many cases centering of predictors (mean subtracting for continuous variables, and effects coding via \code{contr.sum} or orthonormal coding via {\code{\link{contr.bayes}}} for factors) can reduce this issue. In any case you should be mindful of this issue. \cr\cr See \code{\link{bayesfactor_models}} details for more info on passed models. \cr\cr Note that for \code{BayesFactor} models, posterior samples cannot be generated from intercept only models. \cr\cr This function is similar in function to \code{brms::\link[brms]{posterior_average}}. 
} \examples{ \donttest{ library(rstanarm) library(see) stan_m0 <- stan_glm(extra ~ 1, data = sleep, family = gaussian(), refresh=0, diagnostic_file = file.path(tempdir(), "df0.csv")) stan_m1 <- stan_glm(extra ~ group, data = sleep, family = gaussian(), refresh=0, diagnostic_file = file.path(tempdir(), "df1.csv")) res <- weighted_posteriors(stan_m0, stan_m1) plot(eti(res)) # With BayesFactor and brms library(BayesFactor) library(brms) BFmods <- anovaBF(extra ~ group + ID, sleep, whichRandom = "ID") res <- weighted_posteriors(BFmods)[1:3] plot(eti(res)) # Compare to brms::posterior_average fit1 <- brm(rating ~ treat + period + carry, data = inhaler, save_all_pars = TRUE) fit2 <- brm(rating ~ period + carry, data = inhaler, save_all_pars = TRUE) res_BT <- weighted_posteriors(fit1, fit2) res_brms <- brms::posterior_average(fit1, fit2, weights = "marglik", missing = 0)[, 1:4] plot(eti(res_BT)) plot(eti(res_brms)) } } \references{ \itemize{ \item Clyde, M., Desimone, H., & Parmigiani, G. (1996). Prediction via orthogonalized model mixing. Journal of the American Statistical Association, 91(435), 1197-1208. \item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. (2019, March 25). A conceptual introduction to Bayesian Model Averaging. \doi{10.31234/osf.io/wgb64} \item Rouder, J. N., Haaf, J. M., & Vandekerckhove, J. (2018). Bayesian inference for psychology, part IV: Parameter estimation and Bayes factors. Psychonomic bulletin & review, 25(1), 102-113. \item van den Bergh, D., Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers, E. J. (2019). A cautionary note on estimating effect size. } } \seealso{ \code{\link{bayesfactor_inclusion}} for Bayesian model averaging. 
} bayestestR/man/distribution.Rd0000644000176200001440000000632413610203766016171 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{distribution} \alias{distribution} \alias{distribution_normal} \alias{distribution_binomial} \alias{distribution_cauchy} \alias{distribution_poisson} \alias{distribution_student} \alias{distribution_chisquared} \alias{distribution_uniform} \alias{distribution_beta} \alias{distribution_tweedie} \alias{distribution_gamma} \alias{distribution_custom} \alias{distribution_mixture_normal} \alias{rnorm_perfect} \title{Empirical Distributions} \usage{ distribution(type = "normal", ...) distribution_normal(n, mean = 0, sd = 1, random = FALSE, ...) distribution_binomial(n, size = 1, prob = 0.5, random = FALSE, ...) distribution_cauchy(n, location = 0, scale = 1, random = FALSE, ...) distribution_poisson(n, lambda = 1, random = FALSE, ...) distribution_student(n, df, ncp, random = FALSE, ...) distribution_chisquared(n, df, ncp = 0, random = FALSE, ...) distribution_uniform(n, min = 0, max = 1, random = FALSE, ...) distribution_beta(n, shape1, shape2, ncp = 0, random = FALSE, ...) distribution_tweedie(n, xi = NULL, mu, phi, power = NULL, random = FALSE, ...) distribution_gamma(n, shape, scale = 1, random = FALSE, ...) distribution_custom(n, type = "norm", ..., random = FALSE) distribution_mixture_normal(n, mean = c(-3, 3), sd = 1, random = FALSE, ...) rnorm_perfect(n, mean = 0, sd = 1) } \arguments{ \item{type}{Can be any of the names from base R's \link[stats]{Distributions}, like \code{"cauchy"}, \code{"pois"} or \code{"beta"}.} \item{...}{Arguments passed to or from other methods.} \item{n}{number of observations. 
If \code{length(n) > 1}, the length is taken to be the number required.} \item{mean}{vector of means.} \item{sd}{vector of standard deviations.} \item{random}{Generate near-perfect or random (simple wrappers for the base R \code{r*} functions) distributions.} \item{size}{number of trials (zero or more).} \item{prob}{probability of success on each trial.} \item{location}{location and scale parameters.} \item{scale}{location and scale parameters.} \item{lambda}{vector of (non-negative) means.} \item{df}{degrees of freedom (\eqn{> 0}, maybe non-integer). \code{df = Inf} is allowed.} \item{ncp}{non-centrality parameter \eqn{\delta}{delta}; currently except for \code{rt()}, only for \code{abs(ncp) <= 37.62}. If omitted, use the central t distribution.} \item{min}{lower and upper limits of the distribution. Must be finite.} \item{max}{lower and upper limits of the distribution. Must be finite.} \item{shape1}{non-negative parameters of the Beta distribution.} \item{shape2}{non-negative parameters of the Beta distribution.} \item{xi}{the value of \eqn{\xi}{xi} such that the variance is \eqn{\mbox{var}[Y]=\phi\mu^{\xi}}{var(Y) = phi * mu^xi}} \item{mu}{the mean} \item{phi}{the dispersion} \item{power}{a synonym for \eqn{\xi}{xi}} \item{shape}{shape and scale parameters. Must be positive, \code{scale} strictly.} } \description{ Generate a sequence of n-quantiles, i.e., a sample of size \code{n} with a near-perfect distribution. 
} \examples{ library(bayestestR) x <- distribution(n = 10) plot(density(x)) x <- distribution(type = "gamma", n = 100, shape = 2) plot(density(x)) } bayestestR/man/describe_posterior.Rd0000644000176200001440000001705013616544116017341 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/describe_posterior.R \name{describe_posterior} \alias{describe_posterior} \alias{describe_posterior.numeric} \alias{describe_posterior.stanreg} \alias{describe_posterior.stanmvreg} \alias{describe_posterior.MCMCglmm} \alias{describe_posterior.brmsfit} \alias{describe_posterior.BFBayesFactor} \title{Describe Posterior Distributions} \usage{ describe_posterior( posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, ... ) \method{describe_posterior}{numeric}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, bf_prior = NULL, BF = 1, ... ) \method{describe_posterior}{stanreg}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = c("fixed", "random", "all"), parameters = NULL, BF = 1, ... ) \method{describe_posterior}{stanmvreg}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = "p_direction", rope_range = "default", rope_ci = 0.89, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = c("fixed", "random", "all"), parameters = NULL, ... ) \method{describe_posterior}{MCMCglmm}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, diagnostic = "ESS", parameters = NULL, ... 
) \method{describe_posterior}{brmsfit}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, BF = 1, ... ) \method{describe_posterior}{BFBayesFactor}( posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope", "bf"), rope_range = "default", rope_ci = 0.89, priors = TRUE, ... ) } \arguments{ \item{posteriors}{A vector, dataframe or model of posterior draws.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively).} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{.89} (89\%) for Bayesian models and \code{.95} (95\%) for frequentist models.} \item{ci_method}{The type of index used for Credible Interval. Can be \code{"HDI"} (default, see \code{\link[bayestestR:hdi]{hdi}}), \code{"ETI"} (see \code{\link[bayestestR:eti]{eti}}) or \code{"SI"} (see \code{\link[bayestestR:si]{si}}).} \item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \pkg{bayestestR} function is called (e.g. 
\code{\link[bayestestR:rope]{rope}} or \code{\link[bayestestR:p_direction]{p_direction}}) and its results included in the summary output.} \item{rope_range}{ROPE's lower and higher bounds. Should be a list of two values (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. If \code{"default"}, the bounds are set to \code{x +- 0.1*SD(response)}.} \item{rope_ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{...}{Additional arguments to be passed to or from methods.} \item{bf_prior}{Distribution representing a prior for the computation of Bayes factors / SI. Used if the input is a posterior, otherwise (in the case of models) ignored.} \item{BF}{The amount of support required to be included in the support interval.} \item{diagnostic}{Diagnostic metrics to compute. Character (vector) or list with one or more of these options: \code{"ESS"}, \code{"Rhat"}, \code{"MCSE"} or \code{"all"}.} \item{priors}{Add the prior used for each parameter.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \description{ Compute indices relevant to describe and characterise the posterior distributions. } \details{ One or more components of point estimates (like posterior mean or median), intervals and tests can be omitted from the summary output by setting the related argument to \code{NULL}. 
For example, \code{test = NULL} and \code{centrality = NULL} would only return the HDI (or CI). } \examples{ library(bayestestR) x <- rnorm(1000) describe_posterior(x) describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(x, ci = c(0.80, 0.90)) df <- data.frame(replicate(4, rnorm(100))) describe_posterior(df) describe_posterior(df, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(df, ci = c(0.80, 0.90)) \dontrun{ # rstanarm models # ----------------------------------------------- if (require("rstanarm") && require("emmeans")) { model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) describe_posterior(model) describe_posterior(model, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(model, ci = c(0.80, 0.90)) # emmeans estimates # ----------------------------------------------- describe_posterior(emtrends(model, ~1, "wt")) } # brms models # ----------------------------------------------- if (require("brms")) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) describe_posterior(model) describe_posterior(model, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(model, ci = c(0.80, 0.90)) } # BayesFactor objects # ----------------------------------------------- if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) describe_posterior(bf) describe_posterior(bf, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(bf, ci = c(0.80, 0.90)) } } } \references{ \itemize{ \item \href{https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html}{Comparison of Point-Estimates} \item \href{https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html}{Region of Practical Equivalence (ROPE)} \item \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{Bayes factors} } } bayestestR/man/hdi.Rd0000644000176200001440000001307413616544116014221 0ustar 
liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hdi.R \name{hdi} \alias{hdi} \alias{hdi.numeric} \alias{hdi.data.frame} \alias{hdi.MCMCglmm} \alias{hdi.sim.merMod} \alias{hdi.sim} \alias{hdi.emmGrid} \alias{hdi.stanreg} \alias{hdi.brmsfit} \alias{hdi.BFBayesFactor} \title{Highest Density Interval (HDI)} \usage{ hdi(x, ...) \method{hdi}{numeric}(x, ci = 0.89, verbose = TRUE, ...) \method{hdi}{data.frame}(x, ci = 0.89, verbose = TRUE, ...) \method{hdi}{MCMCglmm}(x, ci = 0.89, verbose = TRUE, ...) \method{hdi}{sim.merMod}( x, ci = 0.89, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{hdi}{sim}(x, ci = 0.89, parameters = NULL, verbose = TRUE, ...) \method{hdi}{emmGrid}(x, ci = 0.89, verbose = TRUE, ...) \method{hdi}{stanreg}( x, ci = 0.89, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{hdi}{brmsfit}( x, ci = 0.89, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ... ) \method{hdi}{BFBayesFactor}(x, ci = 0.89, verbose = TRUE, ...) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg}, \code{brmsfit} or a \code{BayesFactor} model.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Default to \code{.89} (89\%).} \item{verbose}{Toggle off warnings.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. 
Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \value{ A data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. } } \description{ Compute the \strong{Highest Density Interval (HDI)} of posterior distributions. All points within this interval have a higher probability density than points outside the interval. The HDI can be used in the context of uncertainty characterisation of posterior distributions as \strong{Credible Interval (CI)}. } \details{ Unlike equal-tailed intervals (see \code{eti()}) that typically exclude 2.5\% from each tail of the distribution and always include the median, the HDI is \emph{not} equal-tailed and therefore always includes the mode(s) of posterior distributions. \cr \cr By default, \code{hdi()} and \code{eti()} return the 89\% intervals (\code{ci = 0.89}), deemed to be more stable than, for instance, 95\% intervals (\cite{Kruschke, 2014}). An effective sample size of at least 10.000 is recommended if 95\% intervals should be computed (\cite{Kruschke, 2014, p. 183ff}). Moreover, 89 indicates the arbitrariness of interval limits - its only remarkable property is being the highest prime number that does not exceed the already unstable 95\% threshold (\cite{McElreath, 2015}). \cr \cr A 90\% equal-tailed interval (ETI) has 5\% of the distribution on either side of its limits. It indicates the 5th percentile and the 95th percentile. 
In symmetric distributions, the two methods of computing credible intervals, the ETI and the \link[=hdi]{HDI}, return similar results. \cr \cr This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution. \cr \cr On the other hand, the ETI range does change when transformations are applied to the distribution (for instance, for a log odds scale to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI. } \examples{ library(bayestestR) posterior <- rnorm(1000) hdi(posterior, ci = .89) hdi(posterior, ci = c(.80, .90, .95)) df <- data.frame(replicate(4, rnorm(100))) hdi(df) hdi(df, ci = c(.80, .90, .95)) \dontrun{ library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) hdi(model) hdi(model, ci = c(.80, .90, .95)) library(emmeans) hdi(emtrends(model, ~1, "wt")) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) hdi(model) hdi(model, ci = c(.80, .90, .95)) library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) hdi(bf) hdi(bf, ci = c(.80, .90, .95)) } } \references{ \itemize{ \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. \item McElreath, R. (2015). Statistical rethinking: A Bayesian course with examples in R and Stan. Chapman and Hall/CRC. } } \author{ Credits go to \href{https://rdrr.io/cran/ggdistribute/src/R/stats.R}{ggdistribute} and \href{https://github.com/mikemeredith/HDInterval}{HDInterval}. 
} bayestestR/man/effective_sample.Rd0000644000176200001440000000477713616544116016760 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/effective_sample.R \name{effective_sample} \alias{effective_sample} \alias{effective_sample.brmsfit} \alias{effective_sample.stanreg} \alias{effective_sample.MCMCglmm} \title{Effective Sample Size (ESS)} \usage{ effective_sample(model, ...) \method{effective_sample}{brmsfit}( model, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) \method{effective_sample}{stanreg}( model, effects = c("fixed", "random", "all"), parameters = NULL, ... ) \method{effective_sample}{MCMCglmm}( model, effects = c("fixed", "random", "all"), parameters = NULL, ... ) } \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, or \code{brmsfit} object.} \item{...}{Currently not used.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ A data frame with two columns: Parameter name and effective sample size (ESS). } \description{ This function returns the effective sample size (ESS). } \details{ \strong{Effective Sample (ESS)} should be as large as possible, although for most applications, an effective sample size greater than 1,000 is sufficient for stable estimates (Bürkner, 2017). 
The ESS corresponds to the number of independent samples with the same estimation power as the N autocorrelated samples. It is a measure of \dQuote{how much independent information there is in autocorrelated chains} (\emph{Kruschke 2015, p182-3}). } \examples{ \dontrun{ library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) effective_sample(model) } } \references{ \itemize{ \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. \item Bürkner, P. C. (2017). brms: An R package for Bayesian multilevel models using Stan. Journal of Statistical Software, 80(1), 1-28 } } bayestestR/man/estimate_density.Rd0000644000176200001440000000672213616544116017031 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate_density.R \name{estimate_density} \alias{estimate_density} \title{Density Estimation} \usage{ estimate_density( x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg}, \code{brmsfit} or a \code{BayesFactor} model.} \item{method}{Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}.} \item{precision}{Number of points of density data. See the \code{n} parameter in \link[=density]{density}.} \item{extend}{Extend the range of the x axis by a factor of \code{extend_scale}.} \item{extend_scale}{Ratio of range by which to extend the x axis. A value of \code{0.1} means that the x axis will be extended by \code{1/10} of the range of the data.} \item{bw}{the smoothing bandwidth to be used. The kernels are scaled such that this is the standard deviation of the smoothing kernel. (Note this differs from the reference books cited below, and from S-PLUS.) \code{bw} can also be a character string giving a rule to choose the bandwidth. 
See \code{\link[stats]{bw.nrd}}. \cr The default, \code{"nrd0"}, has remained the default for historical and compatibility reasons, rather than as a general recommendation, where e.g., \code{"SJ"} would rather fit, see also Venables and Ripley (2002). The specified (or computed) value of \code{bw} is multiplied by \code{adjust}. } \item{...}{Currently not used.} } \description{ This function is a wrapper over different methods of density estimation. By default, it uses the base R \link{density} function with a different smoothing bandwidth (\code{"SJ"}) from the legacy default implemented in the base R \link{density} function (\code{"nrd0"}). However, Deng \& Wickham suggest that \code{method = "KernSmooth"} is the fastest and the most accurate. } \examples{ library(bayestestR) set.seed(1) x <- rnorm(250, 1) # Methods density_kernel <- estimate_density(x, method = "kernel") density_logspline <- estimate_density(x, method = "logspline") density_KernSmooth <- estimate_density(x, method = "KernSmooth") density_mixture <- estimate_density(x, method = "mixture") hist(x, prob = TRUE) lines(density_kernel$x, density_kernel$y, col = "black", lwd = 2) lines(density_logspline$x, density_logspline$y, col = "red", lwd = 2) lines(density_KernSmooth$x, density_KernSmooth$y, col = "blue", lwd = 2) lines(density_mixture$x, density_mixture$y, col = "green", lwd = 2) # Extension density_extended <- estimate_density(x, extend = TRUE) density_default <- estimate_density(x, extend = FALSE) hist(x, prob = TRUE) lines(density_extended$x, density_extended$y, col = "red", lwd = 3) lines(density_default$x, density_default$y, col = "black", lwd = 3) df <- data.frame(replicate(4, rnorm(100))) head(estimate_density(df)) \dontrun{ # rstanarm models # ----------------------------------------------- library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) head(estimate_density(model)) library(emmeans) head(estimate_density(emtrends(model, ~1, 
"wt"))) # brms models # ----------------------------------------------- library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) estimate_density(model) } } \references{ Deng, H., & Wickham, H. (2011). Density estimation in R. Electronic publication. } bayestestR/man/bayesfactor_restricted.Rd0000644000176200001440000001514513616544116020210 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor_restricted.R \name{bayesfactor_restricted} \alias{bayesfactor_restricted} \alias{bf_restricted} \alias{bayesfactor_restricted.stanreg} \alias{bayesfactor_restricted.brmsfit} \alias{bayesfactor_restricted.emmGrid} \title{Bayes Factors (BF) for Order Restricted Models} \usage{ bayesfactor_restricted( posterior, hypothesis, prior = NULL, verbose = TRUE, ... ) bf_restricted(posterior, hypothesis, prior = NULL, verbose = TRUE, ...) \method{bayesfactor_restricted}{stanreg}( posterior, hypothesis, prior = NULL, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), ... ) \method{bayesfactor_restricted}{brmsfit}( posterior, hypothesis, prior = NULL, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), ... ) \method{bayesfactor_restricted}{emmGrid}( posterior, hypothesis, prior = NULL, verbose = TRUE, ... ) } \arguments{ \item{posterior}{A \code{stanreg} / \code{brmsfit} object, \code{emmGrid} or a data frame - representing a posterior distribution(s) from (see Details).} \item{hypothesis}{A character vector specifying the restrictions as logical conditions (see examples below).} \item{prior}{An object representing a prior distribution (see Details).} \item{verbose}{Toggle off warnings.} \item{...}{Currently not used.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. 
May be abbreviated.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \value{ A data frame containing the Bayes factor representing evidence \emph{against} the un-restricted model. } \description{ This method computes Bayes factors for comparing a model with an order restrictions on its parameters with the fully unrestricted model. \emph{Note that this method should only be used for confirmatory analyses}. \cr \cr The \code{bf_*} function is an alias of the main function. \cr \cr \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.} } \details{ This method is used to compute Bayes factors for order-restricted models vs un-restricted models by setting an order restriction on the prior and posterior distributions (\cite{Morey & Wagenmakers, 2013}). \cr\cr (Though it is possible to use \code{bayesfactor_restricted()} to test interval restrictions, it is more suitable for testing order restrictions; see examples). \cr\cr For the computation of Bayes factors, the model priors must be proper priors (at the very least they should be \emph{not flat}, and it is preferable that they be \emph{informative}); As the priors for the alternative get wider, the likelihood of the null value(s) increases, to the extreme that for completely flat priors the null is infinitely more favorable than the alternative (this is called \emph{the Jeffreys-Lindley-Bartlett paradox}). Thus, you should only ever try (or want) to compute a Bayes factor when you have an informed prior. \cr\cr (Note that by default, \code{brms::brm()} uses flat priors for fixed-effects.) \subsection{Setting the correct \code{prior}}{ It is important to provide the correct \code{prior} for meaningful results. 
\itemize{ \item When \code{posterior} is a \code{data.frame}, \code{prior} should also be a \code{data.frame}, with matching column order. \item When \code{posterior} is a \code{stanreg} or \code{brmsfit} model: \itemize{ \item \code{prior} can be set to \code{NULL}, in which case prior samples are drawn internally. \item \code{prior} can also be a model equivalent to \code{posterior} but with samples from the priors \emph{only}. } \item When \code{posterior} is an \code{emmGrid} object: \itemize{ \item \code{prior} should be the \code{stanreg} or \code{brmsfit} model used to create the \code{emmGrid} objects. \item \code{prior} can also be an \code{emmGrid} object equivalent to \code{posterior} but created with a model of priors samples \emph{only}. } }} \subsection{Interpreting Bayes Factors}{ A Bayes factor greater than 1 can be interpreted as evidence against the null, where one convention is that a Bayes factor greater than 3 can be considered as "substantial" evidence against the null (and vice versa, a Bayes factor smaller than 1/3 indicates substantial evidence in favor of the null-hypothesis) (\cite{Wetzels et al. 2011}). 
} } \examples{ library(bayestestR) prior <- data.frame( X = rnorm(100), X1 = rnorm(100), X3 = rnorm(100) ) posterior <- data.frame( X = rnorm(100, .4), X1 = rnorm(100, -.2), X3 = rnorm(100) ) hyps <- c( "X > X1 & X1 > X3", "X > X1" ) bayesfactor_restricted(posterior, hypothesis = hyps, prior = prior) \dontrun{ # rstanarm models # --------------- if (require("rstanarm") && require("emmeans")) { fit_stan <- stan_glm(mpg ~ wt + cyl + am, data = mtcars ) hyps <- c( "am > 0 & cyl < 0", "cyl < 0", "wt - cyl > 0" ) bayesfactor_restricted(fit_stan, hypothesis = hyps) # emmGrid objects # --------------- # replicating http://bayesfactor.blogspot.com/2015/01/multiple-comparisons-with-bayesfactor-2.html disgust_data <- read.table(url("http://www.learnbayes.org/disgust_example.txt"), header = TRUE) contrasts(disgust_data$condition) <- contr.bayes # see vignette fit_model <- stan_glm(score ~ condition, data = disgust_data, family = gaussian()) em_condition <- emmeans(fit_model, ~condition) hyps <- c("lemon < control & control < sulfur") bayesfactor_restricted(em_condition, prior = fit_model, hypothesis = hyps) # > # Bayes Factor (Order-Restriction) # > # > Hypothesis P(Prior) P(Posterior) Bayes Factor # > lemon < control & control < sulfur 0.17 0.75 4.49 # > --- # > Bayes factors for the restricted model vs. the un-restricted model. } } } \references{ \itemize{ \item Morey, R. D., & Wagenmakers, E. J. (2014). Simple relation between Bayesian order-restricted and point-null hypothesis tests. Statistics & Probability Letters, 92, 121-124. \item Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. Psychological methods, 16(4), 406. \item Morey, R. D. (Jan, 2015). Multiple Comparisons with BayesFactor, Part 2 – order restrictions. Retrived from https://richarddmorey.org/category/order-restrictions/. 
} } bayestestR/man/eti.Rd0000644000176200001440000001223113616544116014230 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/eti.R \name{eti} \alias{eti} \alias{eti.numeric} \alias{eti.data.frame} \alias{eti.MCMCglmm} \alias{eti.sim.merMod} \alias{eti.sim} \alias{eti.emmGrid} \alias{eti.stanreg} \alias{eti.brmsfit} \alias{eti.BFBayesFactor} \title{Equal-Tailed Interval (ETI)} \usage{ eti(x, ...) \method{eti}{numeric}(x, ci = 0.89, verbose = TRUE, ...) \method{eti}{data.frame}(x, ci = 0.89, verbose = TRUE, ...) \method{eti}{MCMCglmm}(x, ci = 0.89, verbose = TRUE, ...) \method{eti}{sim.merMod}( x, ci = 0.89, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{eti}{sim}(x, ci = 0.89, parameters = NULL, verbose = TRUE, ...) \method{eti}{emmGrid}(x, ci = 0.89, verbose = TRUE, ...) \method{eti}{stanreg}( x, ci = 0.89, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{eti}{brmsfit}( x, ci = 0.89, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ... ) \method{eti}{BFBayesFactor}(x, ci = 0.89, verbose = TRUE, ...) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg}, \code{brmsfit} or a \code{BayesFactor} model.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Default to \code{.89} (89\%).} \item{verbose}{Toggle off warnings.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. 
Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \value{ A data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. } } \description{ Compute the \strong{Equal-Tailed Interval (ETI)} of posterior distributions using the quantiles method. The probability of being below this interval is equal to the probability of being above it. The ETI can be used in the context of uncertainty characterisation of posterior distributions as \strong{Credible Interval (CI)}. } \details{ Unlike equal-tailed intervals (see \code{eti()}) that typically exclude 2.5\% from each tail of the distribution and always include the median, the HDI is \emph{not} equal-tailed and therefore always includes the mode(s) of posterior distributions. \cr \cr By default, \code{hdi()} and \code{eti()} return the 89\% intervals (\code{ci = 0.89}), deemed to be more stable than, for instance, 95\% intervals (\cite{Kruschke, 2014}). An effective sample size of at least 10.000 is recommended if 95\% intervals should be computed (\cite{Kruschke, 2014, p. 183ff}). Moreover, 89 indicates the arbitrariness of interval limits - its only remarkable property is being the highest prime number that does not exceed the already unstable 95\% threshold (\cite{McElreath, 2015}). \cr \cr A 90\% equal-tailed interval (ETI) has 5\% of the distribution on either side of its limits. It indicates the 5th percentile and the 95th percentile. 
In symmetric distributions, the two methods of computing credible intervals, the ETI and the \link[=hdi]{HDI}, return similar results. \cr \cr This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution. \cr \cr On the other hand, the ETI range does change when transformations are applied to the distribution (for instance, for a log odds scale to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI. } \examples{ library(bayestestR) posterior <- rnorm(1000) eti(posterior) eti(posterior, ci = c(.80, .89, .95)) df <- data.frame(replicate(4, rnorm(100))) eti(df) eti(df, ci = c(.80, .89, .95)) \dontrun{ library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) eti(model) eti(model, ci = c(.80, .89, .95)) library(emmeans) eti(emtrends(model, ~1, "wt")) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) eti(model) eti(model, ci = c(.80, .89, .95)) library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) eti(bf) eti(bf, ci = c(.80, .89, .95)) } } bayestestR/man/bayesfactor_models.Rd0000644000176200001440000001327713616544116017327 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor_models.R \name{bayesfactor_models} \alias{bayesfactor_models} \alias{bf_models} \title{Bayes Factors (BF) for model comparison} \usage{ bayesfactor_models(..., denominator = 1, verbose = TRUE) bf_models(..., denominator = 1, verbose = TRUE) } \arguments{ \item{...}{Fitted models (see details), all fit on the same data, or a single \code{BFBayesFactor} object (see 
'Details').} \item{denominator}{Either an integer indicating which of the models to use as the denominator, or a model to be used as a denominator. Ignored for \code{BFBayesFactor}.} \item{verbose}{Toggle off warnings.} } \value{ A data frame containing the models' formulas (reconstructed fixed and random effects) and their BFs, that prints nicely. } \description{ This function computes or extracts Bayes factors from fitted models. \cr \cr The \code{bf_*} function is an alias of the main function. } \details{ If the passed models are supported by \pkg{insight} the DV of all models will be tested for equality (else this is assumed to be true), and the models' terms will be extracted (allowing for follow-up analysis with \code{bayesfactor_inclusion}). \itemize{ \item For \code{brmsfit} or \code{stanreg} models, Bayes factors are computed using the \CRANpkg{bridgesampling} package. \itemize{ \item \code{brmsfit} models must have been fitted with \code{save_all_pars = TRUE}. \item \code{stanreg} models must have been fitted with a defined \code{diagnostic_file}. } \item For \code{BFBayesFactor}, \code{bayesfactor_models()} is mostly a wrapper around \code{BayesFactor::extractBF()}. \item For all other model types (supported by \CRANpkg{insight}), BIC approximations are used to compute Bayes factors. } In order to correctly and precisely estimate Bayes factors, a rule of thumb is the 4 P's: \strong{P}roper \strong{P}riors and \strong{P}lentiful \strong{P}osterior (i.e. probably at least 40,000 samples instead of the default of 4,000). \cr \cr A Bayes factor greater than 1 can be interpreted as evidence against the compared-to model (the denominator). One convention is that a Bayes factor greater than 3 can be considered as "substantial" evidence against the denominator model (and vice versa, a Bayes factor smaller than 1/3 indicates substantial evidence in favor of the denominator model) (\cite{Wetzels et al. 2011}). 
\cr \cr See also \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}. } \examples{ # With lm objects: # ---------------- lm1 <- lm(Sepal.Length ~ 1, data = iris) lm2 <- lm(Sepal.Length ~ Species, data = iris) lm3 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) lm4 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) bayesfactor_models(lm1, lm2, lm3, lm4, denominator = 1) bayesfactor_models(lm2, lm3, lm4, denominator = lm1) # same result bayesfactor_models(lm1, lm2, lm3, lm4, denominator = lm1) # same result \dontrun{ # With lmerMod objects: # --------------------- if (require("lme4")) { lmer1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) lmer2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) lmer3 <- lmer( Sepal.Length ~ Petal.Length + (Petal.Length | Species) + (1 | Petal.Width), data = iris ) bayesfactor_models(lmer1, lmer2, lmer3, denominator = 1) bayesfactor_models(lmer1, lmer2, lmer3, denominator = lmer1) } # rstanarm models # --------------------- # (note that a unique diagnostic_file MUST be specified in order to work) if (require("rstanarm")) { stan_m0 <- stan_glm(Sepal.Length ~ 1, data = iris, family = gaussian(), diagnostic_file = file.path(tempdir(), "df0.csv") ) stan_m1 <- stan_glm(Sepal.Length ~ Species, data = iris, family = gaussian(), diagnostic_file = file.path(tempdir(), "df1.csv") ) stan_m2 <- stan_glm(Sepal.Length ~ Species + Petal.Length, data = iris, family = gaussian(), diagnostic_file = file.path(tempdir(), "df2.csv") ) bayesfactor_models(stan_m1, stan_m2, denominator = stan_m0) } # brms models # -------------------- # (note the save_all_pars MUST be set to TRUE in order to work) if (require("brms")) { brm1 <- brm(Sepal.Length ~ 1, data = iris, save_all_pars = TRUE) brm2 <- brm(Sepal.Length ~ Species, data = iris, save_all_pars = TRUE) brm3 <- brm( Sepal.Length ~ Species + Petal.Length, data = iris, save_all_pars = TRUE ) 
bayesfactor_models(brm1, brm2, brm3, denominator = 1) } # BayesFactor # --------------------------- if (require("BayesFactor")) { data(puzzles) BF <- anovaBF(RT ~ shape * color + ID, data = puzzles, whichRandom = "ID", progress = FALSE ) BF bayesfactor_models(BF) # basically the same } } } \references{ \itemize{ \item Gronau, Q. F., Wagenmakers, E. J., Heck, D. W., and Matzke, D. (2019). A simple method for comparing complex models: Bayesian model comparison for hierarchical multinomial processing tree models using Warp-III bridge sampling. Psychometrika, 84(1), 261-284. \item Kass, R. E., and Raftery, A. E. (1995). Bayes Factors. Journal of the American Statistical Association, 90(430), 773-795. \item Robert, C. P. (2016). The expected demise of the Bayes factor. Journal of Mathematical Psychology, 72, 33–37. \item Wagenmakers, E. J. (2007). A practical solution to the pervasive problems of p values. Psychonomic bulletin & review, 14(5), 779-804. \item Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and Wagenmakers, E.-J. (2011). Statistical Evidence in Experimental Psychology: An Empirical Comparison Using 855 t Tests. Perspectives on Psychological Science, 6(3), 291–298. \doi{10.1177/1745691611406923} } } \author{ Mattan S. 
Ben-Shachar } bayestestR/man/figures/0000755000176200001440000000000013620150641014614 5ustar liggesusersbayestestR/man/figures/unnamed-chunk-16-1.png0000644000176200001440000016375213607554753020500 0ustar liggesusersPNG  IHDR .}PLTE:f:::f:f!333::f:::::::f:ff:f:f:::MMMMMnMMMnnMnMnMff:f::f:ff:ff:ffffffffffnMMnMnnMnnnnnnnnnMMnMnnȎȎ:f:ffffېnMnȫff::f۶ې۶ȎMȎnȫې:ېf۶f۶۶۶ې۶n䫎ȎȫcfȎې۶xxE pHYsE4E4Ƶ IDATxݎya(;8R <0A4 ZUs6Z3{__M#D"v`;A؎ lG#D"v`;A؎ lG#D"v`;A؎ lG#D"v`;A؎ lG#D"_%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1Scȧg!_{vw䗿Dq?~{V>#8o}͟GY"AYuC䭼GD`EgDrJh!4Oy/^^?~wΏ| 8KuSGGչhFq߿;şf_y(g|DDq<Oy又|#$ߞ-"ǗW?_SCy"0ΊC8k_WH^ޑ'?iπy??:AYpȟhęg! ߾!?q$8 ߆,o|_IqƛA#?ݯ)PDqύ<|w? "0˂CD>|oOo!:"o};#x ȇuY9]Do!e?&W_%r"0ނC ~~  Xp_D>2?ߟA[p_D_į'WD_o^33_D` ߿_D`>¯>ׅP>#8 ! ʷ|NqCoy [Yp?}G_߼wƼL Xqȟj֍:XՊCׅWƜv}[G>#8+S8<>9%"O?dAYrO?SB i9AYr?>Éo_D`%ӟvy៟wOg!OC%=Kg!g+f}?q߹ :糺ף>"'Y=Dy߾7N__(*AYwo2SA?~_AYy?}ow "OOg[ǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )A1ScȧǐO "0!D`C>%8|Jq )AG`ȧKa)A^! pLI`s|J83Z*9C>%E9Dؔ!D"lɐO "reDcȧø>H"1SAd9D؋!D!!N p!F wlÐO "u7!& Pv"O "U! t"ϐO "E! s,ΐO "-! Pr",͐O "! PːO "! x! p! h! X! 
P,ȐO "T!z @,ǐO "S!j 0,ƐO "S "R (,ŐO "!J p3|Jxv y>nŐO ".![1SC'F p Qp|JvB0 C>%[;|}܂!D@i7`ȧ;k'@ΐO "ծ!D?Nj )C>%U;}>R|JvxAX)AE cȧ;jgO!D=^>"|JvxUpH)An  `ȧ{i7)ANڹ #j|Jv8G!D;>$eȧh3 +)Aڡl:|Jv8[!D9.>*bȧkG k)Aډ2|JvP!Dn8.>0.gȧk+ )Au #b|JvܸJ!Dn6>6.dȧ[jk )Aaj2|Jv)AU#>:.bȧiGH!Dn4B|Jv )AVA#>@gȧi\8!Dn3n}͐O "ю7>Deȧhh"2SMSƍ3)A!f y p q3<|JvƸQpC>%g Y kGj&0S@0n}O "vqpC>% j;h)o2S@]/}ɐO "vC>%D>ڧ [ i;i+o0S@.}ΐO "v:C>%G hW;j-2Sڇ k pvC>%\,}O "j{k/0S pvx"C>%\]+}ȐO "iNJh2/1Suڭ!ڇ K pvx1C>%\]*}O "hi4)A L84C>%\)}dȧ˵## pvxap!D.N>mN1Sډ |JT;P%\' G3|JL;N4g pvh:_3SEi}|͐O "iC>%\&ZW pvi<_2SY}|ɐO "hgC>%%g pvj>3SI}|ΐO "gk'C>%$g pvk_?3S9}̐O "gjh_)A<q;C>%#} |dȧS1o pv85!DGѾ>0S9!0C>%!}<1S@W{|Jx[;BI.xϐO "okGCi_?1S XڷO v8u DGӾ~Dr[p DӾ v~8 7A$&}))AupHKO "jj_ vy8lϐO "ij_ vw8ΐO "hgk_ vu8lΐO "/kG#k vt8͐O "/k7Ck_ vr8l͐O "/jk_ vp8̐O "/ik_ vn}E3S ڵal̐O "cKؗ!DNkڗ/C>%N 3o `_|J8h_ pR4 Ѿ&m)Avh}Q2S)0FveȧڙaUlʐO "'+ ؔ!DNhWAW)C>%<׎ / `O|JxFi_ L;1 Ӿ.-)Ava}][2Sځa}lɐO "_kq#C>%|i_ v^}e2SWualȐO "_jDžڗC>%|Fj_~ vZ}m1SealǐO "kC>%|j_n vV}s1SgYalƐO "iWWC>%Fk_^ vS}y{1S'0\bȧOEalŐO "p؊!D>jC>%| /`'|JоB)AvMX@ 6bȧ'혰lĐO "O1a K؇!D[ڗC>%N h_#6 ^$,}0S{풰5lÐO "?ie/`|JI#,}0Sz `|JQUlO "? "7ԾJM)A@eO "M/`|JCn}{0S r[؂!Dj_' i_( i_( lԾR )A^,}0Sv=XPJ6`ȧ`wxϐO "`IKX!D͵ %kE`y|Jj+ վX)A[,}3Sv6XXjgȧ`kj,ΐO "`aX!Dڗ 6C>%;k7/`m|J6 IDATNk_/ l] ׾^)AX,}K3Sv0X^Vfȧ`_^̐O "ڹ`}X!DmsW 0C>%jׂ `a|JvՎ[h_2  оdu)AUl}2Sv*Deȧ`SR5,ːO "ڥ`kX!D=C6 *C>%{jwm/`U|Jh_4 l 6ҾjE)ARl}2Sv$Ieȧ`GHeɐO "ڍ`+X!D f $C>%jʹ`I|J6.i_7 Ӿp)AOl}+2SvPdȧ`;:,ȐO "vu`C+X!DݴڗC>%i-/`=|J6N{j:z l6վv)AL l}1SvVVcȧ`/.ƐO "^]`[X!DWC>%[iW`1|JҮk_=b ־|)AI l}k1SvZbȧ`#$,ŐO "FE`sX!D}C>%hJ ~+1Sv@C>%h1Sv @C>%h܎!D][DnǐO ".-?"cȧ`{g C>%hk?e)AD^,ÐO "!`|J|~0SvKX!D-3" l]aȧ`gڏ` |Jvn|`ȧ`gڏ` |J6N|`ȧ`` |J6.|`ȧ`}W`|J| V`ȧ`}W`|Jמ<~ 0SX!Dյ?ϵ =9(3SϐO "ӟ`%k/Nj? ,9,3S0!Dw?/h? yAaLgȧ`iϋO`8C>%Kk~^~)AXZ{ΐO "ڣWl|JV8f3S50!D'?j? ,yUyfȧ`]`4C>%j~~ )AXW{̐O "s_d|J՞D3SM'0!DU>ok =9Ceȧ`Uڏ`.C>%jO}~%s)AXT{s3ːO "K X|JN2SL0!D5w>gj? ,=9WLeȧ`IO`(C>%Kj|~*C)AXQ{s[ʐO "#  L|JV\Xf2S0!D'>i? 
,Hdȧ`=e`$C>%i|.~0)AXO{sȐO "rK_ D|JӞ\d2Sb'0!Dմ=k =Bcȧ`5mڏ`C>%iO{~5)AXL{sǐO "b˞ 8|J\n1SJ0!Dw=Wj?i ,=VLcȧ`)YO`C>%Kiz~:)AXI{sƐO "Jڣ@bȧ`%MOxf1S'~=)AXH{i?Q ,艴(|Jd`C>%hzB0!Du<ĐO "2sTLbȧ`9O1S'~B)AXE{̓k!A =湁#ÐO "*[h?"9 ,=幅+ÐO ""Sh?#1 ,义3ÐO ""Kh?#1 =乑CO ";i?$) 㹑CO "3i?%! ,⹙SO "+i?%! =⹡cO " j?& ṡcO "j?' ,๩sO " j?' V=`ȧ` `C>%;7~P)A=߹O "x͵|Jkwn0S0]{s{70!D;h?*3S0]{sGp||JkOwϐO "p])Aܹ8%;~Wgȧ`pN Ia!Dڳ{i,3S0Z{s7pp|JFkvΐO "hݴ)A=ڹ86C>%7;w~\fȧ`fڏ  ֞Uy!Dڋj?/C3S0X{sWph|Jjv̐O "\Ν)A׹82C>%c:w~bfȧ`ZO ^]!Dck13S0U{Gp\|JjouːO "Tq)A=y38,C>%C:~feȧ`R! %G:~ldȧ`Fڏ L%9~pGdȧ`> Ӟ<\!Dq뜇k?92S0N{p'p@|Jis ڏx L)AG spdxR@:\ ŅNvzA AbF:cȾ.1v3=3=dV}>ϗUO:s "@gGsBDv1DDO愈>c% }4 K=#hA> %z0'H|.AK\N5\З蹜 '@k +c9aO=s "@WrDz1DDO儉>c%= } Kz=(hA> $z&'P|.AILN-\Б葜Pѧ@S #9O?s "@G'rBE~M1D~D>Zb% } K=,hA> t#z'Z|.AF8NS!\Ѝipѧ @C 8OAs "@/q} Kz=Ӏ\ЋYD0DND4!4hA> t"z ѧ!@3 ѓ8M> a%}iD |.ACN#ODVs "@p}" K=ӌS\Ѕ)fD0D.DO4#ThA>  z"@# C8 >`%=iH|.AA NCOF6s "@Gp}:4 K:=Ӕ \Ё DM0DE4&hA> /z1'$@ }7>!Z`%틞iL |.Ah^MsOIs "@o}J4 K=}ӜS\кE uѳ7 >)s "@go}R3DE޴(g%iRi  K=yӤ A> 4.zIѧ%@8|.Ah[MOLh\ж蹛FE ms7>1s "@Ӣn}j3DEO4+f%MiV  K=uӬS A> ,za''@,|.AhYMâONX} ,za''@A'Sа葛EnZhz$ iZ *'vǣ3а艛E2bqDvEܴ-D^{9?:#i\ D3ЮyEI"}tF=oӸ Av<DfE4/5ȟ~{ۣ3ЬiEr_4(r`DfEO4/T`3AhUMOR8E˯sAhUMOR8Kͣ3ЪYDq w?w"@Gm});:[tF=iӅ LA~LAhTMOS05G~ȴoD6Et"DRaUV"@l:}D)=_z.>8#mD  rnɟt_}_'˛?Ф1^DQ /=)g(ӿ$%P:=fӍS HAŷ5'g_9e~aᖿw  "@l})1ȟ~1w6a!IS6݈>UOŬ~9-"{DEt$d9?ja;{)eGdA[MGOV9רn?*D:=cӑ AkT/?*scDd#Ξg?'еD!_rʜ[>33V"~ t-z¦+ѧ+@a ޿Q?0#3˨]?~[Dξ[A[MWOW%ȭ7饪Ce_7A\MWOWAd2kwÒ]u7aɚs6>a"dY;Y7|Ϙ}_Y%_Ɋ 'z3',@ r~)^"UIDEt&08}6Rӝ=ru!wn&зDqtu9eDEOt'0Aq}#~^A2AhKtMwOY~[ȍAo]DE'0l?wn[D^'ǷmWY}+PI 0Oy72(?qDEt(_ w">fPwo^A}@٢gk:}n "3ǥ2&=Zӥ`t/޸*L\SCzo"ѓ5]>mFW+<[tD^ ^e4`8{sHI$zKѧ-*;U[ ">vww"07nH`MO\y5l_>=H̍wl?3k_!r_Sf`HLMǢO^qM1,ֽ˻@v+rA&KkEdY揍Ao{eEC)"#5=>{5 WI7"0U="=Rӵ`Tǯi5 _}? ӔC:)"5]>}F5A?/f#fAIO_QMu?/! 
e.H@MO`1Mv߹GdAfIDc Ov?nQ!=yEc _߽y??~ID&'0 C~^) 0ӯ?yӧ['M>"pS`D/Mo=M>iAE#Dr "Ѕ"="=L3'1x\tACDDr "A}G%@ ƋH,DDr "ЃYQI>Fc%@8Li 0|.Az aOcs "Ѓ9Ai>Fc%@8P 0|.Az Ods "ЃAkDDcibAzP0"~|;V/~ίKuBiGD#6?y f3ID%{dpD(-A~yC3OKDVUAZ,Oi /Վ+D6nD{z,H+eEdY',@ݽ[KA IDATnDٙ v4HYBd3x*М Rk=:3IuNVxCw7dzK3U#T3DOLRS)A /,AFPRC6HMC$U8S2|@]u#{埞A+x !5 ѓ3T<hͰ+w\̃KoY,A*+zm!7DLT9.ȫ]>Axd "PUCj!=73QOT "W?}bWt "PS0"PG31/.^/P}![/.A!nSOU $?عa "@=B´z B-U &o~b%&@->^"PO r`)SthIU?y3o>C^ "PU r@)*gf& Аӿ?J}$R!t |.AjCΌVC**|.ARo y%PU PA> Ur6"T}zTe%@]c\g "3}d%@e#\fR PA> DJ'8@M\ =.3yѧ8@E?|?֧Od W>*קּŎ]OE o! A}T4x=Dk!=D 5Orz_CCѳ23PT4pvHD>4D[DA ΰAO{|'I r"zTfQ 7/|8DMmoIY1l?#17D-ǏoIY1l?%AihEу231ҙ0A;+D-}0]D` w9۠{g/JL h;Aҧ9YFAq4D40 ╪g@mHZ ")=Da,c#x rෆ "h`dHvsЃGѴD| [zș6sЃBPyМ} ۃ<;F-|,}jh/&-C߰=#3###YUKl Ľsm*EcS;[9C= "e`\ >x\C{9C{= r=!3+# YEC3k4DnܤEO(<BttdA̼DU ߇,%~zZD`ȏכ;\96tЃ.}Ԑ7KW {E5"gwA$z3D ( KuDGcf)( K "p8|.A'MO{ __A$z0fO|  7>J3DEpD'yx=-wCuD6&{ى>JOhUo-Ϸ`DMH|D ϿZ\Xgos 3v!2}3ȟ~{{q9lnD]MX̜De _.W_}S['@z "tc9#3''?@Y-AxU0lmBd#(k彝 "3"bAdg;&3'?@YCw,_Y?0x ՏgtH>|<>D"(jveuȻ_Wϖ_[D@y/xOVVoYKAdQ3.w}Pք}0I "BPҰgbXrY=R3gfz@;,DNf PҰ1w}ŮJe("Q1JUy}Av2|Pťձ ."A1sJUYB !WC5E= 3!-z 4h@"WDu !?!,z $qC "CW(H*踇tDa^ "w"0#AʩRU2mDPыZwsnn/(z3lr} d2a^Q2l˲| AdYL_`EQCE/bYV7oDT;U5QCE/bnZVwSg A&@CE/b[@VEd'H g@)C-"<5DN]Œy ""pPAU8z < "L ^ >{4~b>b SEa%z)2|,"W[([&@8\R($c_D>/wmD.z)3?29#@8\R($oD>{f&_A$zsыA." A'@PF /6ȝ_Pz"@H(4ȟ>~cVw "пH W@\O|.A7 (6Rr( KM:ިPA> k? ]v*&ax#zA`%@&DV[0"4g.ȡtJM7,D//Ͼ/y\DcfM%PAW DS "[;0s$ 0ȟ~{r Yu1MM4[( yߛCDfdWiK۴#1s$ H;?T9F "t/ya#]ZّY^w5'7׎>sMD _D"BEы _Ҧ:"b/E/iO`zoLȄi:}(5DzD/ l);eo}q:`l,"ax#zYdK<;o9>o_1-;^DNi "E + ֩c+S/>G׃ `Nڕ 7!.Y?0s}`ޙ<43m!l!SM9Io8rVLs[[CDPEu~fu[oU;[D&/ !"PEufuacw3SH ׈^Ѭ8q<33iA^{D`]Cf"VUAO@ r[R-h8DGr |Kjjy D`%)8RcO!@A$ D`E%@ "/@r" D/H.A'  K5D]/zDr "?A^"\Oa@A$ D`%A%@Z ".(z 'D HDr "?A^$ " E/\ 2LXe0XRI%]yyg19`0A$ wYnfFaLN2L%@ "㌳;q~PDr "п(? ^(C "|mE/\oAd{6d PH.Aɘ Ry#0С;xR!0s4\*"#H&(z 4ߍO)S1 \nG2=K`{DY_Z "㌲z @s#0 Y_D/z 3]OA=0SqD2D/a)iΛG&d1"2 '=lы`Yy zDh07!d^.qS|LgDfD+z 2Oa90! 
AdAdğE/Af)l;Gc^uC`Ꭷ`㖧nAD/!f)j{Gd=D< `ynzJ)D "-$^2sSҜ7L˜/i^2tS d&@%0\="L|/hE/t#@hs "?A2E/t\ =BU K "+z$3D^6   @2|.AVla K "-z2Dd^8  ٢@*|.ATkah /|D^:{Ϟ>N "?AE/Dk?-|4=D- ^;rkw<:#@( x$ "3tF "P@HD^{9?:#@,.D%ȼD J:ч@b@vlD>NQЃh G;Md"?Az^qЃ(k?Gg_;A$zb}$t tDȁuC "E ]= >}t9=:#@-҄DE˯sA'\igp u"ytF#Xh&D;RE.DEw?w"%:5E.DewE?NDkֵkxhD QMn3Xh]KBƞѯ'2D-ŵ9HJf&p$#zUVYЉKs4GB[DwLK}e>"דCH~=['5ľ.^1QJzi@rۡ痞y?}4"b[ @=- DC/ݭ!G<9W(c[ @}R- D%C߾˺׷k}%Cn @)q  Qhգ2?/'|%=Ck#DPD H#zn Y?*彳dg @(&l$]~2Nξӫ5;K:!@1a+ -ר^~Tg<ɗZ =zM1ʈZA.8+9|fFP.ZD%h؞hN _ sͺ D-!D%ȭ7RUu%AʉZBGe֖;5NDX(&h $ "7=*v8oI'D[ L !DgS|C =BA1 ?>~ "?AJYD  %,"D\Ob@a{w{">9=!@H E"D92o݁GA'@Q! Q wfI4OBV@AdwL  "+ " eE,#Dȳ B"@JAdNUABt^eA -#׸^ @@/|.A XG "_T_~_"@[裠=/$D3x$&/6D9=,8h+ K "]!}$gH%@A>3JH$DA$zll!K kwWL 2k挽s "iAdA^Jlr "4%i AdE^Jr "Ei =f 3DhP0z{2itGHa@"\6b?GLgHȚGaq@K_ƧO~yOm'X(" "C!3bHtn彳k[~yO]'ؽGOWw1$Drt=$ 2.&DH.Nd "k>=덺R "l;hpuF*F]M\a0%DQW@"A$ @#6Ht',DPט S.AVx^f8As5ce @3cȎ)' qUH:\Nlr "Ei =Bm#.'TL4 uR4DT2zHd%@H Rs "?A*o=2DT2zHe%@bH #m=$;p?>}Z @eAo={MD2ڂH%D2ڂH%D!A$zNqR " ՌR " ՌR " ՌR:ȿx<ADTHK A> D@*|.A'@=#-)T\/ DϨ0q@*|.A'@E)T\OYS  Rs "пH:"7ʢHe%@ZIAQ@*|.A'Ա')"7ʢHu 'o|t7Y "пуH|:B ف{gײn&o4A'T+>R " Rζ0p6ƪH%DHq! 
K`d.:D1"laYDr "UAWkDr "?Ad| cq0Z+ K ";0FR "A$z m1X]%J%@QlUZb\O 2s@*|.A\Y6BA\5v+ @y-cD%\5f+ @q..DF[5^+=+ŝ2Hژ7E㪲ic91XB2oU@*\=*F<"Cf"HenYRf] z?$ZU6~Zoo*E!!!VczVa @I A$ @ U>~q||D AXU*cuۻk H R hxSD`=d|2H5xurcD`=dt2H5t"@.#DgOhOujVzg>vqq #+R k_n5 @%-D`d@a/W'_} /}+̤"0:H5ly 3w<ū_|6O "ѓ'BH5l_=1n϶ Y?El D`l@aC2o,_z24A'؊/4T- nC3j;I@ rL̃cL D`tW@A򑘍q""@]LyYED "0+ UFB6l>hF "S'RHU&\edH^F "0K U rcffA' /5T2̃'dd"%@*Uv"7\K7L D @ٵj 醐K+D @ٵj \˟;3UOe@aܱhDzP/DOв հA~@-"߸xȃ[F&K "bH5p_|EY1cw."bH5p_?~dL DMh[j EduB{DN D Dj  ɜw4mO%W@ɺzrQH.A&V6{XD#( Ovɥ2DA5g`@A[Ad"sx S e> FPpJ;Ed&=D Dr>+R ߾={D?6)R5_آʭ7TeoWIEޖ "?A;`GՕ[oirh Adnʭ7TbrhH =iIb  X.Aa\@Z"Cb  }X.Aa\AZ"4Db  mX.YWHSȼ[plr0}0 "sf)R؂W8>s<:c+  E)HSy)R^|{{[9^0C3{HSy)Rm~hw?w9%Y!D̞!Rhف=:v/`dHFi2/@-؏lzg67 "D#J%jdOyÂG2[Y!ͩDg!sRf$!o7"b/ zHcTf y[>G"3yjfA"̚H5pvjwnxzw7z`3 0zHKTf {;+~7Y YriG =aBO,:TöaDy*f<ͻ9G A\@Z!@" հ}تxu{So"?A"Yt 򫏘b7L +DKKUj এY=S3gf J:TUwl>yW "ĪH5hyYEd/D*ReoAW:DOЛ K "HA D Ve*SfTu/UzP8DϖПuj Y1O` հAbD_w7l1'fA$zX[D}˳AD D Z f |iEGy S @  A~P/x*fKW?  ,d4UxŌD` Tb)8p?K~J;wx-Ǐ?xD+R8ȯ^hN D \ B%@Wb)Dr "пA$z^YDr "?AYDr "?AYDr "пA$zEPI p(|.A'+r8A> DzEUY2DH/OfGUY2DHD9q懲>(c%@.l?`Ђ8A> DzD&Ђ8A> >}(D)[,+bK`W>"=DfԒ8D-ڋUΧyE`N"]PDԒ8DWw/.ȑN=/~H<43}4!g?C޸u>m^}Jir+n;;9.Ŏuv\k}8\O2y52ftMcl=.C!tBbf7g!3*"6\ "(DgC&ܲ˭rt֮קּ%]6\G =d-k ޝ=8G>gn 9d-k ݝww"` 5"C&ܲϯ⏷Ќm.WCzR&DO06~gDysϷgt}.D!@+ .lN_<-"\O (A[ro8wɴ ҉"A$zi(A;!w\_LF\& @3J.m}ЖnwL  \&: @3J.m}Vgy +DHk`A򁘽oYed+@hG p v-!@hG p=A$ DE7\@!a:.odTwx*ARvy\'Sf2tC]6?)wɻC#/?DO0%8Ն 7<3|bf D%8Ն W^o A'@K /p \H _ADGK*g p{UOV_3L M)2p_=Xå=? >cF)D)8U򫷦/ 6 }vyg D "D&R4l?9}S`$@m~aPFi ٵ%ADFCɨ. 
_s9A'N*BFya + "Db DxRн =3LQ̜Dp- ҽ"2Au;ZΧ-|,} ,HL"2Au;ڰϷ=3f 0{CX0 =1LP2p㶼E䝧eK6sߥA C&҂Xm[[LlfNAd ~<{jfqKNfNAd 驴۞rOH/-IDE6!Tk1lv⭫L 2 rZo "n!L 2 zZo"fNA$zZ|vL m|!DM=  (N6U[?vC<){mDxõoHWoؼD8R5AUoؽD4޲0DÂHsPqsg%@hUŅ̝A> DU>0w\oPaj|`  ͪy0돏|'COf\>^\8i DY5>0oi$A&`Ha..~`Βe~uhD,e=d8uA "ЮACA&`@a>.`}MY3"?AVwu Ek[['@hX~֟9:AD'Dsu `7~t/*Y/ -0W/]>NED "вsu zBVo33/9Dχ0/ Al"?AV`\q2[Df93O' rō /["@RHt3S`7}s+VUA'@۪#QvYmU> 2}E?8/?uY̌ Dl}d0GĀ K "ѳ!V GDr "?Ad EG%@ [,)0?H.AD'C,D FA$ 00Ad`n\lRDbDr "s_)DK!Fe{ 0UPAR IL޽,ɒy.a0S9):X0p3vfdd{[u c}ob-H1"NgtY8Z*OcEUO)"hq "UkCv$W!G!hr "U^K "SC&+DVN2&X Ri AdvHz=dmA`z!hsu"U+Df'ڜ`QKdoc%!D`mA`"5:**cU!3D` A`.ABDVjwIAX[ADOJ!: *A'Z` { *A'j!> *A'i{#Y8dW "qW(WKi~$yU "qԗ$HY#țJ&AdI `F^|U\5="5?|<<c? M "kRDLܫlq,4$iSDLثhq4uI/?. ,p(xUy3L.NDV+p(xUy2LDY *9d\C[ʛ`zȺ "Ǖq%N0*fy"D+q*xUy4LCY .9dXSkʫ`¾ "QEn0*f!D`A9< *AyI/>sD`|(t0YUO0 CJ}D{Jds0 CJ `| D`HD`|; *A'bG!_%Sh30'A$rW!_%Tl3 ʝ `|| D`PϐD`|Hz0:CJ "0FgW "0>AF< *Aw7wGtc3 J`l| D`X͐D`|HzD02CJ "0FfW "0>A= *Aw'7== *A'pG^CC|2 Cƒ>"<<#p ^vAKGw!CI_`@^U^D%W`5n "]#xA9޼!g` 0*^D`|/}GUO0CJ "0cW "0>A&>$h *A'҇!_%@1 3H_`0| D`K ƐD`| }IUO)O 0CJ "0)bW "0>A>%X *A'ҷ!_%CC1 sH`(| D`cĐD`|L"}LUOI 0CJ "05bW "0>Af&@ *A',!_%F0 Q[9aW "0>Ac=ҟO 0CJ " "=K`| D8*}OaUOamSD'0 *Ac3; " "K`^U@gDi{'i/}PQx(V "rMaM@LDD ^Wb6@~L'{/ 0o*om^,BvD( :~? DO_@͏;|Ia?=d;OzG" /}R1x VyhtIA#O 0/*/m3#pҿMX %=CC)f~"_O/_ϻ[<$pς%}T!D_H"?xjJa7Af*}#wz_6@D`6`GM_w~7{<:Fς&}VLHCǝqDN'}WJ/u_~KA~y- ˿g 7O}y#_/2A I`S?=~ D`p? "0e7Ç3"?-" gA&,@f_귈?D`lߖ I3_ǽw/??r _6۷$|ҷތC^xyOZGAVm8 "0m70/_/o +>Oݛpay/ pD`Bn!7|0ޯfG}ML(}]M8紐A_7nD`Bn!?}Ͻȝ?c_|$^v 3Js-hDcilD`Ftn!?|_*D^g 3Js3K%Lu6 "06?!A&R}p׃kGAjD`Jm!_"CUylVvIkbCB{?&<7ڄC|eD`XM&>2@&TgO.AFeD`3l!=߽T7"҇لCς "TwɚѮ"ҧلCOEשN!Gm[Vpȿt_/I//=j 9C2LH_c3{?uS}!hI^_HG،Cޯ y!vשD`0{"N ҡ5i?9T_"0'z"k!?"˃? T:@!OEߥoЯ)rԗߗ?t/=*}@#!OEGnЭ)ߐ?/y&_DaD"ҫ5 ߐWǯ??>Pg|Nr9 ҫ5ݏ?/~+q#IB("<*}x^|i"Dv}? +E6ESjψt 0#i ¥N/Ыyߎ7]nƻ GC=D=Stj?~K/U]W"}C8DbrHҷ蔷c78@!ҧ:X#p N'*p=CN!!}JO^U^1"t)}~>y=Vy! cg8@^EƔ>@@avD`atC fAV@@< :bW "0C6ECtĐD`1ϲ%aW "M8\,"ǰMHDZpi9))"]I"U@n-'xBzHWҷe Ѐc ^ B ҕ-mYp3"ICPD>F@/,vG? 
} !"=I_#ޕU{?$ҟQCzrHGge9n~9o֐B{zHOWe9Nn!4't$}NxVVy\L7{H9={t»bDz!I$ޕUC8E$ИҏAaYp-ASDr,*}>xXVy\M"9֔>I@,P.RC`AtӲAI%UM8T@!_%@/"AYRP9| pǔ_D) >T@g pהXD!ۦĥOY%=l B >U@g pD1H\T1ޝU1ҟYD1L^X)^U^x9C9zH\X)ޜUvd "!icxrVygӟ CArH\\!^U^gC8ݡ9$$Yp=CIzHZ^!ޜUu ;6䐴29NpckH!K K, ã z';X[d^U^D81$' z8™!A֖>Y@gg;i\TNst "<;9D`q$xwV "grO9-$g D; K- D; K- D7ǧ K.=CJ_B,/} *A:D`ygW "Й' OМ!_%@gB>]@s| }yf ""13;HX!_%@_A r *ADEcW "Еf "CJ<5@U$?+"CJ<gAVcW "ГV ,j *A: Do' hʐD#O A&}€ *ADo7 hʐDώ A.}Ā *ADږ I1%CJn<: I1!CJn<= "MА!_%@7@z}k*"ͤА!_%@/@6E!1 HO ^Utz4H2CJ8:$УMH2*A  B),Dp3Pe "tiSD" hţJ8B#'A$"}̀VIO&T?C hŋ`wa:#)=$"}΀FGv.T?C hsʳ`<̥6z%DІhW=^zWAn! 9ZY¥jG_zH@mxVypD>i@ޣU;!\8~I K:#羅Ђi=~WnAx'}ԀHCHu";IZipGK5O*A G󕧏 >k@ޤUQn&gOAx/}׀l *AGnp=CJ J6r| a' A>m *AN>p+}ڀUd1|qfW "uD !_%@)G>J7b| QA(}ހUD2{y.fW "tD;!_%@9GH8Z| A'A#}kUz.eW "tD{'!_%@YGJ9J| 9gmA+}+UĜy.dW "sDg!_%@iGa\[L.} U7yC4&>tu *AR[<zy?҇!_%@ȉGaHQAZS\ƐD #0mSDZH:2| gAlo?ҷ!_%ri#0Mi&}쀫xV "7E9s"ff7k H#A" p/*A=wFN;QDI;"Ux? ;u"i'}xVyu0Lܹ#0 =Z֦ȹkGaDzH3\[ʛ۟NuDNTxCǎ f'j7?z͜y%uk\{ʻΟ:@^D>rKGH>|U"W`"#΋J.:H3h삡#Op:CJ:Pg3h늝#p6CJ.9X'3h꒙#p2CJ9X'3h隙#; p.CJY9C2h袕#{O p*CJ.9G2h窑# p&CJv8Kg2h窍# p&CJf.8O '2h沉# p"CJV8S1h庅#;!pCJF.\8S1h…#{O!pCJF.8ڶ'P1hʁ#0ogPg1h}#0wjS'} U4qD3U9_'1hy#0mSD>I *AXG{Al"r9NE[%+rhy#0MR D|\nH"p*AXG{A"KD^UVO'a!A("JD޲U6ogaWA"JD^UaWA("JE U_՗jkqf`MN45pX:U^U(VA8.}"*A.d"qU\ͬD'$PcW "p6F>@!_%AxFD%| j4j U\ըD$PaW "pVF>@!_%emAxNL| i6i93 U\ݤD$P`W "pvF> *An"҇x!_%E.AxVTO3HA#JJi| h9hic <ːD- <-},gU\D%,CJK43dW "psF> *Av"@A`1B5#mKY&CJ 4^3 MI/&CJ 4^3y}*"HL)| he\ }6'xV ޼52K "҇8Kb0,DX rЭ2kR< IDATlY%#= [e/ӹӟ jO'pgn-O 2CNaU0=ĐDXrέ!%v zӥ'pnF!,c.}>t,`:zY#"=l [e2CHA8AxV 2+FN>1޺U60%ЊD3O(pn i#$4b  [%rوHA8EGxV "pԆDS(p!_%ibFN| mA8GU&6a3 `W "p܄D)!_%Yr FN>~| ' .A8KU$8`,S fW "pDӤ)!_%9EN>^| A8O;U":_>| 'A8QU ^L bW "P/pMv1KA8U{U +}U= *AEX;UT |{?øwD*=\cDJ҇x̐D(=[~DG"e3(JϖmHUU0(~WςiD1\#=ZDWxW ".,"Ʀ"}\G`HnHOxK9Gx W "6Nj~w)"HWo*Џ7n@=Ds,5O*ЦЏ^y!{z)pE37aX]zD== |Cʠsk 7SO,%*{K֖+Dr\eO!QAn!HY+U0=~@9C_2(!t#=U~'GzgpE H \#}g/x W*?"E҇pI \$}iy W ".3 AJDQ9]G[p>5\%s#-AL1xJz#I_[3| OIOw:s |D \'}oOU<#=PD .p!_% \(}q *A'7J 
eW "p\zD+o.p!_%qqrK.>=| p18,=M>Dk.p!_%Qa \+}w; *AJb |dW "pPz!K_^#CJҳAZU%"ҷD(G.>-CJCғ.A\ *AIOx–I_!_%Ar N/pÐD Oþ?b0 gW "p@z|B^_138 =G>!Aۦ%ۦ<+}7CDKSJ0r5Q "C \er:!D= },;O?g8D)HbwUЛdNMz|Eco1;*{̇?ٝ@ !Kc7U=n'ȗ8JyZP28!+=A$azx\eqCTz|Mg1SzЧ 4> o* BAh(}U6=R$y,W@&z"@K |\%]rLz|<"M2r a+ 47U< "@[ R'# 4>̀ R'# 4>̀ R'ٱ >̀ R&ձ >̀ R&ңcAh.}CJ7\8| _JO]yU|%=8DgW "GVgW "I'gW "I'gW "K"GfW "K"GfW "M2gfW "M2gfW "OKl0U|"=3D K +3Dzf ?S 3/=2D|_Xú *AKo#8ݶ)"5ːD8Dm"W\ò *ApuAza"ɶM-}aYUОAεm D"pVu.}aUU08HS}"'=]ey]? |.=.D\1 򚮲J=/>G "p2=ц5yLW%@["0AΦ>۰$o*hGӟK/8rHlÒCJAx֖}JuX!_%=D_ЧY%UC _ѧaUwC OiIaUKφ"mҧ`W ".= @)"_HvX!_%'sPNz4AMR\. IOS"ܶ)";LϳJN0b8 EyyuW "Ɠ D5AdytW/~?얞 'D9=dysW.y< J"ОXyCIzvCaDpAR҇f]e'COz&H=yqWY.YF gDn=˓tN0F8 t+}a^U p='©_s2^9) $=%J{WwEap.AXì<T Hσ "@'&]%rODzLz>0)*Aks2D>LɐDXPzN>0%CJ`=ap>A\Ì *Ags32DޥO?LȐDXMz\A>0CJ`5MpA^| *AŤ'%1֒D`6| Z{0ƐDXJz\DF U+I"з-:U+I"е_ЉŐDXHz \F}o?D'_0CJ`!pA:NN`&| :;:k7_0CJ`p!Am;!_% 6EFaW ","=.%@M6yUKRi@]H4<ϫ`ѥ rW fu^%. CJ%$<Ϋ`ͱ`trO+&m^e;lK?&@{_ 0o*x,rK'S42qG6EDczy*xDap'0 ˼CZ߆  (42q~7"JCͫl`=q"_08*#å`T~+0Jvq(Sߌ )5C>D} ~7#cJM<Ы~#JQ *Ai "_00CJ`VG~K0w ̐DUߒ  +e2&~7%J] *A9m "_0*CJ`N}[0ʐDRyߘ ,2f~ܷ&CKi *A "0mKɐDPiߜ 3,O ɐDOaߞ xy?EJFdW "L'D`|rEːDNU %#2f~'"0M2&~G"0mSD_0CJ9L&D`pۦ"pw| sMA!n{"_%f~·"0=d 1^U `6| C^D`,U,wWIOAFHPl!XٛW)AHHU"yW "0HH?{$2V!|_e4|<.~Hu,Cchټ&? \ $  Y("~&~I,C_e2lnhqN "U"yWYL0AD`1kEү`:~DD`5kAЯ2`>zJ%r!_e1|~wL&xWL0!=I_=DU#UFHaJ7x`o#~3763J&HA7O9AXH z_%Mtү "JI o*AD| @/ "ZKCJ{w`1o&!_%н{WtD]=AXN *AΥCD ΐD[=AXP *AۃDOm[\&a| @ҏQ"g=ӟ*(3:~kC>O ːDW=AoM\$=Q| C+kI"wmo?5TdT "0+{$p׶MDUdT "0{(p϶@QDXaP%8Z/"ADa]AIh" |JQDX]Pe_ 6AףDn{ADaQvA}CxI\e=Ac(ʼlkI$,CdTW0E_#D "+ + Jo! "'V!+ +":/1 "V "1 + *A "!1 +"үQ ""1 +oa "?+"02`neocДiP%8 "ߥɠ%۠Jq[٥_#D^ˠ!J7ߤΠCJo%E+D *S*BE2xJ Iڼ`S{ a۳w^{gIڝݾ}a' b%ϙDR`&X z=A!Q a%^s5A> @rDI\]~SI}d9c "z\A`9L% f`%X5s` H\A> @ZWeD \m~"H}x3DH*0+Irb%@b?*z\ Awu Bc\A" i\r1 ޕ"y1HD %; KP. D: M!.KtvOIL. D+\K"84W $z\AUhQD()"i 9\s~6@rY VAD\X1HxёR/K#!2C2A"uK8. +\K"((",ʸ<0b"BLLb BR $(("$!*H0B"BbBR/$)\K" ,!T)zP0B{"ByLLc ^ J8/1&fe~T'R\0-D,ARQ ">”HGURK& X2KDI5pR a:XH. 
0T"L K`D$2DA>  R}')<#R?qRaXz[>AH`=I~>.$ .-f2>4 ] ^VAwՐL} ^VBfwu(!b:b "0+Hح . ;CG,L(H\`R/u!s+\K"dS9\~6׹DDfev"p,*&0Bz"Bn0E^EuULQDȘ# &#@Sne`fkI|9b`:^VGVPDȕ# #FmRt IDAT`~E<8b`:W^HR(\K"dD QNDȐ# & $SJQDȏ# $Pԋj "H}0X7ڥ^KB(1wD`R&z[1A`K`"UD`  z [7AA_M}< zZ;A@6ћ c "KtE2:ԇU K#pN+OGVK#pVE+ׂYp!%a5٩ ,W++xWE32AÃvgKzʚ ,^vADa(zGR/UD˯\K",%Yz "pFe*{t r#N12A/Iר4 "eD5̈˨zuAX|{ȵ$bQb }R/M9& uDX(#J,zYJ ,^ADaL(Lzp R/I"9ȸ(", %IND-N$Pb5^CoDXCJ,bD@ )8{W $*H4A(2?^ [X^|r i`M}T5|,A+R/<9KJOX#30D/R/:@Jtu 9 xCqpDMd W "%ZrDHPpH=zPPI 5 C` &Z|DHPpHEzIA(PAD!%CM,!qH$ gYK}bfXfC*"pWDdCn>jS+3M,!D`J "@y2 "iib C^S2 'O>~S!3M,!5qz5HPzȵ$4D3R$MJSD&ѐ!pJ5$1`!A$}5(ЅVadF "p)"|RP-ȏ 0zZKf%p‹/#gL',Q+!~.k)&g "SzbY9c_ (* '6|&ZE1j2rƨ!%^;QA`>cTAZaϨ,C‰ ""O 0j2r E. 01ec z}bY9c\PC`S 0LR!-V,SkvKoѐ߯0,, "\OD?#z\SҨ*[ô.oѐߔiwYF&2b "_vÕИejW2N-?0d \ %͚h2d5aU+g_] 1Dq*m! Sߢ  6"[\"+?1dn C&ڍA$t Ab?ٍ!cȞ GCƐ?1dO# ؏!{vcȟ'Ĺp\g7ُ!{ɾvُ!{vcȟ'y7·ܐ?1dn C8 ձ[LOnȟg7ُ!{H3;N-&'7~ ٳCǐ=A$NP2v CƐ?1dOޔ꽛_|q-&'7~ ٳCǐ=A$RW8[LOnȟg7ُ!{H~+c·ܐ?1dn CHs;?sA·ܐ?1dn CXZ'|Nh]%$fI CƐ?1dO;>~Wo1)?!cȞg? "s ?^;wGg?ٍ!cȞ m3^xϯ\=·ܐ?1dn Cx|a 綸(?!cȞg? "q+o| rn 3>"gQlyufW9Prǿɛ7JW7:-rD: "@u:PA TG#D"@u:PA @{Ν;H,1˛==SB'o켟~#9C1暉w"n\|}2 m&DA_s腿KAw!wPfbAdqD K?W0־( q7 "K7, Kʮ y{2AJ2L,,R~Cv6!WϏo.~b[hov_wa(ɜ3 0-@V6?_?ńO ` Ȧ|5B8CQ有ekda_ !a߅8CIfE~XJ};3d^kjZĜ"p; "ir>Ys¢wqUD 'PygbAdIVAP_~ X,a(3  [uLĐa(3 , 7۬`֧_/S!'PgbAd9odL}5HA!CĂb<.(D׵eY:ag8C柉X^2uA I0 "Kqo'.@aѺGy#@>!C fbAd!?(@V?˰lW D w3 oZ EXHoSpAġ2 I2 "v0(z)_a(0'L,,½U 류 "P aPXY10f)z!@y!CfbAdVs {?^/^LD9CĂȬߩ ۟4BtTEEs\ "DuQu|{7;;!+-AbÕ C]ռ ?/Abb{ '] Y@N!c ] uP{oʫFvq "ͥ\ rsOwq"D GK1sܬG|}j2 D*&@֟]6AA~6,oDȇ0F LTv\Zӓ.kq"D ?'Œt;+3BF8H=k1؏[;0dA#TLlZ 2utC 'PAbdgs k1_k쵯k!'PAbdħ ɛ7CYȋ0GA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PA TG#D"@u:PAjgݺO8w7w_l}W'>? 
5 "03}?{ַ?ݿ ѹ ruR?|g.D]=@۝ 'G&@)w50?9"@0Aj4$QD~7m r&Y7DD`8Aj4(TPD m.z O^RU&Gv$ȃ!yIUAFDFAƣo 䏓3s&@N |Ad{ySglƈs&@ݗ>aDU=qǃ"L "x_"/3ak%UA "î9ȃsW=O`=%3lO}K1%A "P'#ܭ""sm{s]ǂDFH{zݗoj=~uQ>zϽr|x߼}s_yOw^Z=S}]&ۼl_jM+.z& PAjt>t^qw~t~l 4`'[0$ܽ:j^#|fnד1kp6N'_.E "j7_tkAw"~O𣷏1 +iAg_jw}4ZE!/"@F]Cys伭x5.vAAg/_lmߚSӍ7N{WNKE;6> zPAjt6t|%{]3s{T䷏3.AA"o{:BȀϼN'G璪A 2@Y7:ё6wAd]lwn[͡ËMDo~-a 2@Y x կ-s|cu+7x5.7ZV|)"Gzr}]ؽ 2h/Ƕths*a|F/"@팼J3 %(>7~B݃Cn}y6G#=I7ݡ>= =?wl\n@Qѹ wQOtt\~·>4KE#|yϻפz 2xޗkڟD,%ب 2"PAdw{W 'wKowÞЊ ]yt~w{nccHEDF'/vW =߁cVA:.vAծ۵fvv "7{'t_;u9D-( 5 "Byv9bD\g~#qoRO(D-( 5Նfxx|َɿw:`]];csOw^XG ؼm2yv؈ n@Qѐ u@[_ {qhQyнqZi_jibLDB-( 5DyD7޷&Zk8A$`"?s΄A$"PA+Ud鸋xߚ{.wqD:Iw9}G|̶w,<[PAjt&<_[Ȁ).&:(쾢={@森:yf|&ƹw,<[PAjDn~[?u]P9wHđHc%D`8Aj4rO.Xx z(hTD?1.2 "=~%D`8Aj4&yXxmvՎxNGwf姾Þ r'%D`8AjDZS^]}w1EV)zȠGeUQ=  (8o=nDAdؕN{Ƞք6O߱A$PAjDv+Gbp )vww~G^ѼjN:.]߬ @QhٞܚF񀻈 "X0쥁|IDATH棃?%yZA}FлEDFAd36w}bsOgH]Dͭ:š y=w "@f7 rqAdsbŠLD6EvCd]@:>kA$`AdsW|a;m^tˀw "@hhٍAd]D~G=a]AdAd sFcs9ZY~qM\!E?vn鋪 zN[v~#?xX#l_[((0l7zqH]mJ]hՋJ0.&"}'H>< 6 "G>w0s3tD淛lypv>Q Ap| ͳ0}xq}~!Z?zgI[|Q!r9s|. 
=?7Vޙy@"w֧Kk3*Dz6gy.T,e9,l ͙G+ P|8.,DxCM ͙GpY(Dv}e<K ;9Mh^o<],"6|gۦټ+d]C_ycv'<ҿX!1a[pQ?rX8R,|t[ ==HyoE)D;ED+DnZOwB¼"Kx}wBd{?G!Ra[pQ 'Lxp~!Id(D*{.J!WDA!rk}˖ɮQT\Bde};^rf';VT\Bdw0Y WDI!Nc(D*{.J!R'>qy'ڮ'ŋ?ϼ=ۿsF՛[>P"kftz]w.c"]r{_{\./Q8FBKf׵{>!~Q˝W} n7M>6 חƋ6n} {6e[qO=g_Z];t΍t9/#sW;[?HOm;cQX'E!ya9?x>pg-y{t8|0yp {;PmOX =%RUt+g;h!rζZE oY Л]:`, =D8n魇{aBl[y׏a>yGzڑBm) H_7𼥲ŹuO1X"+Wr8;f犙[}kz ߮]?~(lD.~?ecQKqNjkW KIvʏBxooK7,;p'4؇0\"E]٬߾v6cq||ƢY~p0|BܔzcG݃ǮSof3 PR7kz޹0>Z!RE }|޶wퟸT8Xm>cQ,$;u1'?{c{C߱c}/rx6+ū>{-/B֓a9<۝k?Y\[O|p Sc"]ypWeW>;[劣8X׷?;`, ;oad/ڃޕ{wc{ݭX?olpB6TyaK~A!9<__/&9~.L嶿 " lΑ<^Gs#_ZFb"_ycߜpp`>L?="}<9͕ E|5vԞdJ_܁f捶]wh!R#"]{ݽFd ӳ|k)KX!rA>9p^~Iɫ>q}X=<}hA;uS*t#"vmlr/Vgj@!yMA:/X l]O1X" ^skzR瘯se}9^mwϱԃߖ +)-D>9xV#9kH!R .Pti Wp ?[S 0]9W!i_v^1s7n7?3OvP}Wt^^"Ol rpI9G mo_W_\^ۥXo]O1X"ko}c?o;GGW ~͙vLw6}P=wV|ɟ{>(c))FBU>[S 0=86z:rܯ W̬d> \RrͿ~m]eѿ9_n`) -D_t ջǢw}Ƣ;:%v(azTHO'\Ѭ5پp!RTe-D#;iBݧ`, =vߡc53C13t,}fzw5=NTM #cwsBt"5U"e ~sBݧ`,a1OvlQxcϿwyW%~w~dE!һׇW&e!y"~Bd~U"1 _RSn >c/DScs?wv;+fJϠ^²BdxysC3H.C!R G #꾭k)Bdio؃O~CGv{׻rpv sd]o)D:e!rC ;-,DJ}BM!R#QlO;?l:pp̅%o~bچvy˟(D޾) nBS}Bt1F)rhGzw?v9b悇QtO!RׇhKfV[v F/D>'+D`$ [AŝAo"GWT ]ֲ}(D.׷l؅HH!S 0z{w?"{Wv+7R c-dșc\~|nO1X"[׻Ggۯ'^{Ρ3Ǿ{lS+7-D֯>tx}f93ȅHH!S 0zs>}+fv[ocͯ,\9z=ܸHo}ƢCȱCywVO?ο^u,>7tv+K(D;^o_zy }[Tݧ`, ͷ̬Ύۿ4}WkBMn~e0"#ۇυTap"՟꾭k)Bdcdaq暙w޲\[Oަ;kTo~e0"HMp"꾭k)BdcuP9;;;1ny(K|_ZבҽJW .R_ } !RܨHo}ƢY[ Rvãw˼z~@s[y "}} /W2]B*"ޭk)Bdesܽ9dSֶG=kՏvZQQ=D7.D6zx̼֝#"UZn?cQ2ǔcڶcQ?ּ^O79[wpBͿ>/DN9gI=v *D*؜FHzh|Ƣ9rd9s{V{eTnMґF/EmR79 Ob5E!c ç-ܘ+|^^0x$yuHn/0U!rdkt~qέD.X79 ObE!woȕcdz_py},m'5Rfod9Ή|Zσo~ayapcmZ֍)85H ?fD"-!rF"-^1LB!Ҁ+f`" ,9K(DX^1(DNwQm(DNkYSTRʭ}^D`"5'|)DN> "0c +D| ̘Bd`. ɼC旦l Շ)DNڵkC`"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@_t,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! 
,i 1'01@sC *XbN`c9N!9+ "T0VОI`C t 1'01\A]B$I`C t 1'01\A]B$I`C t 1'01\A]B$I`C t 1'01\A]B$I`C t 1'01\A]B$I`C t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB  39 "c9N!9+ "TP09 "c9N!9+ "TP09 "c9N!9+ "TP09 "c9N!9+ "TP09 "c9N!9+ "TP09 "c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PP[A{&']d 9'0)Ĝ<ǐssuB &']d 9'0)Ĝ<ǐssuB &']d 9'0)Ĝ<ǐssuB &']d 9'0)Ĝ<ǐssuB &']d 9'0)Ĝ<ǐssuB &']d 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]Yc IDAT%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *\[z+h$!:rn.PPA!$!:rn.PPA!$!:rn.PPA!$!:rn.PPA!$!:rn.PPA!$!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!Bk So활v1@sC *(DRv1@sC *(DRv1@sC *(DRv1@sC *(DRv1@sC *(DRv1@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(Dpmaꭠ=.2bN`cȹ@!BH .2bN`cȹ@!BH .2bN`cȹ@!BH .2bN`cȹ@!BH .2bN`cȹ@!BH .2bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! 
-LgrEƐsB s 97WP(DIarEƐsB s 97WP(DIarEƐsB s 97WP(DIarEƐsB s 97WP(DIarEƐsB s 97WP(DIarEƐsB s 97WP(D`IS9y!:rn._|;'9w>d'eIS9y!:rn-7/o7*D&gIS9y!:rnt Ǟ>D!26KB s 9'0)Ĝ<ǐssĔ9Gq,i 1'01@sC"=79Vp|D!2=KB s 9'0)Ĝ<ǐssĄGVo^߿ˆ|>))@J)Ĝ<ǐsB s 97WPLW|y3,xCR,i 1'01@sC"\lDOY'gKB s 9'0)Ĝ<ǐssudȢl'_>zǻO/5uW%M!9bN`cȹ`Bd1{|/; #} ͐CR,i 1'01@sC"9l?>~䚙E[r{%M!9bN`cȹ`Bd13>W )YbN`c9N!9+ *DʏdǿCR,i 1'01@sCS"_>겘?o~J)Ĝ<ǐsB s 97WPLZt蹯C/cb EJ)Ĝ<ǐsB s 97WPLU|w_{uMg[W7/Ƕ')o O'Cw>7YO Y"6W"kf&"Bcq ׯ_>?vBdq*S|~٣+f~sy~W.~~O=S۱>uƯ۟Z3 rN`S9y! Z $9 э;zY^1sJ__o:N!{򊙾Bv^G}9s\_yg"wSfN!wY["g}/3bC t 1'01\A/1Y!rx~Dzۅ,')!\)-ffy@!sZꚙmUGdf53+fz WNu/D,.jm!rFgb~ٻGɪy ȼEƐsB s 97WKLV֣"'wSՇF[ Q{Y]1_8[rS՛}̬9,DzFۇ2+Dz_pw<}s[ۈA;ȧqέL.2bN`cȹ^bBdYhң#{e +f|Ni%"=̬/D>ܯrB皙3Ⱥy}vOx+Ox~:B<7y||YE.DoO>'~_g="f}g?suԝ|i{ӿ ̚]d 9'0)Ĝ<ǐsstℐw|/sه ]|ѯ;'7uWhK)Dpx#ٻ{o:\PM!bԸ<d{'M!;d'5ڒn G|~q"7f9|ƶy`,UBO\4C?(Dik(VJO-D`Ywc䡞t'߹{sys抙BŞo'?ϜbN`cȹ@!By/iO{9!9'0)Ĝ<ǐssuB s^:D%rAD%R`1sZgNM t 1'01\A]lQg1sZgNM t 1'01\A]lQ7Ϙ93&:rn.PPaKZ݇hDi̜|sB s 97WP(D0%@!9b1sRgNL t 1'01\A]Œш1ט93%:rn.PPaKZ߇hD6fNhI9N!9+ "T^ш m̜М癓sB s 97WP(D0%C4"f3'3y@sC *{Iш q̜̼sB s 97WP(D0%=7jȀz9%9'0)Ĝ<ǐssuB ^9hDJHz9%9'0)Ĝ<ǐssuB s^߹h!1ۘ993'%:rn.PPaK;/D4"34y@sC *xIшk̜ҌӒsB s 97WP(D0%]W+D4"]3fڛ<39'0)Ĝ<ǐssuB XRf1όA t 1'01\A]xsѨkD&abN`cȹ@!B9,4HZ#2y̌`(@sC *\[v"c09 "c9N!9+ "T.D.2bN`cȹ@!B" 09 "c9N!9+ "TH.D.2bN`cȹ@!BH .2bN`cȹ@!BB$19 "c9N!9+ "T~I+-"arL?όD t 1'01\A]K:e!Ԉ3#sB s 97WP(D0*DarL?όD t 1'01\A]K:i!Ԉ3#sB s 97WP(D0[Y(DN$|bN`cȹ@!BɗtB$19 &g":rn.PPa%iDLN癱9N!9+ "T|I"#19 &g":rn.PPa%=h]4"&'h@sC *L S3sB s 97WP(D0Π iDLN9N!9+ "TxI iv1@sC *L(D2.2bN`cȹ@!BT!2.2bN`cȹ@!BtHD#brEƐsB s 97WP(D0풖 09 "c9N!9+ "TvIR$4"&']d 9'0)Ĝ<ǐssuB .Bd<&']d 9'0)Ĝ<ǐssuB z"I`C t 1'01\A]p NhYLEU8D+(Dш\ *(D"W/DnJ"s*D]DLNrN`S9y! ,Dʊ iv1@sC *L*Dz#brEƐsB s 97WP(D0*DdrEƐsB s 97WP(D0ά⍈I`C t 1'01\A]„KZXS(DN$!:rn.PPa%U$!:rn.PPa%]!r.2bN`cȹ@!B閴PI`C t 1'01\A]tKZRX\F$!:rn.PPa%--)"arEƐsB s 97WP(D0ݒc"W19 "c9N!9+ "TlI; iv1@sC *LB4LNrN`S9y! 
&[bB 7"&']d 9'0)Ĝ<ǐssuB -iqE9 .2bN`cȹ@!BI`C t 1'01\A]TKZP(DN$!:rn.PPa%-o(F.Dl#brEƐsB s 97WP(D0Ւ 09 "c9N!9+ "T0Vc"W19 "c9N!9+ "TP}1arEƐsB s 97WP(D90b$!:rn.PPA!rhI`C t 1'01\A]4HM=9 .2bN`cȹ@!BHc&']d 9'0)Ĝ<ǐssuB ,iM;9 .2bN`cȹ@!BII`C t 1'01\A]$KZUN(DN$!:rn.PPa%*'"arEƐsB s 97WP(D0ɒVS"W19 "c9N!9+ "TdI iv1@sC *LU$lDLNrN`S9y! XҺjB!r&']d 9'0)Ĝ<ǐssuB S,i]51M!r.2bN`cȹ@!B)PI`C t 1'01\A]KZLLT\F$!:rn.PPa%+&"arEƐsB s 97WP(D0VS"W19 "c9N!9+ "T`I+{ iv1@sC *LB4LNrN`S9y! &X^bB5"&']d 9'0)Ĝ<ǐssuB ,ie-9 .2bN`cȹ@!B񗴶ZI`C t 1'01\A]µ1߱PI`C t 1'01\A]B1ץ5.2bN`cȹ@!B1cKk&']d 9'0)Ĝ<ǐssuB \LNrN`S9y! "G0v1@sC *^Tw"W19 "c9N!9+ "T}I+ iv1@sC *ՕB4LNrN`S9y! F_JbB4"&']d 9'0)Ĝ<ǐssuB /iu#9 .2bN`cȹ@!B2I`C t 1'01\A]KZ_H(DN$!:rn.PPa%/$.DJ#brEƐsB s 97WP(D0 09 "c9NsaS IDAT!9+ "T{I +҈v1@sC *#"arEƐsB s 97WP(D0^I`C t 1'01\A]Kz6B!r&']d 9'0)Ĝ<ǐssuB #/iv1@sC *h#fP\F$!:rn.PPa%@9 .2bN`cȹ@!Bq"e Јv1@sC *""arEƐsB s 97WP(D0^E!r.2bN`cȹ@!Bk cEi?{ciZw> ӸHd'`` NnfT>pi.ilLcJ JŘ2>:KɌX6>;"2sHXkyEb30fasDbuB}lW$9Xh c&.08(ᬃȤ)bCa˜ 39 "Jp :9^L`)>cs\ 3QCa˜ 39 "Jp:9^L`)>cs\ !A!0g MaÜ%H KD־H s&L`1̹]`.pQ9t RxEb30fasyv "+_DlW$9Xh c&.08(G:mpas"1̙BS3}0vAD cs\ 3A!0g MaÜ%H' DVH s&L`1̹]`.pQt RxEb30fasxG% "k^DlW$9Xh c&.08(G:qpas"1̙BS3}0vAD ;ҩH Ca˜ 39 "J8ߑN 5x9^L`)>cs\  A!0g MaÜ%HNDֻH s&L`1̹]`.pQt RxEb30fasv "]DlW$9Xh c&.08(C6+Ü ,41g sn D ujas"1̙BS3}0vAD "ig86+Ü ,41g sn D v`s"1̙BS3}0vAD "yg86+Ü ,41g sn DpAd RxEb30fasu6+Ü ,41g sn Dp#>?,oY"bs"1̙BS3}0vAD :냃H Ca˜ 39 "J8בN_8r9^L`)>cs\ 3 RxEb30fastッH Ca˜ 39 "J8ӑKDָH s&L`1̹]`.pQ™t`{pas"1̙BS3}0vAD g:ҁA!0g MaÜ%HGE"+\DlW$9Xh c&.08(cs\ Ad}!0g MaÜ%HG6+Ü ,41g sn Dp#YDjH s&L`1̹]`.pQytdyX ExEb30fass#ÃH Ca˜ 39 "J8ϑ "5lW$9Xh c&.08(,G:4<,uY"bs"1̙BS3}0vAD #CH Ca˜ 39 "JpЪH s&L`1̹]`.pQtͧVxEb30fasDk>b6+Ü ,41g sn D 2jH s&L`1̹]`.pQȀces"1̙BS3}0vAD g9ҡA!0g MaÜ%HV"ZDlW$9Xh c&.08(G:6:8԰9^L`)>cs\ s RxEb30fasqcÒ5-"6+Ü ,41g sn Dp#DjH s&L`1̹]`.pQ9tlsX ExEb30fasqcH Ca˜ 39 "J8ÑN"5lW$9Xh c&.08( G:89,{Y"bs"1̙BS3}0vAD g8A!0g MaÜ%H6+Ü ,41g sn Dp#\>f9^L`)>cs\ 3 RxEb30faspH Ca˜ 39 "J8ÑKDֲH s&L`1̹]`.pQB "5lW$9Xh c&.08(HG"+YDlW$9Xh 
c&.08(Ratnpas"1̙BS3}0vAD ":O!0g MaÜ%8<26+Ü ,41g sn D RH s&L`1̹]`.pQHbs"1̙BS3}0vAD RxEb30fasڏtxlpas"1̙BS3}0vAD G:<6aY"bs"1̙BS3}0vAD G:58԰9^L`)>cs\ #DjH s&L`1̹]`.pQBo DVH s&L`1̹]`.pQBO "5lW$9Xh c&.08(Hu "_DlW$9Xh c&.08(HǗ6+Ü ,41g sn D}KH Ca˜ 39 "J>a%Ca˜ 39 "J>A!0g MaÜ%tаAd鋈!0g MaÜ%t RxEb30fast|gpas"1̙BS3}0vAD GZ3fY"bs"1̙BS3}0vAD GZ038԰9^L`)>cs\ #-DjH s&L`1̹]`.pQB DH s&L`1̹]`.pQťo_28԰9^L`)>cs\ JmXxEb30fasD*cCa˜ 39 "JpvlW$9Xh c&.08(w4,y9^L`)>cs\ R]YxEb30faszbcX ExEb30faszbbpas"1̙BS3}0vAD GZ118԰9^L`)>cs\ #5,w9^L`)>cs\ #XDjH s&L`1̹]`.pQBV, "5lW$9Xh c&.08(HK "]DlW$9Xh c&.08(HK6+Ü ,41g sn Dz%."6+Ü ,41g sn Dz%H Ca˜ 39 "Jh=Ғ}A!0g MaÜ%iɾAd!0g MaÜ%iɼ RxEb30fas:f^pas"1̙BS3}0vAD GZ3/oY"bs"1̙BS3}0vAD GZ.8԰9^L`)>cs\ #YDjH s&L`1̹]`.pQB֬ +DH s&L`1̹]`.pQť]3.8԰9^L`)>cs\ =9xEb30fasDZCa˜ 39 "JhD6+Ü ,41g sn D Ң@H s&L`1̹]`.pQHbs"1̙BS3}0vAD GZ4-8԰9^L`)>cs\ #-DjH s&L`1̹]`.pQBM DH s&L`1̹]`.{y/G}Wӯğ!lJ- "5lW$9Xh c&.0;?O"G";ҪeaCa˜ 39 "~O0uocs\0 r5ލO~LjVpas"1̙BS3}0v`A/O_˿ 2#-DjH s&L`1̹]`.my̼u 2#-;,k9^L`)>cs\0 rҗ\'{58̥HV6+Ü ,41g sn DF9̥HV6+Ü ,41g sn D޽"ONWo Cؔ#-[V<,j9^L`)>cs\0 ~!{W䶷c?UY}F5"s I$I$s "o>/6\\je$I$)b=7y 2AnTX ""I$I3 1>w D8AUJ$IX E8?XI$IkD|*\Dz՟$I$i "@;AM DOv*Ca˜ 39 D 2#=,g9^L`)>cs\O"s:ҺEA!0g MaÜU "w@䉃|nQpas"1̙BS3}0v`Adۅ"ODuu,"6+Ü ,41g sn fD>s9.>Cؔ#-DjH s&L`1̹]`kۯǓ]h:AA!0g MaÜ}Ǿh:AaRCa˜ 39 s ">wЧ:,Fӑ "5lW$9Xh c&.K6uMD.B8,Aӑ DH s&L`1̹]`m|}̝wP=}~\pNpas"1̙BS3}0v`A53˅$ODtsH Ca˜ 39  "W/xV4iᜰAd!0g MaÜ y~[| pMpas"1̙BS3}0v`AwBi RxEb30fasȓww D=GZ&l`Y"bs"1̙BS3}0v`AdwDnO%91A!0g MaÜY'? 
D=GZ9&laY"bs"1̙BS3}0v`Aɓk~MS_"Ks[H Ca˜ 39 s"aS..U-A!0g MaÜ%8G H s&L`1̹]`.pQyTp!0g MaÜ%8G H s&L`1̹]`.pQB R:%lc9^L`)>cs\ 3)>cs\ #-DjH s&L`1̹]`.pQBˑ"5lW$9Xh c&.08(HKw "3/"6+Ü ,41g sn Dq3H Ca˜ 39 "J8a;ȼ!0g MaÜ%ti RxEb30fas:vEpas"1̙BS3}0vAD GZ"lhu9^L`)>cs\ #DjH s&L`1̹]`.pQBǑ֎"5lW$9Xh c&.08(HkG- "s."6+Ü ,41g sn DqH Ca˜ 39 "Jh8 aSȌ!0g MaÜ%4i RxEb30fas..~ A!0g MaÜ%8YQgH s&L`1̹]`.pQșu!0g MaÜ%8YQgH s&L`1̹]`.pQȹu!0g MaÜ%8[Y'H s&L`1̹]`.pQBÑ[DZDlW$9Xh c&.08(H6+Ü ,41g sn DPH Ca˜ 39 "J?`sL!0g MaÜ%i| RxEb30fasꏴz>pas"1̙BS3}0vAD GZ=log9^L`)>cs\ #^DjH s&L`1̹]`.pQBV"5lW$9Xh c&.08(H׃ ","6+Ü ,41g sn DP~ぃH Ca˜ 39 "J(?`!0g MaÜ%iv RxEb30fasʏ|;pas"1̙BS3}0vAD GZlra9^b IDATL`)>cs\ #-DjH s&L`1̹]`.pQBO"5lW$9Xh c&.08(H˧m"_DlW$9Xh c&.08(R7,_DjH s&L`1̹]`.pQB R8԰9^L`)>cs\ ԝy!0g MaÜ%8̥clW$9Xh c&.08(Ad6ubs"1̙BS3}0vAD ";Ca˜ 39 "J>A!0g MaÜ%TinA̋!0g MaÜ%Til RxEb30fas~6pas"1̙BS3}0vAD G0lw9"bs"1̙BS3}0vAD Gڰ8԰9^L`)>cs\ #mX DjH s&L`1̹]`.pQB6DκH s&L`1̹]`.pQB6"5lW$9Xh c&.08(HF6+Ü ,41g sn DP| s."6+Ü ,41g sn DP| H Ca˜ 39 "J(>҆A!0g MaÜ%ifA䌋!0g MaÜ%id RxEb30fasjc2pas"1̙BS3}0vAD G1l{9"bs"1̙BS3}0vAD Gڱ8԰9^L`)>cs\ KU߭c1pas"1̙BS3}0vAD "s:SlW$9Xh c&.08(AdnUg!0g MaÜ%8̭O9^L`)>cs\ Ae0 rExEb30fasDfWtH s&L`1̹]`.pQB鑶"5lW$9Xh c&.08(H["gZDlW$9Xh c&.08(H[6+Ü ,41g sn DPz-sH Ca˜ 39 "J(=Җ`yCa˜ 39 "J(=ҖA!0g MaÜ%iZ RxEb30fas*g- "gYDlW$9Xh c&.08(H{ƂM"u&#lW$9Xh c&.08(H{6 "uWM H s&L`1̹]`.pQBD~뾚9^L`)>cs\ # :CZCa˜ 39 "J<Ҟ`9u9^L`)>cs\ #DjH s&L`1̹]`.pQBLD!}!0g MaÜ%TivAҶH s&L`1̹]`.pQB6"5lW$9Xh c&.08(Hƃ "GExEb30fas..|A!0g MaÜ%8߉=i9^L`)>cs\ sQH s&L`1̹]`.pQ9W$9Xh c&.08(AD+Ü ,41g sn D r~"a˜ 39 "J(<ҦA!0g MaÜ%i| RxEb30fasꎴk>pas"1̙BS3}0vAD uG58԰9^L`)>cs\ #DjH s&L`1̹]`.pQBݑvDN-"igs"1̙BS3}0vAD uGڵ8԰9^L`)>cs\ #ZDjH s&L`1̹]`.pQBݑvD/"aas"1̙BS3}0vAD uG58԰9^L`)>cs\ #m8[D xEb30fasʎm; rx)b?Ca˜ 39 "J(;ҶA!0g MaÜ%itA$NAH s&L`1̹]`.pQBّ [D-"OiAH s&L`1̹]`.pQBّDM" lW$9Xh c&.08(HV "CD4+Ü ,41g sn DPvmli[DlW$9Xh c&.08(R7j DjH s&L`1̹]`.pQȜDT+Ü ,41g sn D 2}H"bs"1̙BS3}0vAD "srQ-H s&L`1̹]`.pQB 7ExEb30fasD Z^L`)>cs\ # DjH s&L`1̹]`.pQBՑAg9^L`)>cs\ # DjH 
s&L`1̹]`.pQBՑ"5lW$9Xh c&.08(HF ҲH s&L`1̹]`.pQBՑ"5lW$9Xh c&.08(HF6+Ü ,41g sn DPu}qXDlW$9Xh c&.08(HF6+Ü ,41g sn DPu}riXDlW$9Xh c&.08(HG6+Ü ,41g sn DPtH Ca˜ 39 "J(:р9/"6+Ü ,41g sn DPtH Ca˜ 39 "J(:A!0g MaÜ%ihDCa˜ 39 "J(:A!0g MaÜ%ih RxEb30fas.. р:T/"6+Ü ,41g sn D 2'0g MaÜ%8 RH s&L`1̹]`.pQȜDT+Ü ,41g sn DP3t"5lW$9Xh c&.08(AdNExEb30fasjs4pas"1̙BS3}0vAD 5G98԰9^L`)>cs\ # H"bs"1̙BS3}0vAD 5G98԰9^L`)>cs\ # DjH s&L`1̹]`.pQB͑vAr9^L`)>cs\ # DjH s&L`1̹]`.pQB͑v"5lW$9Xh c&.08(H;G RH s&L`1̹]`.pQBɑ"5lW$9Xh c&.08(H[G RH s&L`1̹]`.pQBɑ"5lW$9Xh c&.08(H[G6+Ü ,41g sn DPr|)[DlW$9Xh c&.08(H[G6+Ü ,41g sn DPrH Ca˜ 39 "J(9р>T-"6+Ü ,41g sn DPrH Ca˜ 39 "J4MZG6+Ü ,41g sn D 2 RH s&L`1̹]`.pQȜDT+Ü ,41g sn DP1"5IH s&L`1̹]`.pQȜbH"bs"1̙BS3}0vAD "srQ-H s&L`1̹]`.pQBő"5!0g MaÜ%Tih (H s&L`1̹]`.pQBő""0g MaÜ%Tih N"bs"1̙BS3}0vAD G;8 H s&L`1̹]`.pQBő"8(+Ü ,41g sn DPq3%I-W$9Xh c&.08(H{GgJ&Z8H s&L`1̹]`.pQBő"ϔ$=Mp^L`)>cs\ #m D.U$==t^L`)>cs\ #m D.U$==t^L`)>cs\ #m D.U$==t^L`)>cs\ #m D$=-x^L`)>cs\ #m D$=-x^L`)>cs\ #m D'=|^L`)>cs\ #m D'=|^L`)>cs\ #m D'=|^L`)>cs\ #m DNzh"1̙BS3}0vAD Eh X6+Ü ,41g sn D 2'0g MaÜ"*t5|AFU$II"Jp$I$qQȜD$I$_)!=80!0g MaÜ%ih `#_U0g MaÜ%ih `#_U0g MaÜ%ih rX#1k"1̙BS3}0vAD G=82HZH s&L`1̹]`.pQv"%=V+Ü ,41g sn D0~ݣmCIĬU0g MaÜ%if rP#1k"1̙BS3}0vAD Gڼ81HZH s&L`1̹]`.pQ6O";FYa˜ 39 "J>#IĬu0g MaÜ% ibȎb:xEb30fasw1xAd@1k"1̙BS3}0vAD G:< r@1k"1̙BS3}0vAD G:< r@1k"1̙BS3}0vAD G:< cz1k"1̙BS3}0vAD Gڹ\r1=遘^L`)>cs\ # .9옞@ZH s&L`1̹]`.pQvDvMNz fW$9Xh c&.08(7h 8!0g MaÜ%8AD"1̙BS3}0vAD HZpAIIH s&L`1̹]`.pQȜDT+Ü ,41g sn D 2'0g MaÜ%8i 2i9^L`)>cs\ #m n8711k%"1̙BS3}0vAD GڶpobcJxEb30fasFm+ ǴǬ0g MaÜ%iTӒV+Ü ,41g sn D0z]S "{LKzzZ H s&L`1̹]`.pQvM/83)1k%"1̙BS3}0vAD Gڴ ϤǬ0g MaÜ%iRp^SV+Ü ,41g sn D0x=;"{MIzrZ H s&L`1̹]`.pQw85%1k-"1̙BS3}0vAD Gڳ ߄'Ǭ0g MaÜ% iLp~+Ü ,41g sn D0x-3]"MHzrZ H s&L`1̹]`.pQw9OzrZ H s&L`1̹]`.pQv;D'=9fW$9Xh c&.08(aH;V"+Ü ,41g sn D0x #.CIOYka˜ 39 "J<҆`!'Ǭ0g MaÜ%\\ #.RQH s&L`1̹]`.pQ R? 
rP*+Ca˜ 39 "JpjyEb30fasD48Ca˜ 39 "JpjyEb30fasD Z^L`)>cs\ #-_p9"Ԙ^L`)>cs\ #q9"Ԙ^L`)>cs\ #q9&Ԙ^L`)>cs\ #-r9&Ԙ^L`)>cs\ #-r9&Ԙ^L`)>cs\D_^K͜aSƎxALScjxEb30fasA~f3w2v~"GeV+Ü ,41g sn "鯜!lБ֮8IzbZH s&L`1̹]`.:<~¦ i:pq'Ƭ0g MaÜ悁A䩏|/g7w2t!"%+Ü ,41g sn [KgaSt8ADczxEb30fas/xˋ~!lБ8OzbZH s&L`1̹]`.0зk1w2ta"+Ü ,41g sn =o'¦ i4pIѤ'Ƭ0g MaÜA쾝Ȳ^:3w2tu"'E+Ü ,41g sn 'N|`A/;Mw-G8 xEb30fasA ԏwNK$޹CؔAn8AHZ6+Ü ,41g sn ~zcgҙC99W$9Xh c&.0T"Ndy:w 2'0g MaÜAdD2¦8̩j,"6+Ü ,41g sn z z̗ S¦8AD"1̙BS3}0vdͷw~7s/ts)#GZ5  JzZZH s&L`1̹]`.D>s?c!lȑ'8DV+Ü ,42 IDAT1g sn ?5G߽ȘCؔ#-ZNp%=-fW$9Xh c&.0 "?P}}/;M9ҢUHb֊xEb30fasAۨ~{/͇&G ¦i(pHH$i1kE"1̙BS3}0v` rmT_R[ޞ53s)#GZ3 IzZZH s&L`1̹]`.2{/WDdHk6DbIOYka˜ 39 Ae0G_*s:lL9+Ü ,41g sn q2wfs)GZ2 t:I1kM"1̙BS3}0v`h9R^2㛪nV,"A+Ü ,41g sn |_|SՇ{?Rh6eH+IOYka˜ 39 Sz̯g@Cؔ#XDN&=)fW$9Xh c&.0LD|_6Cؔ#-"DN&=)fW$9Xh c&.0LDKed6eH IOYka˜ 39 AdmaS`p ;&^L`)>cs\1'Z÷h6eHCDN%=)fW$9Xh c&.0Lx>ןv;M8SIOYka˜ 39 S?eCAss)G: w"I1kM"1̙BS3}0vi9o6Ҥ/-xp;!0g MaÜ%LD CDes"1̙BS3}0v|+" 2'0g MaÜA_p.99W$9Xh c&.0 z(~>7U(9"GCa˜ 39 A/rmrjyEb30fasA%3W fj6e<8dMzV+Ü ,41g sn !ZAJӏ=A$XғJ^L`)>cs\DJ5hA YGZ~Hʱ'h-"1̙BS3}0v )3o_ZYGZ~Hʱ'h-"1̙BS3}0v ?wv"wLaSii"9Gc0g MaÜ Ջg^A g2w2HOr9W$9Xh c&.08(a= t8遟W$9Xh c&.08(aV= t8遟W$9Xh c&.0LD>8M>ҪD'=U0g MaÜ 6s)$Ih "1̙BS3}0vAD Ih "1̙BS3}0vAD Ih "1̙BS3}0vAD CIh"1̙BS3}0vAD CIh"1̙BS3}0v 2cs\Dm6AdN"a˜ 39 "JpjyEb30fasD7_DlW$9Xh c&.08(a> 2ɾ ~^l^L`)>cs\ GZ~$.yѲyEb30fasi1"IFa˜ 39 "JzOG8L'-W$9Xh c&.0T ">|}Bs)Si$]E0g MaÜW/=Ϗ|aSi"O'FKa˜ 39 Ńo<ӯ^xߟ-¦L=D&tO+Ü ,41g sn "u|Ow[C"¦L=D&tO+Ü ,41g sn & "o^-_|.s)S0%]#0g MaÜ)LJMmaS&ia"SKgFa˜ 39 >{{|S_GWCؔGZ~T.ryEb30fasAA;\W+"s)0vbyEb30fasA~y!|7ۋ{CؔGZ~dIWh"1̙BS3}0v ?\F;#wa+aS&iA"$]S0g MaÜ Gn6ͥff6e6< 2Ne?5Z*H s&L`1̹]`."1s3}4K0w2Hqn'鲟-W$9Xh c&.0ļIW1 MwByEb30fasA2?7FUu6ev< 2nu?6Z(H s&L`1̹]`.H";[8lD܊xEb30fas [8lȜ[!0g MaÜAd-DeAdD Z^L`)>cs\0qyo!r~̦MDZp"3Ca˜ 39 oz-D_C_=C9"/Ca˜ 39 AE2ן*s"/~!Sy?~o6AdN"a˜ 39 Akb[Ǜ/!lʴ#yvAdKrD^L`)>cs\D_%s9y\#^1s/Uv=9t폎+Ü ,41g sn kf|ɓ󊙿zeCg7w2Hrs+-W$9Xh c&.0LD-2·/vcdaSiӳ^"cn%]0g MaÜ pD "UvM{9 ztώ+Ü ,41g sn "}A^;,!lʴ#zvAdЋv<^L`)>cs\0eܼ>E!Uv]8 ztώ+Ü 
,41g sn & "Ofز*M;Үg}DF$]ã0g MaÜiȓ'~?`aSi۳"nxEb30fasAd珼yf6eڑ= 2y?=ZH s&L`1̹]`.(Dj6eґ= 2y?>ZH s&L`1̹]`.pQ¤#{tAd|0^L`)>cs\ IG辇ȸ~~,^L`)>cs\ IG~ȸ~~,^L`)>cs\ IG~ȸ~~,^L`)>cs\ IG~Ha˜ 39 |;_8 ݹCؔIG~Ha˜ 39 _zqK?m;¦L:G{D*x=3xEb30fasA!"uoZw9Tzf0g MaÜ)㯟CDAdN"a˜ 39 SO!"[ 2 "x=#xEb30fasA{V9AD"1̙BS3}0v ?z2 2'0g MaÜ W|9;M2>ras"1̙BS3}0v ?y刺rs)S}H Ca˜ 39 A-U|#3;MrO;DH s&L`1̹]`.H"o!f6eʑv? RxEb30fasAzyﳗW_7Cؔ)G~Ha˜ 39 bJS.*6g"1̙BS3}0v` _7Cؔ)G~H}^L`)>cs\0=DDi"FO+Ü ,41g sn &~ʌ/arO{8z~^L`)>cs\DMUiۓ^"c澞%-W$9Xh c&.0_bM9Ҧ'DC\D+Ü ,41g sn Dd6eʑ< 2`$]^L`)>cs\D}_;MpG8 p0g MaÜ ۪^|yWa6e‘v< 2=Ed"1̙BS3}0v` raS&iÃ1"C\D+Ü ,41g sn ")_}fs)(Dp"1̙BS3}0v` qqp#s)(!."a˜ 39 A^Ϡ;Mp9L a˜ 39 "Jp9Ltbq"H s&L`1̹]`.pQU/~p?Ad" H s&L`1̹]`.pQȜDT+Ü ,41g sn D 2'0g MaÜ '=mrjyEb30fas]C99W$9Xh c&.08(AdN"a˜ 39 "Jis)"9yEb30fasGZ~D"@^L`)>cs\ V? 2W$9Xh c&.08(!8La˜ 39 Ão~61׾ѣO~ӿ_/¦䏴Dx"1̙BS3}0v`hؑ~쏿z#}'&W_O+ߘ;Mis "S9xEb30fasAׯ^ܸD~>ķ[ dz}?AWJ;Mis "xEb30fasA/.v7(=vG>_~۱Ed6%9L a˜ 39 _rq`x{NG6=gBCؔ? 2{W$9Xh c&.0LD޾\"_7:=߿OG|ު_ӿgaSG~H s&L`1̹]`.4!W?W"ݻrC/ӻhf6%8 p0g MaÜ)eϾ 0)oLWǁ_yroaSG~Ȑs;]?NW$9Xh c&.0LD&ȃמ[ȋwZ Eÿ"7}!lJH{ۏpoyEd"1̙BS3}0v`  r󾪁_yۢ^{7V G~ȨsVyEb30fasA~ dg~c8m[_3?"g>ҶCDC\D6+Ü ,41g sn ;\*fwݝ\HR9= 2A+Ü ,41g sn țw^;\ȇN}}Ǿw搹Cؔ= 2A+Ü ,41g sn w~ vMD x?~as)#{l?AdؾAEd"1̙BS3}0v =\͛D^Qs }r'`שoM|Ac""m?R$I4 D'jR/ʥOAȜD\D$I$m7!xWD:9AD$I5}ȗݽu~{H'99H$IRȩ9{o,r%N"suq$I1T+)3S?^D~{" AdN"$ITcwN}ssf>soQFG~ȰHfa˜ 39 A"Wsׯ9iS!" "."a˜ 39 A;\[4Sfnz̎Cؔ= 2A+Ü ,41g snq "u{ A5ӓo6%{}O8 ;4lW$9Xh c&.0+LD^3s r~ y#6q\!lJHq a˜ 39 _xړo_DAjyO "WoCrGaSG~0 H s&L`1̹]`U2\ O}[O_}~'Ow>> iS!""."[a˜ 39 Sz?$cD/{}O8 s0g MaÜIȓ-"|<-|O}k l|da0g MaÜviȞycq"w}?ef#{j?AdW$9Xh c&.KLD^|? 
._sgطzwyj>;aSG~0 H s&L`1̹]`:o}<|_|x?_>xKUJea"^L`)>cs\0cs\0 ;rܼ3>U97w<ҾDD\D+Ü ,41g sn fD[{ȓOCؔ= 2A+Ü ,41g sn D¦$ a"^L`)>cs\D<5<%/ǿߙ¦$ a0g MaÜ rÇ;67XCyCܑؔ= 2 "^L`)>cs\D~uΩ;\⯈¦䎴0a"^L`)>cs\D{׿H7=Cܑؔ= 2A+Ü ,41g sn BFN'4w;ҾgDm0g MaÜ r/~¦䎴0a"^L`)>cs\D׿"¦䎴0a"^L`)>cs\D^5KCܑؔ= 2,2lW$9Xh c&.0$Dd6%w}9 s0g MaÜ rk\?;Mi3a"D0"1̙BS3}0v >F=Cؔԑ= 2,6lW$9Xh c&.08(b.a"^L`)>cs\ 99W$9Xh c&.08(AdND\D+Ü ,41g sn D 2'0g MaÜ%8AD"1̙BS3}0vAD "sZ "z^L`)>cs\ ԑ= 2A+Ü ,41g sn D:ҾG#D9`xEb30fasRG~Ȱ "v^L`)>cs\ ̑= 2A+Ü ,41g sn D9Ҿ'cD9`xEb30fas2G~Ȱ "r^L`)>cs\D.^~  i1"D0"1̙BS3}0v 1$8lTH؏qD\D+Ü ,41g sn D9Ҿ'cD9`xEb30fas2G~0 H s&L`1̹]`.pQBH؏qD\DV+Ü ,41g sn D9Ҿ'cD9`xEb30fasAdyş6Vs)#{b?AdXjqY3H s&L`1̹]`. "6w9Ҿ'cD9`xEb30fas2G~0 H s&L`1̹]`.pQBH؏qD\DV+Ü ,41g sn D9Ҿ'cD9`xEb30fasG~Ȱ:󼋒wBZg춍H$Ab#1ڔf6`lI H2-Q%SXs9޹}_GfUd@{8\/H s&L`19]`.pQOUѼ$n"^L`)>cs\ "D,H s&L`19]`.pQJ+D\DW$9Xh c&.08H%-+Ü ,41g sN DAZka˜ 39 "j RAD0g MaÜ5h8Ҽ$n"^L`)>cs\  G` ҭ}qR^L`)>cs\  G` A+Ü ,41g sN DԠHDD\DW$9Xh c&.08AÑ='9ts0g MaÜ5h8Ҽ$n"^L`)>cs\  G` m "r"1̙BS3}0tAD G^ A+Ü ,41g sN D ~yi"f "."+Ü ,41g sN D ~yi"D0"1̙BS3}0tAD G^ A+Ü ,41g sN D ~yi" "."W+Ü ,41g sN D ~yi"D0"1̙BS3}0tAD G^ m "r}"1̙BS3}0t}ߩT¦ď4~H7 H s&L`19]`.1ushT)#M{ A+Ü ,41g sN "77OFu?Ҵ""."W+Ü ,41g sN "/|_7Cؔfq a˜ 39  o_6e{Ofq a˜ 39 }͓o[gCJkD\DW$9Xh c&.0|_T)A$~H7 H s&L`19]`.1<on"k֙6A+a˜ 39 赃Ijou RAD0g MaÜ悹 pҷV3!lH"."+Ü ,41g sN :'Ƈ]]6%|)/n"^L`)>cs\7 |u:M iK=AW$9Xh c&.0"/D3!lJHS^"za˜ 39  "_~ᗉ|yE:S¦4 A+Ü ,41g sN D^lj|}-?:M iC=A[ "rM"1̙BS3}0t`A_ؙ|Lu>҄wzH7 H s&L`19]`.Xryq"6%| n"^L`)>cs\ r"kDCؔ&cD "."W+Ü ,41g sN D~1d3/0Wu>A"D0"1̙BS3}0t`A?<ڐU)#] ҭsq^L`)>cs\?cs\9<ۇkqH6%z?D9`xEb30fasA w|G+6%z?D"."W+Ü ,41g sN "'Ƈ'ٷo# aSG=AW$9Xh c&.0DN>FK{*aSG=A[ "r-"1̙BS3}0t` r1OU^sْ.Do A+Ü ,41g sN oV_ %#]AW$9Xh c&.0"̷Ld!lJH~7p?\ H s&L`19]`.DƿU[fPխ~-Bo A+Ü ,41g sN :7~z~[ȂCJ3\H s&L`19]`.;|з|OV¦8TrѲ"1̙BS3}0t` jT)"D,H s&L`19]`.1/oY6A "."W+Ü ,41g sN kS¦%D9`xEb30fasA _&3U)#]yA"50g MaÜ2/oUvU)#]uAW$9Xh c&.0)3L"_ aSG뼕H7 H s&L`19]`.HDFCؔ.:o mAE xEb30fasty3n"^L`)>cs\ D+x=Λ9t[hqY?H s&L`19]`.Xz7"<D9`xEb30fasAl^KM?TuGۼH7 H 
s&L`19]`. "Ͼ}M#]mARyEb30fasA[fv A|V¦t "D0"1̙BS3}0t "\ْG|n "."ka˜ 39 AdQA+#[THz A+Ü ,41g sN ?e[kClI#]e>H7 H s&L`19]`._<ljcs\>\6esE39t[rqY5H s&L`19]`.pQJ"ZW$9Xh c&.08H%-+Ü ,41g sN DA5"."ka˜ 39 "j RAD0g MaÜ ޯi]6AU"."+a˜ 39 A䋷^_x[¦Ď=H7 H s&L`19]`.pQؑ?s a˜ 39 "j;G|"D\D+Ü ,41g sN D t s a˜ 39 "j:9ts0g MaÜO~6t7|"ݖD\DV+Ü ,41g sN "uaSBGz6n"^L`)>cs\ #] =H0g MaÜ5"oD9`xEb30fast7\"D0"1̙BS3}0tAD BGz.n"."+a˜ 39 "j:e39ts0g MaÜ52oD9`xEb30fast7L"u0g MaÜ 6zME6%t q a˜ 39 A䋷nqf:M BoyDe "."a˜ 39 "j9ҥ8ts0g MaÜ5ROyD9`xEb30fas3K=q2W$9Xh c&.08H%-+Ü ,41g sN )3j RAD0g MaÜ rݪCJW=W$9Xh c&.08H%-+Ü ,41g sN DAu"."a˜ 39 "j938ts0g MaÜ5ODe "."a˜ 39 "j9%D9`xEb30fasDt't;n"^L`)>cs\ #] A[ "6^L`)>cs\D}?MkP¦Dt't3n"^L`)>cs\DxZ}vgנ:M /v"0g MaÜ5/f"D0"1̙BS3}0tAD G HAEd]"1̙BS3}0tAD G H7 H s&L`19]`.pQ.n A+Ü ,41g sN B?eO_ɯiO)38҅_ЭDe"."a˜ 39 AU)#]AW$9Xh c&.08AH~A7r:W$9Xh c&.08AHAq a˜ 39 "j8_mD"."+a˜ 39 "j8_MD9`xEb30fasttn"^L`)>cs\9}tE?i@ursk,n -yqYH s&L`19]`.D}OY6 n A+Ü ,41g sN f"ԓu"!lH "."a˜ 39 3gf`y?aSD*9hY^L`)>cs\0o=oT)"2W$9Xh c&.0D~E:Mq eyEb30fasA䋷׏/}?}wymHurHS D9`xEb30fasA䣇 pW4MurHS D"."+a˜ 39 3̇ǿWw_&k6<D9`xEb30fasA ۻ+?aEM3!l#y@9t "^L`)>cs\>܍_25CؔGs a˜ 39 '2CؔGsvAEd"1̙BS3}0t}}#{ 5c]:M9Y(n"^L`)>cs\<-_aSi:AW$9Xh c&.04"_uf~0OYчT)V IDAT4 "xEb30fasAd|8W¦=ҴsH7 H s&L`19]`.pQG~rvAEd"1̙BS3}0t`gn3D潟D9`xEb30fasܟ23l#{?9ts0g MaÜA韫^ry!l#{?9t "R+Ü ,41g sN YGْi9AW$9Xh c&.0"w3)!#[#M|?8t "R+Ü ,41g sN f "wǫCϦBurH3!"D0"1̙BS3}0t` r |oaSi9AW$9Xh c&.0Df||~"7XdK fdCD]lqa˜ 39 Aw=e|z;<߻In駟qT¦8TrѲ"1̙BS3}0t 2~rL F9T "R+Ü ,41g sN ?hAx'@9A+I$I'DA$I$-AD "nD\D$I$ R}KrH".9\?] 
0g MaÜ悙?eVwCؔsG|pvAEW$9Xh c&.0DV¦;y"D0"1̙BS3}0tAD i<n"^L`)>cs\ 4|H".""1̙BS3}0tAD i,n"^L`)>cs\ 4|H".""1̙BS3}0tAD in"^L`)>cs\3M4|H7 H s&L`19]`.HD~\!lʹ#> ƒHH s&L`19]`.=<{׿zF; cs\0g Ug3D]|qa˜ 39 3!_֯4!lʙ#yH7 H s&L`19]`.hD}~ǯ<v4Cؔ3Gz4nD\DxEb30fasA ~>@~'?h;3U)g2I"D0"1̙BS3}0t}W摯/MT¦˼'9ts0g MaÜA䣃M}r4lKU)";0g MaÜAnSeq CJ"ZW$9Xh c&.0D>9Ï du_"R¦8T "R+Ü ,41g sN ׀/ >E:Mq eyEb30fasAb>;W¦8T "R+Ü ,41g sN D^}~+6̑^cs\= >D:M>=9ts0g MaÜ悙?eCByu?x:M>=9ts0g MaÜAd?VDlH/x ҭhq9ԣ}6MϣD9`xEb30fasA 12rcfv MGz(nUyEb30fasA 913%Cؔ#yH7 H s&L`19]`.hD>4cTn^cs\0c}S}G$)W¦LE#D9`xEb30fasAdv_$r7<~֫aS" 0g MaÜݧ<|R@`E!dK>>9ts0g MaÜ9>|Rg'EU)Gz0n"^L`)>cs\0k_D?)䣣=dEd>>9t+D\D.+Ü ,41g sN "}̚a5}~<r a˜ 39 3G>U!G#yHAE䢼"1̙BS3}0t` ||; !l^cs\1 ҭzqH s&L`19]`.9<7sK_0Ou2yo"D0"1̙BS3}0t` #{ ߜ7aS&|A[ "r)^L`)>cs\0kk{6eHKD9`xEb30fasA䋷׏/}?}wymHu2y%oc"D0"1̙BS3}0t` |~_`_:M<Ғ1nȅxEb30fasA)ݗqߚ:M:Қ1n"^L`)>cs\>D:M:Ҳn"^L`)>cs\0s A+Ü ,41g sN D`H˞DcqH s&L`19]`."2D m:Һ>n"^L`)>cs\0̌~3D)34uuO}"V20g MaÜA韫^ry!lԑ>8ts0g MaÜAd{fV"G۵f {DeqIa˜ 39 7Ō|J@Ȗ RAD0g MaÜ"里P¦8T "."ټ"1̙BS3}0t` r |oaS&җ#n"^L`)>cs\0kyy|cU{-Aa˜ 39 Aw=e|z;<߻In駟qT¦8T "."ɼ"1̙BS3}0t 2~rL FMiH7 H s&L`19]`.pQ#}9?p鶢AE$W$9Xh c&.08đ9ts0g MaÜ58=nkD\DRyEb30fasLiH7 H s&L`19]`.SfxoE?y:M8nD\D2yEb30fasAdn!lđV?8ts0g MaÜ58n"^L`)>cs\ GZp m]H"H s&L`19]`.pQ#~7q a˜ 39 "j0q;"V60g MaÜ58gn"^L`)>cs\7cs\0{;7I:M?G=n"^L`)>cs\0w׆9/LS¦iH "."Y"1̙BS3}0t` #s _YgT)GZf A+Ü ,41g sN f ">CVT)GZf m}HH s&L`19]`.5!o_{uߙ:M<;O{"D0"1̙BS3}0t` G~??񗦩aSD*q^L`)>cs\0cݳǿVM3!lH%-+Ü ,41g sN f "!?8߽/aSD*^L`)>cs\><򰈬KDCJ"ZW$9Xh c&.0"w7Fu RAD0g MaÜAd7y}\R:M?=nD\D2xEb30fasA ~nu2~{"D0"1̙BS3}0tyяMlm"R¦i{Hu"." 
"1̙BS3}0ty}O>bd;j T)GZZ~ A+Ü ,41g sN f"ǹ߿6eH_DtqYW$9Xh c&.08V?8ts0g MaÜeF FAW$9Xh c&.0j0zՏ":,+Ü ,41g sN j0zo"D0"1̙BS3}0t}-cCؔ#~+?pAEdi^L`)>cs\>>$dl%1ry!lV?8ts0g MaÜACD?Ed "k#[V?8t[ "0H s&L`19]`.hDG5}nj-iH_D9`xEb30fasAKD|@}-iH_DxqYW$9Xh c&.0DpyC&~&o6eHD9`xEb30fasASw~ko?EiCH7 H s&L`19]`.5g#nD\Da˜ 39 s~80<ƚ~Nu RAoDa˜ 39 ϟ97٢D*9t+Ü ,41g sN :~G_MiCJ"/]0g MaÜA T)"D^a˜ 39 3߽Nu RA,H s&L`19]`.1|S 2Cؔ#~#?r a˜ 39 v?]b6eHȏD}qYW$9Xh c&.0"!_YT¦iH7 H s&L`19]`.hD}v+S¦iH"."K0g MaÜA䋷^!O~aSƎAW$9Xh c&.0D^+S¦i H"." 0g MaÜ悙2 4vG"D0"1̙BS3}0tCU?3DƎA "."0g MaÜ悹?vן24vG"D0"1̙BS3}0t}o5Cؔ#~qv ""1̙BS3}0t` 7kDCؔ#~q a˜ 39 s?x Hu2rկ="ݮbqYW$9Xh c&.0D;VT)#GZ8 A+Ü ,41g sN /޺9kM?:M9n1,+Ü ,41g sN D`H{D9`xEb30fasixH7 H s&L`19]`.pQ#~qv%H?H s&L`19]`.pQ#~q a˜ 39 ȳ\ n:n2t0g MaÜy?vT)"D`c"1̙BS3}0tAD D*9)hxEb30fas8Tr9RЀm0g MaÜ5p ra˜ 39 "j0g>nW40g MaÜ5>W>n"^L`)>cs\6G{&^æRu2|կ}"ݮiqa˜ 39 -ϾzaSQAW$9Xh c&.0߾9G˅Cؔ#~svUHH s&L`19]`."?{xPu2|o}"D0"1̙BS3}0t :|v:,"!lV9ts0g MaÜ 򻁯y+6eHD] "+Ü ,41g sN bȳo?n _zWf4Q:M>'>n"^L`)>cs\DaGVM3!lV?9tAEd>H s&L`19]`. "֏'?xY]T)GZ" A+Ü ,41g sN Bo/+6eH_D] "2W$9Xh c&.0_FV:M>>n"^L`)>cs\D>Z<!l؊ IDATV?9tAEd.H s&L`19]`. 
" Z/Lu2x"D0"1̙BS3}0t 2~pP:M<n7a˜ 39 =gW!lV8ts0g MaÜ !3||q<nW8a˜ 39 =دKuqCGX> A+Ü ,41g sN DAȸ:\=H s&L`19]`.pQJ"Jp"1̙BS3}0tAD D*9L(õ0g MaÜ5p 2+Ü ,41g sN DAȔB\9H s&L`19]`.pQ#~ p a˜ 39 "j0x/"ݮtqia˜ 39 "j0x"D0"1̙BS3}0tAD !|A۵"."ͼ"1̙BS3}0tAD |AW$9Xh c&.08БV?9tAEW$9Xh c&.08БV9ts0g MaÜ5:W!n;40g MaÜ5:G!n"^L`)>cs\Dn 믿:M:G!nW<0g MaÜ悆Aj:M:7!n"^L`)>cs\ CGZ> &^L`)>cs\ CGZ> A+Ü ,41g sN D`HD9`xEb30fas iHD\DZxEb30fasAdyw"Uu2t/C"D0"1̙BS3}0t 2\6eH_D] "+Ü ,41g sN D`HD9`xEb30fas>"תD] "a˜ 39 "j RA$W+Ü ,41g sN D``~qv탈HW$9Xh c&.08H%r\!H s&L`19]`.pQJ"AE>^L`)>cs\ "Dq}"1̙BS3}0tAD {A"."A^L`)>cs\ GZ= A+Ü ,41g sN D`H߾GDm`q0g MaÜ58n"^L`)>cs\ GZ= A+Ü ,41g sN ?|Ϳ_O?C:M8n[D\DB"1̙BS3}0tvÿ}Og&?6eH_GD9`xEb30fasA /ogo)?E iHM "."^L`)>cs\P9!o'OG!lV?|8ts0g MaÜAc>׈T)GZ= mHW$9Xh c&.0"y!_n_?:M8wn"^L`)>cs\P7v7?FCؔ#~s鶕AE,H s&L`19]`(D~sg_wnדP!!lV?z9ts0g MaÜvA/O׏ߌ~oF"R¦iH "."xEb30faD 24gL|?>JD iH7 H s&L`19]`DƏı:=71nD\D0g MaÜfAd`n?DsV[!!lͭ_~s a˜ 39 |Ȩ`UvT?y9t RI$IנjМ1#~HH&J"ͪ"I$ \ r$C?A$H%vUM$I~Uwnjc:"D*9j$I[ 2"Cv{:dr 2CUU$I$U"25"U= AD$I5"}o?hQGZ= m[?zwW$9Xh c&.0j}ٴْN{AW$9Xh c&.0\Oÿ}i?%ikH ".""1̙BS3}0t?9E:M99 n"^L`)>cs\P5 ~\_w2U)'GZ= mk(H s&L`19]`.(Dncrel_aSN{AW$9Xh c&.0T "_O4|7d@urro"670g MaÜ悪AdhI|ȭ6H'D9`xEb30fasA 2swU¦iSH "."#"1̙BS3}0tlɇd}}aSN{AW$9Xh c&.0 "s:"w{HLJܪaSN{Aa^L`)>cs\P6gfoDݱ|慄P¦iCH7 H s&L`19]`.D~{tw{'L%Cؔ#~pAEdW$9Xh c&.0 "w^_ٛS_:gT)'GZ= A+Ü ,41g sN 8ߞ;9S¦i;HM"."C"1̙BS3}0tUpyEdU/&qvr"D0"1̙BS3}0tQr9yiZGZ= mH s&L`19]`(D?~njȅzG)n"^L`)>cs$Q;<_~7 F"Df"1̙BS3}0tAzW¦8TrRW0g MaÜ5p ҧ7ka˜ 39 "j RAO]o+Ü ,41g sN DAH⬔W$9Xh c&.08VpO9ts0g MaÜ58>)nD\DxEb30fasiH ".""1̙BS3}0tAD }{AW$9Xh c&.08VoO9t "r+Ü ,41g sN DHD9`xEb30fasiHM".""1̙BS3}0tAD q;AW$9Xh c&.08ёV?n8t "+Ü ,41g sN DH߶D9`xEb30fasivH".""1̙BS3}0tAD i;AW$9Xh c&.08ёV?m8t "+Ü ,41g sN DH_D9`xEb30fasivH".""1̙BS3}0tAD a;AW$9Xh c&.08ёV?l8t "r+Ü ,41g sN DHߵD9`xEb30faszwnAEd+Ü ,41g sN DAb ;&^L`)>cs\ "DSX0g MaÜ58DCD9`xEb30fas8TrYPeV+Ü ,41g sN DAȂ*k^L`)>cs\ GZ ҍ1g-s&L`19]`.pQ#~q a˜ 39 "jpxO!" 
9Xh c&.08Vh8ts0g MaÜ58<nAE{Ü ,41g sN DHCD9`xEb30fasivH7 "a˜ 39 "jpx!"8~0g MaÜ58<n"^L`)>cs\ GZ 4H s&L`19]`.pQ#~r a˜ 39 "jppՏA"H|0g MaÜ588 n"^L`)>cs\ GZ 5H s&L`19]`.pQ#~r a˜ 39 "jppOA"Xz0g MaÜ588 n"^L`)>cs\ GZ 6H s&L`19]`.pQ[wP A+Ü ,41g sN DAHJ0g MaÜ5p R"1̙BS3}0tAD D*9$)T%H s&L`19]`.pQJ"IJ;U+Ü ,41g sN D`~sDW$9Xh c&.08V?b9ts0g MaÜ5?G0nAxEb30fasivH7 H s&L`19]`.pQ#~sFDW$9Xh c&.08V?a9ts0g MaÜ5?'0nAxEb30fasi vH7 H s&L`19]`.pQ#~sDW$9Xh c&.08V?`9ts0g MaÜ5?0nAxEb30fasiuH7 \D"1̙BS3}0tAD :AW$9Xh c&.08V?_9t"E+Ü ,41g sN D`H#D9`xEb30fasiuH7 [D"1̙BS3}0tAD :AW$9Xh c&.08ޑV]G8t"E+Ü ,41g sN D`H#D9`xEb30fasܺV?]G8t#"E+Ü ,41g sN DA7첼"1̙BS3}0tAD D*9\@q.+Ü ,41g sN DA%W좼"1̙BS3}0tAD D*9\Bq.+Ü ,41g sN DAEw쒼"1̙BS3}0tAD :AW$9Xh c&.08ޑV?[G8t"E+Ü ,41g sN DHcD9`xEb30fas<iuH7 YD"1̙BS3}0tAD :Af0g MaÜ5x<'n""s&L`19]`.pQ#~q YD"1̙BS3}0tAD :AƜ ,41g sN DH_cD9-e0g MaÜ5x<n" 1g MaÜ5x<n"/UW""1̙BS3}0tAD :AKUH s&L`19]`.pQ#~q rka˜ 39 "jx1"Dvv^L`)>cs\ GZZ Adla˜ 39 "jpՏQ"DT-W$9Xh c&.08ÑVUG9tsSݶ|^L`)>cs\ GZV A^uyEb30fas"%D.n"1̙BS3}0tAD D*9\Zu߲yEb30fas8Tre0g MaÜ5p rqՅKa˜ 39 "j RA+Ü ,41g sN DAU7.W$9Xh c&.08ÑVSG9tsW]T^L`)>cs\ GZL Ad_uRyEb30fas<i3uH7՝a˜ 39 "jpկQ"DTw.W$9Xh c&.08V?R9ts9T]D^L`)>cs\ GZF APuyEb30fasiuH7#խa˜ 39 "jpOq"DU.W$9Xh c&.08VP9ts9V]4^L`)>cs\ GZB ADuxEb30fasiuH7սa˜ 39 "jpq"DNU/W$9Xh c&.08VO9ts9U]$^L`)>cs\ GZ> Ad@urxEb30fasitH7a˜ 39 "jpq"DTW/W$9Xh c&.08VN9tsTݽ ^L`)>cs\ GZ8 AdPu2xEb30fasZ AdXuxEb30fas8Tr)U]^L`)>cs\ "DjUoy^L`)>cs\ "DjUoy^L`)>cs\ "DUoq^L`)>cs\ "DUoi^L`)>cs\ wGZ. 
AdTua˜ 39 "jpw "DUpa^L`)>cs\ wGZ, Ad\ua˜ 39 ]b IDAT"jpw "D&T7pY^L`)>cs\ wGZ* AdBua˜ 39 "jpwկ "DTWpQ^L`)>cs\ wGZ( AdJua˜ 39 "j;7n";$H s&L`19]`.pQݑV?I8tsV]yEb30fas쎴E:Aȴ.+Ü ,41g sN D`w/)"DΨnr"1̙BS3}0tAD vGZ A.+Ü ,41g sN D`w)"DΩb"1̙BS3}0tAD vGZ A.+Ü ,41g sN D`w)"DΪR"1̙BS3}0tAD vGZ A.+Ü ,41g sN D`w)"DΫ.B"1̙BS3}0tAD vGZ A$0g MaÜ5U A$0g MaÜ5p "U\W$9Xh c&.08H%5"1̙BS3}0tAD D*9Iua˜ 39 "j RAdU˸H s&L`19]`.pQJ"R]xEb30fasܞhCtH760g MaÜ5=w$n"Qua˜ 39 "jp{I"D+Ü ,41g sN DDD9UW$9Xh c&.08VB'9ts.d'H s&L`19]`.pQ~Nr W]N^L`)>cs\ 'Z AAu#xEb30fasܞhtH7Սa˜ 39 "jp{oI"DZTWW$9Xh c&.08~Ns Ҥ="1̙BS3}0tAD ^htH7&՝a˜ 39 "j@D9.eH s&L`19]`.pQZ AQu+0g MaÜ5xyi"DUr>H s&L`19]`.pQZ AUu-g0g MaÜ5xyկi"DZUr6H s&L`19]`.pQZ AYu/0g MaÜ5xyՏi"DUs&H s&L`19]`.pQ[կ)"DUs&H s&L`19]`.pQJ"Uy"1̙BS3}0tAD D*9Xu5g0g MaÜ5p b՜+Ü ,41g sN DAȚUwsH s&L`19]`.pQJ"kV9"1̙BS3}0tAD :s3xEb30fascs\ D 9]u?[yEb30fascs\ D 9_uCxEb30fascs\ D 9\ꎶ0g MaÜ5X AOuIxEb30fas8Tr %ma˜ 39 "j RA:T4+Ü ,41g sN DAuniW$9Xh c&.08H%+Q]0H s&L`19]`.pQJ"עQ^L`)>cs\ 7SD9tiW$9Xh c&.08H%Q] H s&L`19]`.pQJ"ף1^L`)>cs\ "DGuSc"1̙BS3}0t` RATWU$IY"j RATWU$IY"jRcs\ "DJuY"1̙BS3}0tAD D*9\궞a˜ 39 "j RATcs\ "DNua0g MaÜ5p ru {W$9Xh c&.08H%Si^L`)>cs\ "DPue'yEb30fas8TrBՕa˜ 39 "j RAUwvW$9Xh c&.08A<n"H s&L`19]`.pQy"DU] ^L`)>cs\ oD9,"1̙BS3}0tAD 9tsYXukyEb30fasܪ~`Nq ڎ0g MaÜ5p r{;+Ü ,41g sN DAȵH s&L`19]`.pQJ"W#"1̙BS3}0tAD D*9\0g MaÜ5p r;+Ü ,41g sN DԠYyH7 a˜ 39 "jPH s&L`19]`.pQXS2AH0g MaÜU$#D9䪮H s&L`19]`.pQXK2AH?0g MaÜܪ~PNq "1̙BS3}0tADa"D6"1̙BS3}0tADa"D"1̙BS3}0tADa"D6w"1̙BS3}0tADa"D6w"1̙BS3}0tADa"D;^L`)>cs\ GdH7 W$9Xh c&.08( A|+Ü ,41g sN DU q ru~+Ü ,41g sN DU q ru~+Ü ,41g sNw/YfוcF5H<ǡ,T PI8 X@(Ahg4vX m!scvرb&=K!u=wy.PP5,Q,S\<޾D t 1'01ܮP(D~@(D)Ddz[A@sC uBcBdBJھD t 1'01ܮP(D~?(D)Dez[9@sC uBcBdBjGC t 1'01ܮP(D~=(D)Dfz1@sC uBcBdBzGC t 1'01ܮP(D~<(D)Dgx1@sC uBcBdBfGC t 1'01ܮP(D(~:(D)Dit1@sC uBcBdB&gC t 1'01ܮP(D(~8(D)Djr1@sC uBcBdBgC t 1'01ܮP(D(~7(D)Dln1@sC uBO7wQ,S\L;"c9N!9En I iGd 9'0)Ĝ<ǐsB]M!2I!WS3툌!:rnW "}x)D&)Dvkh1@sC uB6$n ʹ#2bN`cȹ].PPݦٯvDƐsB s 9+ 5 e #3툌!:rnW "L?""FC t 1'01ܮP(D~0)D)DFL 
#2bN`cȹ].PP3^,R,SjGd 9'0)Ĝ<ǐsB]fXYrvDƐsB s 9+ j_E e !jGd 9'0)Ĝ<ǐsB]fXYrvDƐsB s 9+ jE e 1jGd 9'0)Ĝ<ǐsB]fXYsvDƐsB s 9+ jߊE e 9WjGd 9'0)Ĝ<ǐsB]fXYtݡvDƐsB s 9+ j_E e IWjGd 9'0)Ĝ<ǐsB]fXYtաvDƐsB s 9+ J߉U e QלjGd 9'0)Ĝ<ǐsB]dXYuũvDƐsB s 9+ J_U e aכjGd 9'0)Ĝ<ǐsB]dXYvvDƐsB s 9+ JU e iWjGd 9'0)Ĝ<ǐsB]ku e qךjGd 9'0)Ĝ<ǐsB]qvDƐsB s 9+ Ji I WjGd 9'0)Ĝ<ǐsB]qvDƐsB s 9+ Ji I WjGd 9'0)Ĝ<ǐsB]IrvDƐsB s 9+ JU e pvDƐsB s 9+ *_e e pvDƐsB s 9+ *e e ?֎rN`S9y!v@!B۰L!L!r+C t 1'01ܮP(D~)D)DnEX;"c9N!9O22kGd 9'0)Ĝ<ǐsB]beXYc툌!:rnW "TL? ""7w1@sC uBwaBdB#2bN`cȹ].PP1.,S,SܒֱvDƐsB s 9+ *e e 9֎rN`S9y!v@!BL!L!rS:C t 1'01ܮP(D(~)D)DnK\;"c9N!9o:2ȍkGd 9'0)Ĝ<ǐsB]`IXY1}s툌!:rnW "L? ""m1@sC uBaBdBt͵#2bN`cȹ].PP0 S,SܞvDƐsB s 9+  >ܙ|Y=Ms툌!:rnW "|z)D&)Dr̵#2bN`cȹ].PPY 2׎rN`S9y!v@!BWBdB$X\;"c9N!9^e I ds툌!:rnW "|z)D&)D5̵#2bN`cȹ].PpS e 0؎rN`S9y!v@!qO(D)DnC t 1'01ܮP(D8n%x2ȭ`;"c9N!9M?OYYlGd 9'0)Ĝ<ǐsB]w ""ƒ툌!:rnW "7 ێrN`S9y!v@!ϼ(D)DvDƐsB s 9+ xzsgWY٪gC t 1'01ܮP(D8K!2I!NmGd 9#a IDAT'0)Ĝ<ǐsB]ሇ.$G8ێrN`S9y!v@![ I 9m1@sC uB#>""sl;"c9N!9{RLRpI툌!:rnW "nw"2Ȧ2܎rN`S9y!v@!w'R,Sl 툌!:rnW "nyw"2ՇC t 1'01ܮP(DxD e #2bN`cȹ].P݉""[WnGd 9'0)Ĝ<ǐsB]uӏ)D)D6:܎rN`S9y!v@!v'R,Sl_q1@sC uBM?NYپp;"c9N!9~ڝH!L!vDƐsB s 9+ ^7;BdBdJ툌!:rnW "naw"2.TC t 1'01ܮP(DxT e }(L#2bN`cȹ].PWݩ"";q|1@sC uBWM?NYً툌!:rnW "jMw*2nnGd 9'0)Ĝ<ǐsB]UOS)D)Dt;"c9N!9~ѝJ!L!GC t 1'01ܮP(DxT e y}1@sC uBW=~^3Q,Sɫ툌!:rnW "J!2I!^nGd 9'0)Ĝ<ǐsB]UW I t;"c9N!9zRLRpC t 1'01ܮP(Dx͓BdB8ގrN`S9y!v@!k""vDƐsB s 9+ ^3;BdBdw^oGd 9'0)Ĝ<ǐsB]5/)D)DvDƐsB s 9+ ^3;BdBd#2bN`cȹ].Pw""{tp1@sC uBL?NY٥C툌!:rnW "fw22>oGd 9'0)Ĝ<ǐsB]0]Ͼ~翫G߾}/aۦq'S,SvDƐsB s 9+~W[D޽U\d e z6ގrN`S9y!v`>_+uL?NY٭툌!:rnW & '}۷MO*Dew:2n=oGd 9'0)Ĝ<ǐsB]0X7OR\t e z2ߎrN`S9y!v`G~s߈w(Dbv:2Ȏ=oGd 9'0)Ĝ<ǐsB]0W{=!MDZ>B*oS,S٣vDƐsB s 9+sȷO~Fy[DF!rӯ)D)Dv|;"c9N!9ꂱBwGE/|B*oS,SۃvDƐsB s 9+cwjw/~Z_{5o)D)Dv|;"c9N!9ꂱBwI|% ~N!L!wC t 1'01ܮPL"wƓk:^ͻO~'MNYٻ툌!:rnW Cǡ*DK͝7\BdBd>ͷ#2bN`cȹ].*DdԻy9uB䊞=""\wDƐsB s 9+ 9"_B?=u?mg($2}-*D;}(D3J!2I!L-p"+J!2I!L.0o91"WႦOwSyF!r5O3(D)DBL/0M!˦_lgP,S>^` gϖMΠYIsB s 9+7U\3Da˦lgP,SpDƐsB s 9+-3Le3(D)Dr8"S9'rnW "lv2HGd9'sC uT!rB>Wt?U\c e $ 
rN`I!9BI"7ev2HGdWa;O 1'01ܮPL"wwM?U\S e ,vbN`cȹ].*DJ"2R;BdB$#20'rnW Cw[w"W3R;BdB$9 yR9y!v`ه}_29""i yR9y!v`5}LΡY3}UΓB s 9+cnh*L`I!9BO6"?QU!2nv2HNvbN`cȹ].,D~@!2͝?NY 5}UΓB s 9+ȟ~wɤCWa;O 1'01ܮP"_Cw q^L I zM9q&sC ut!o::`RLRl̡0'rnW "{I!2I!BC&vbN`cȹ].P%$Oz yR9y!v@!KfgQ,SD>vh*L`I!9/~E!L!mء0'rnW "dev2HsvbN`cȹ].Pwy""O yR9y!v@! eQ,S>z Wa;O 1'01ܮP(D8l!v&2LD\0'rnW "vqΕ_d'Q,SI% yR9y!v@!aF I &LE\0'rnW "viaY:Wa;O 1'01ܮP(D8H!2I!ŒÈevbN`cȹ].PpBdB!ӧ\ <)Ĝ<ǐsB]"$S# yR9y!v@!As)D)Dxn@b0'rnW "4:BdBO$ yR9y!v@!Aϯs)D)D8dHb0'rnW "4:BdB$ yR9y!v@!Aӏs)D)D8lP|vbN`cȹ].Pp\ e ^0}*q6Wa;O 1'01ܮP(D8hu.2/>80'rnW "4:BdBMLUΓB s 9+ ~wK!L!+O&*L`I!9M?ΥY5Gp&sC uBC]gS,SÉ3 yR9y!v@!!o)D)Dx\ <)Ĝ<ǐsB]'""1}8UΓB s 9+ ~T-P,S` Wa;O 1'01ܮP(DxnM@!L!ŠӋC\ <)Ĝ<ǐsB]'2K/p&sC uBgT+"">xUΓB s 9+ y^VS,ShWa;O 1'01ܮP(Dx凎BdB>xUΓB s 9+ ya+0p&sC uBg^~(D&)D،C\ <)Ĝ<ǐsB]ᙗ_9 I cWa;O 1'01ܮP(DxGBdB >xUΓB s 9+ ~IP,Sp_ yR9y!v@!3 e .d$3Wa;O 1'01ܮP(DxfB!L!¥Le|*L`I!9LV(D)D鳌\ <)Ĝ<ǐsB]723}qUΓB s 9+ ~B-Q,SpAw\ <)Ĝ<ǐsB]25}{V;O 1'01ܮP(DxjD!L!eMhط2yR9y!v@!Sϧ% e .lHþΓB s 9+ ~=-Q,SpigvbN`cȹ].PiBdB> yR9y!v@!So% e .oTK*L`I!9OM?(D)Dh0}s&sC uB_NK"">آ yR9y!v@!Ϧ5 e zLm\ <)Ĝ<ǐsB]W2M϶`vbN`cȹ].PiBdB.Ӈ[.Wa;O 1'01ܮP(DxbʹF!L!B-0'rnW "Lq\ <)Ĝ<ǐsB]2 I vb*L`I!9M?)D)Dh7}q&sC uBǦ_J""\AUΓB s 9+ ~'-R,Sp ']Wa;O 1'01ܮP(DxlH!L!ULuQ\ <)Ĝ<ǐsB]W"21}%q&sC uBǦI""\iUΓB s 9+ ~"-R,Sp5] Wa;O 1'01ܮP(DxlH!L!Lw)\ <)Ĝ<ǐsB]*2W4}p&sC uBGG""\UΓB s 9+ ~R,Sp]Ӈ^Wa;O 1'01ܮP(DxdmJ!L!•Mz\ <)Ĝ<ǐsB]*26}ퟫ0'rnW "<22ZYs&sC uBGE"" >vUΓB s 9+ ~-S,S0b5Wa;O 1'01ܮP(DxhML!L!Œo\ <)Ĝ<ǐsB]'22Cs&sC uBYL*L`I!9_L?}.@!L!M>wUΓB s 9+ ~\BdB3}00'rnW "|12fd>Wa;O 1'01ܮP(Dbs e nٸuvbN`cȹ].P""ܠq\ <)Ĝ<ǐsB]'%(D)DI㦹 yR9y!v@!gKP,Sp-s&sC uBϦ;YVM*L`I!99BdB5}Fn0'rnW "|v9Sr\ <)Ĝ<ǐsB]$hD*L`I!9" I 79\ <)Ĝ<ǐsB]KD!2I!L*L`I!9" I 7}Xn0'rnW "|2ƹ2oWa;O 1'01ܮP(Dds e `Wa;O 1'01ܮ*yg IDATP(Dds e 6aWa;O 1'01ܮP(Dd}s e 6bWa;O 1'01ܮP(Ddus e bWa;O 1'01ܮP(Dhms e c Wa;O 1'01ܮP(Dhes e 6dWa;O 1'01ܮP(Dhas e 6eWa;O 1'01ܮP(DhYs e eWa;O 1'01ܮP(DhUs e f}vbN`cȹ].P2""ly\ <)Ĝ<ǐsB]'e(D)D؞ֹ yR9y!v@!½ͅ(D)Dآ yR9y!v@!½̅(D)Dؤ# yR9y!v@!½̅(D)DبC yR9y!v@!½̅(D)DتSv 
yR9y!v@!½̅(D)Dجcf yR9y!v@!½̅(D)DذF yR9y!v@!½c͝2[6}&Wa;O 1'01ܮP(DWzk(D&)D`,EvbN`cȹ].PpPLR@iz\ <)Ĝ<ǐsB]N yR9y!v@!3C!2I!Eq&sC uB;WBdBʦ*L`I!9w_/Yaԛ*L`I!9w/Ya'O*L`I!9w.Ya/*L`I!9L?\.F!L!~LUΓB s 9+ >~\BdB=>[o0'rnW "|0h22}Wa;O 1'01ܮP(D`r1 e vfxvbN`cȹ].Pb"";UΓB s 9+ >~\BdB>a yR9y!v@!ϕQ,SGg,Wa;O 1'01ܮP(D`r1 e i*L`I!9QYaAvbN`cȹ].P{6(D`I;UΓB s 9+ ~>k yR9y!v@!"۠eӇ Wa;O 1'01ܮP(DBd"pWa;O 1'01ܮP(DBd"p Wa;O 1'01ܮP(DBd"p'չ yR9y!v@! }ț;e e o̽6Wa;O 1'01ܮP(DPlB.fԽ.Wa;O 1'01ܮP(DPlB.gؽ*Wa;O 1'01ܮP(DPlB.i"Wa;O 1'01ܮP(DPlB.kWa;O 1'01ܮPA!2I!6}0I! S,Se`BF(D "?ϑKR,Sg n*L`I!9 P@S0'rnW "(D6B!m^vbN`cȹ].P4>; yR9y!v@!Bd#"j(*L`I!9 P@ø0'rnW "(D6B!vbN`cȹ].P0(2٦OvbN`cȹ].P0(2s&sC uBE)D)D`T4Wa;O 1'01ܮP(D~z\BdBֈ yR9y!v@!""o;D\ <)Ĝ<ǐsB]aqQ e 3}4_0'rnW "L:.J!L!M*L`I!9H'e)D)DR\ <)Ĝ<ǐsB]7,2|1}@_0'rnW "Ny2<4}D_0'rnW "Ny(D&)D`!UΓB s 9+ x ""0c^*L`I!9HS I 2}N/r&sC uB$I$̙> yR9y!v@!'BdB&M \ <)Ĝ<ǐsB]I7Ƹ02`>0'rnW "_YMgr&sC uB$""#,vbN`cȹ].P~\\BdB^5}jUΓB s 9+ pOKS,S릏ӹ yR9y!v@!neqi e 8j>0'rnW "ᦟY$vbN`cȹ].P~T\BdBJO yR9y!v@!mEqq e (>\ <)Ĝ<ǐsB]682MEvbN`cȹ].Pd~M\BdBN0} yR9y!v@!m-qq e 81^*L`I!9H)D)DDQvbN`cȹ].Pd~G\BdBN6}*L`I!9HG)D)D ӧ\ <)Ĝ<ǐsB]6<2e<0'rnW "ѦY3M/r&sC uB$""p3vbN`cȹ].PD;usq) e X0}*L`I!9HW}$l0'rnW "N""gSvbN`cȹ].P$;yWLRfL yR9y!v@!_!2I!2}?*L`I!9H7|$l 0'rnW "_ ""p)Ӈ'vbN`cȹ].P~+tP,SL\ <)Ĝ<ǐsB] 6RY>o`I!9HgB2\Yoߊ`I!9HGB2\a*L`I!9H'B2t<] <)Ĝ<ǐsB]5] <)Ĝ<ǐsB]Iu΅_Y 罫0'rnW """k}罫0'rnW """wM罫0'rnW "Z""-罫0'rnW "[""w&sC uB$yBdBB\w&sC uB$E!L!.{ yR9y!v@!jzY[pUΓB s 9+ Pӻ~2܈vbN`cȹ].Pd(D)Df\w&sC uB$F!L!'0'rnW "W6 e -'0'rnW "6 e 9'0'rnW "6 e AK'0'rnW "W> e MvbN`cȹ].PD(D)Dfy yR9y!v@!izY[vΑ*L`I!9H鍾BdBnG0'rnW "> e }vbN`cȹ].PD(D)D`N8] <)Ĝ<ǐsB]I47R,SVT|Wa;O 1'01ܮP(DM/""!3UΓB s 9+ D|#2l3UΓB s 9+ D{#2lͱ3UΓB s 9+ Dgoo\p82lЫg0'rnW "_""+^>] <)Ĝ<ǐsB] t^¡*L`I!9Hm$pԡCUΓB s 9+ @o I ١*L`I!9Hl$Pw&sC uB$J!L!w&sC uB$J!L!{w&sC uB$J!L!;rw yR9y!v@!gz?oY}oesC uB$nK!L!* `I!9HżBdBhꡛ'rnW "q^ e ةۇVvbN`cȹ].Pę^{)D)D`/ yR9y!v@!fzoY=bI!9HeBdBn'rnW "if e H0}qivbN`cȹ].P^›)D)D }%yR9y!v@!fzY wcI!9H廛BdBLK\'rnW "an e 4}7ΓB s 9+ 0Kw72XbI!9H}BdBrMQΓB s 9+ ,v;2d8'rnW 
"Y7w.r7Q,S@髊3yR9y!v@!euIVLR3}_q";O 1'01ܮP(D. I દ,NaI!9HX!2I!\E'rnW "QV7c$0bΓB s 9+ $[BdB3}q'rnW "I+P,SM_aΓB s 9+ $(D)DM_cΓB s 9+ $ӛ(D)DWM_edI!9H%""QyR9y!v@!dz2P1}'rnW "AkP,SU_yR9y!v@!dzs2p雍<)Ĝ<ǐsB]15_BdB8=r9y!v@!czc 2p;O 1'01ܮP(DrLWY4}EsC uB$| e `]ΓB s 9+ ;u(D)De7^ ;O 1'01ܮP(DbLסY.b cI!9H:""L_|I<)Ĝ<ǐsB]I1_BdB/'rnW ")W+Q,S6}FsC uB$>|% e -wvbN`cȹ].P^D!L! wΓB s 9+ Yeܹ[=""@ q<)Ĝ<ǐsB] q-V!2I!;vbN`cȹ].Pvfj;O 1'01ܮP(DB\fwULR{4}?'rnW ".*D&)Dݚ%ΓB s 9+ X""M_fI!9H-j""r<)Ĝ<ǐsB]0^BdB6}gn'rnW "wQ,S7a;O 1'01ܮP(D"LףYnvbN`cȹ].PDhG!L!ܘ;t<)Ĝ<ǐsB]I0^BdBE7魳sC uB${E e fM_7ΓB s 9+ )D)D[7}";O 1'01ܮP(DLoפY6bv)vbN`cȹ].P^YI!L!l{+<)Ĝ<ǐsB]ٿe""L_7ΓB s 9+ SJ!L!lm;ΓB s 9+ ݛPK!L!l;ΓB s 9+ ݛLK!L!{evbN`cȹ].PFze e `?sC uBd聯+S,S;3}_'rnW ";7^BdBء۸'rnW ";7^BdBر['rnW "vE͝ ?zQ e `o˳sC uBd.A*D&)D.l;O 1'01ܮP(DvқBdBm}vbN`cȹ].PڥWF$@K{'rnW "v]Q!2I!p wY<)Ĝ<ǐsB]ٳ/ I kObI!9ȞM""M5vbN`cȹ].P9A!L!p'rnW ";6;NP,SbnAC s 9+ G(D)DJyr9y!v@!_2@UYΓC s 9+ ݚ^g(D)DN6}{@sC uBd e s];O1'01ܮP(Djz)Yk;O1'01ܮP(Djz Yk;O1'01ܮP(DvjzYhzGPLR3ﶣrt{ ?o=wT '0ϰ#<T!rBNjH''0ϰ#<)D8~g avW aaG 4y+B!B'3y1аhweNg ;ba?3\B8a?3숁0pES1_W aaG 4y+-D{"實<Îh W4Uo߾?{Yj8a?3숁0pESȡ:PqW aaG 4y+*D&W~i_M3y1аhǂ}( 6洯<Îh W4VˣD>䴯<Îh W4ṼFxg`Nj8a?3숁0pEsݷx?}=i|0ϰ#<"O~7|4q>~g a ȃߢɷuȟ=8~FB,D~OY!Wԍ"z͗㫿z0 ׾nӟg?@!WM"W(D8  B(D8  B(D8  Bo߾_w`/Տ믧( c74loч Ƞ?쫿p_?k7nrCÆ+PPg'4l?~ {nhش)Dx҇}L<?pf톆O]pu  |v@F1vC&3tSPpn}uN?pw>[g*&=~-o?ʂe:)D8`!`>o/DO%pEW??n/e؞/W?՟?gMA&??ӧ=pC3(D`k'بυȗo9 6?W fn "gTO9 J [I#߿sCC7-3kMX]"O_hئ|C>,w)D8Dž||'lη?m"?ۻɶpC;Ǽ; v[ ݡ !_[ۏA'8㓿e3B`!nZRp̡ ??fzlӻC?2C`7E)D8#\ئC:>C3Rp6o[f`o=qWh؜}_~6\B~>r|' umzeg?1"퓅 ؄wp [to74lճo7p(D8B~r>ݏWpߐ/ؤ__hب'u7@3};?՟߼ɰY?ݟ_ #~>'lԁ (6՛M!BŻ I|lԻg5 J,\_~\ؚ?u+lӉ+}lՓ}p jxm^O&?}M!B_On6<q}~+Rq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"˛?e {m"8܆Af4EF l1bFpZYY~J̢\};oyS=-QRe6"[I̽(`J}ȗBJeDF)AD-#D-#D-#D-#D-#D-#D-#D|8+bɧ 2W_ fH D젨<DoU~l}8$K6>nԊAS5&_߮"d "@̮bp$Ta?%뤌Q/z~e7pH ī\ Ҵs "W}Ycf/%Vp<9֚EDܱt7Ad ,A2XTԎHcpR.׆eH Dk؍a;jZ\ʧ>WO:A$tH Ī\Q+2"2ZA Ee+8}4G:+:fdU R$vN=^!ro;o"@ R厙rHy˰V7)DHGtA؀ DۀbqAq dH D`V ;H"5Zz+|HXL^D )DH pf8;h ~H DAKgG` R$ "3w"a@ RAdYxǧ)DHȰY4 
8H"dL󖙫y<{{Q{km R$G/Fb1s||D "@f;c;_4p*|n6)DHesȏbQn|wA E=3͒1ZΣ|")DX-_Q.YDuw"A ErLe >iAD "@+>ui{=HzY?H5wgx,ZGOj]㪵E&xH j'ޢXAdM狳âX5z4_n<H"6*oAdMZ^)ӛkB "@̆TDM;cj~;tL "Ai;X6b痳![djmevH Aۗfm%K=vN@jKD8$H"7>]$}3 Ҏ&q;??4GwvxRCqf6]2ΧwR#D"@v ;AȎ dG#D"@v ;AȎ dG#D"@v ;AȎ dG#D"@v ;AȎ dG#D"@v ;AȎ dG#D"@v ;AȎ dG?mNãIENDB`bayestestR/man/figures/unnamed-chunk-12-1.png0000644000176200001440000015761413607554753020474 0ustar liggesusersPNG  IHDR .PLTE:f:::f:f333::f:::::::f:ff:f:f:::MMMMMnMMMnnMnMnMff:f::f:ff:ff:ffffffffffnMMnMnnMnnnnnnnnnnMMnMnnnȎȎ:f:ffffېnMnȫff::f۶ې۶ȎMȎnȫnȫȫȫې:ېf۶f۶۶۶ې۶n䫎ȎȫcfȎې۶ pHYsE4E4Ƶ IDATxߎ\ua(hb'Flg8$ IցeOݤ]vw} 7?O$Ӛu?_gr|u5 7/c[97.'IM9|h7OJ_#F?fDÜ}{;>G>, Oq|oy#!Úq|uSD|1<~oM8"aY-2<~2b8}#o;w "ȘӟbY1<,@A䕟k-1 K:>N//A/Dg \0q1|qD`X`bxq?zSO YL[ qhNȗ~Z#, f-D!g \0i1.m?b%p[挏MƿpcLOTŷ|oD`8SW{şg^Ќ!"[ƌG￧3H|$A3a?> Ox56 g/'};1_^mjL_4߽{L "0BG^=<? Ù/> A7?/?fg>B _䟷!2p|A# "0!_e* GWD`8B U2@?@1TDg \ "Џ.P UY(*, CAq U2p|A# "0!_e* GWD`8B U2p|A# "0!_e* GWD`8B U2@?@1TDg \ "Џ.P UY(*, CAq U2p|A# "0!_e* GWD`8B U2p|A# "0!_e* GWD`8B U2@?@1TDg \ "Џ.P UY(*, CAq U2p|A# "0!_e* GWD`8B U2p|A# "0!_e* GWD`8B U2@?@1TDg \ "Џ.P UY(*, CAq U2tR 7!_e1^B"!_elYB"0;!_e\E`JB y ||Aګ.!v2@o!F2@_wCl"0!_e9D`pB =}Ȅ|A:f 1|A9p|A9z1|A:I!&2@9&Uh"^)*,a8G1TD %x\UH[F|A"[F/ !_eq 8IWDpJ/p2_1%!_ec׍hsB GJ/7KO *'jxDŽ|A^4_?#B I;H |A3v" *!d& * =c(*]zW # "pocw/(`3;HIHA+=^E *,a*^_WXb2@?f-')*,a_\Xb2@?,-)*,a_aXb2@?+*CA"T%uU *Az8PK U_z8V KU]z8Z +UYzH`AB J/:GWD`Wi"$e*(K䤿!_eW" "$R|A$/DWD`'9K UGzh!":|A^"H !_ewF/,BWD.=B~1` B e KUJ_X2@?J$Pe~% %=~U`~ 8KIzy*CAq0W)*,aաkS UY0қCoW "p] *(7~`fB I #HF01!_e1_%2-K(үLKWDaW f% "p0kUZzcK9 *\+0&z|AƓ~`FB WJ Jd0!!_eDž!_42U /LGWDaaXf# "p0KU.=* -\|A6KO K|0!_e҃/ DWDg M _Bb2@?zJ SH0Pe~%&~` 8K(=$L#B,CAqPzFHI(*,0 sP U(=!L&r|A.J/I0!_eK|ү(L@WDz0k U8/)|AJ/J0:!_esһү, NWDj0k cUx]z3Z *.-Є|A^^ f~}`dB IK00!_eWׂ_c2i` % "pRz*XDea * ~`TB 'g_j2K`%% "ЏH cR UYB\z"XL!)*,!-=,'CAq~`@ 8KJKJ0Pe~%DE_vb2/<FWD#Y`]WF# ",KUNzXZ*|~`(B 2{=`yF" "$=`|Ad|A2;!G! 
"NzIAQ*4~`B VAWDKq!*DzI?0!_e3F ",/~$`B 8K8V:9!P@ 8K8T:9%T@ 8K8T:9)X@{ 8K8R:yECAqpt)*,@UGS UV~H?Л2tsVք|Aҏ& "+\~@3!_e`Y# *J>hLWDXU: @_B ҩ&U֔}6J?(Е2tYQ|A%+ $ "(\#@KB ҅u t$ "'\+@CB ruҏ 4$ ",']\/@?B jm-O # "&"@;B 8Ktss(*,]ϭO4 "Џ{Jg=J?9Ќb2@?(].@/ 8KtS~zPe~%O:H?=Њb2tS~~!_e`鞧(@'B :=OU F|Aek3}*H<{H?EІ2tʳSm*H@`|Aå9Us~ @WDM: H?tp8b2@?nnrK1TDg 7K'9Q "L$]D?820t~HB 49N\ *H8qG$ ""4~8B ,-N# "L"ⴐ~ 0B !N# "!4~(B NG" "L!]ᴑ~ B  N#! "L ഒ~B N+! "/4~ B M;G " /]ߴ~$B MCOWDg WI7J?Pe~%\%޴~,CAqptxT{S UY5MSM1TDg WHg7mM3Pe~%\!]~6CA'ܗ20trZU.nzK?pOB ҽMsIWDW. *J6Q!_e`TfGWDUA)U.mƐ~N^|AAC1S!_e`Lf'DWDS:EI;UlƑ~V>|AV !_e`Df$BWDPKy{Ulƒ~^|AѤX!_e~%\k~dCAqpAO)*,t[3S S UYyfDv "Џe͘-M1TDg gÚ1[؛b20tW3 ;UƒjF~r`gB PU͸.KWDJ:W} *#I75#K?+!_e`$fdv% " $]Ԍ-|Aq{`ؓ20tO3 ;UiƗ~`?B (-1GWDEA)*H4SH?!_e` f" "!"$N|A!+ie؉20tD3 UFnh&~`B 8Kx!L%8.CAq\:Ky](*,t@3 {P UY3|f:Gv "Џgtҏ4@1TDg J3J?P "tng&~NWDh.L)X@2\R2!_et83 UB @onfR|A̴ҏ6 *im(U:KG3K?P# "4Nff~FWDh,L-x@2W:\ !_et/3B @[\fzG |Aҵҏ8*]cr2UY@! *MS%s2TYB1 *,t(R UY;Nfn "Џ~F 8Kp7R UYAvb2@?!)MCA҅Rҏ;DWD',&-|A~}b<BWDh',' |Avurҏ<@WD&,(|Anm̂=\OWDh&,)Մ|Afe̒ҏ=\MWD%,*|AVY̪O>\KWDh%,+|ANQ̺>\IWD$,,u|AFIO?\GWDh$,-U|A>A?\EWD#,.}p !_e~%J0K_\C1TDgɲ9'WP UYt C 8KVa0Pe~%J0Db2@?ΒESd` @ww *=CUZHg0HWD A`!_etw* >>DWDh ]=B @^-|At§*q7 *q7 *iW *ag *aSw*Y҇ΫP IDAT*Yҗ*QWO.UYtkҷ)*,YIyUCAq$ݼuY 8K.^8#}pb2@?ΒI "Џdޅ(*9ޅg*1څ 'g*1څK7URҭ ^' "[.K_ JWDI.l>x2.]"}'!_e #ݹIP5B @Fsa+|AtFSW*ʅҷ * ƅ * ƅ' *…+NU҅ H " "/ݷp)B } I_ ".]p B 8K[Vf%Pe~%K-\-}4b2@?Β٥xA1TDge 7H  "Џdr鲅[S UY2tM҇)*Jg-(}:2ptҧ*GJG-,}<)!_eH馅>% "(P>2ptBA|B qA % *I,Ԥ/>& "&P>!2ptBQ#B Q1 e#*GI,# "$B A% ;H| "#!{B 1 H' "")!_e銅O *,Q:ba7cG 8KfnXMb2@?Β v>'x "ЏdB邅 (*,O:`aO{ 8KXUPe J_# "]:_ag:W[A v>*0DtW:wNWYA }!}W`)3p_pH*wVe2pWl;IU)p/`yB =&}\NWDt *wNVy8!_e w>/' "O:Xڄ|AI*UX2p7\;KKUY2tO )*,E:VGCAq"ݪpw#`e 8K&NU8@Xb2@?ΒIK>3 "ЏdPC u)*T8DX2pLO e *wT8HX2pHo e *T8LX2pFä U *wNT8PX2pB E *K**}pIWD_OP`MB y KKUvS8XX2tG|A>:V$ ",>;$ ",]>;$ "+><# "+ݥ><# "Џdh,Pe~%CKg)O(*,ZJ!%}{F1TDgM 1`5 8KFnRI_Q UY2tBPXb2tBRX2tBTX2tBVX2tBXX2tBZX2tB\X2tB^ X!՛7˦AKH! ?ŦA_DJ(tC1}? 
AtB C`_6FAҗ2&=d = q =/e?d HW(tEV1u7[oDE:B-C>|,A3Ut "!ݠGXļ!ͯ(Kg~7{ "!GXĬ!>}{ȏ4<|xÿ  5LgAT5tB'{` W{v rgD!gx /%LZ gǷ?S  9KƓO%},abx7mퟩj%I'tIV0i1|4lD>|o~U "Џd8n7 &-|3 "GӼ &Dgh L_ /&g<[%DM:=Ua /ݩk`gg Mȟ89\D ",]Q.!ii?ǻ?H5twBK`~ӇA3U7~8IN)}LoO4c`Wꄦҧ m 5JA]J&>oD՛M{~'愶 o9񱪟2PnN+}Ln}yY~' "K'>oDg/6 ܦy{ZK(s> "4M-}Lm"Mh.}Lm2|u~pgڄ7 ̦-˓3 nnMh/}l2U5}1lDnC "АdȄaYM_ 9'/0@?Β>Vf5}1lD޾~۾;H7& #}JWDA:1a s`RB  ҅ I+ULJ`2pt_P |A>Y$ "\-]00%!_eZ鸄Ѥo) *J% '}HWDV-a8`FB i J-U.KPl2ptX҇ ||A뤻>\# "\%00!_e*骄AO*HG% +}FWDF)aX`6B 8KK'% ,}LF1TDgIc预(*,+00Pe~%}{>` "ЏtN \CAqI\b2Y&ap`*B V阄᥏*[[>bf" "lNI@2Q$a3`"B 6鐄)y*ۤ;>d! "lHB2I:#aS`B &銄IOi*[#>ff! "lnHF2A:!a"s`B 還I*fg&! "\HJ2pYa*`B E|ɤO)*,i']0I0Pe~%ݤ>jf "Џt;tG CAq4NGPb2@?ΒfJ5P UYLaB` yp) *祻>l' "FT2pVaR`xB 9hi *礛>nF' "NFW2pF:ab`pB b *K#L-}MWDx]aj`lB ҹK8CU^E\2t,GȄ|Aפ[>rF& "&݊002!_eT *H", } LWDgIP%q)*,!݉0.Pe~%-3>u "Џt%"ҧCAqtDXEb2@?ΒґH;R UNI7",#}JWD8!0(!_et!B|AҁKI<cU^J!,%}IWDx!0$!_etb'|AuI<#UK!,'}HWDx.݆0 !_et‚g|Age+J=U>BXR2tšҗp|AO>}# "|"]0!_e(Uo*K7!+} FWDgIR: a]`0 8KE+K?cQ UYBXYb2@?Βt E1TDgIN:amw "Џ$';CQ U>H .H|A1K0!_et w" "NA .@|A',"l' "%3JG:`2C)DzdFDTFb(X'@]$R e|y( Eb(H:%"I9HzPAB#4I"ȗR#B!@MR ~P(=D _J!ЍtK)DNr@"|)@/A@zPABL =H _J!Ћt&$C/D:H!ȗRt"IG K)DqDZ"}Hg8`"a@-R .#08|)@ LzP ABL'=O _J!Ѓt&(A/XL/߀  uJ)D>%K7`R@$R e TzPBcY2tx&*@b(XL-݀ 5J)D>%SKg7`b@ $R 楣0X|)@ \zPABu =XK)DZm ҃%H4`ғBcY2tL!=iJ)DNi~g )|)@! ؏ E/hS:{6"mJg4`_ABMKz"ȗR4)ЀI2R &?y@ _J!Тt>)=qK)DZg>'|)@ د A/hP:9$" J3`3AB=l[z ȗR'̀}KOR 椓 "I3`sK)DZe@Bzw|)@kұ HHON/XI2 "=z;BcYR$ʀ`$R eIt(2ҳ}J)D>%%ҙ HIOLb(XHG2 %=}3BcYR Ȁ`$R ? 
_J!ВtK)DZc@RzW|)@Ci J J/hH:Y> "Hg1 ,='AB,$ȗR4#ŀ`R fC _J!Њt*D _J!Њt*D _J!Јt jD _J!Јt E _J!Јt E _J!Іt*F _J!Іt*F _J!ЄtGɸ 쯿8K!ЄtGBO|( XN`@5=]p㉏f"P˒ݥP@`?J ztF!,]:I$YDzdgT%=؋B?DzdgT%=؋) Q@},KvN_@U# ~zPP`޽ЉB` Lz(EAQ,^:zI%4?B'r'9P,^:yI%` 𽋏|ӣ3 K/>L/o'rw"K^@҃ M/m'ͣ3 K.B& O/Gg" ]@ң M俻ȭ)D.*G<_DtF!lT*=A/'m" eKg.Rf?ju eKg.Rܦv9Kg"\@nS IDAT M^ц7_B`҉ Vz<0KOʼmQ,Y:pK(5Qm'O*Xt*Pk [ՙ&7(D,*g<*9[D" [@# Y^lCN9;?{I!,,G*I Q=?}.BcY2T:lK)4:1]F2~&" ePT.=py㐃<*%P`NfOS?Q#3^X Z@c MQܺcѱ6U²dtS̨Q=DcS,U:h ŢQjHu8 J,`҃ "O{,ˤXt!=BwHA!P,BzT0_=q,B`) X`6|)2Ca\R eJg,`! G\ȏmg"#qLJ27W/?ȣZ"qL*D֟x7(D)H+f2o!r'ZXt$=BB` X`s"OV WGY頻Nt,Jzd0A;g.^sۛ(eI,f18}ڝPt&= dD!G^ 0vvCzxbF!׳uM"P˒륃@vI U=s~l\Dzz\,Pzp0qaLD!,^:W \LO!RJ!,V:V]LN!RJ!,V:U]LN!RJ!,P,Tzx0{w^yXt*= _J!,L,Tzx05AB`Qґ X`b|)XR %I*` i "KSK)D$%KO050}K<>MN>4hg׾9MN>4h""ˑS¥SRR,G:K bLI!RJ!(,^z0!H)bx1""NRK)D"K)D"K)D"&GK)D>%[SЄ(`2C)DzdteLFb(XlQ@# H "P˒m hDz0BcYE:BH3&2qb8ݗ_~Һ)D>%[Ќ8`"&oڴOkXtڑgL(}WNB!4$=FAj:g:A"KO@C i>.>>je#Xt|iLbt?6_?Y]7V(D G)\}n?jB~4&= ~G{f"Kg'1FWȼ9AdaG(DN@kS g@Bz4'= ?N ;F>>-QT/'yp2νe ڥsР`ܸ xa=VOMD"K&E@qAbN1s;W4M!PtlZl7yb{vכBr)=(5*_j;6[!D!@%ҡ hSzPjBKw|] 3Ъt$ŗnH"uKG&U@I Ë/ݵ*H'&Y@)6U=yɥJ!,9#ve WOȜtޙ1I"P˒Wa h[zPbdb8Gu?^m9H"K%m@AMyD"Kg%u)@Alq8a& S+֥F7OڏWaG о`A'g_ r! о` ݻ'7|uKx>*>PT+/?{9w/ϼQ/gc "uJg$aX|)@ Ez0 _J!PtDzv$ȗRT)~"UJ$y8A}߿9Ȫ)DjG@OQJOW{Ɏ^ Г`A:D!@J:]I<F}c2,K"^gcM CaYF@gC1F&O!  ò$ޤ#L O!  ò^:I=FNo9GW~⃭BcYv/#K ';ܟhH!,{F@҃ݍK =s@mҹPzqA'>ERT&.G;7[9,B2Tt)=٨ f"uI"O OS%N*(DlzL!Pt&:~`1@UґVz#H)@U҉VzCB&@t,=͸ 7,B&<t,=͸ U")DjC@݌ %2)D*C@# ^B"4t-= EC3 й`[_Oy0G: KAv1>ȯխ%G: KAvQ俽Vw=t% jн`%ݽQ*DNB@c(DJ)D>eA@# R e٫tP,BBcYv*"K)XJ Ј,BBcYv*A! C)DzS:KCJ)D@R *#Fz0 _J!PtHCK)DN@q@|)@ @`ABJz 0 _J!Pt8#=d ΝuPT ~HD) G߽iV n~u-BpFz$0HYԦyosB /~IE( G_^].DM" t8'= }Zm)D:|#B .}.HE䟽QĥwV[ Fc@Z:\ 06ȟ?w!_m,roìB -|.IFn66nz[-)D34Ss%Ff֯=_<nQcF "oX<." t"=Ѹ ATl^5ŋf"P˲/Uz8pq31o\lDzK:ld\b8`x?y` /}zbNQU:\08ȟ rGtuB`ҁ`A~skMw." 
}JmJor(D(gKOP7w} 8ťB`q@zp d`v)D>ei@zpp=IxόBcY+f(g M ` ;~|"P˲],P$=D8kBd-쪪XJGB1 %3 '쪪XJ'B1 ;"wIB,N2sSz] ׾{X ؗt(ک^pSK:KOR^QRI:L =JxE!RJ!'0,BB`O!` Y H)~3 4 H)~# 4 H)^ T5H)^ T5H)> d5H)> tc R }H'*""{N/SJT^RRA:L)=SxI!RJ!0tvVz|BdW &.JOU+D)DfN.SKU""K* r eٜtpZz)XI'+ b eٚtl^z)XI9g+CÏF~CBcY6&Z搞H "P˲-0tPJ!0tdGztO/U:$=^'ȗR*X撞K)Df,sIW "sJ',@R 9 |s|)i`N 7AB`F0 _J!0tVUzM/O:+=e&ȗR&Tf]K)DfN*sKY "sI٥-@R s ҃g|)L1`ңc|)L)`ңc|)%RL.M,=vTRpף3 ɥ .@ QL.L,=vT\G=?:Z:]zhBGg"SKK^ ?PtB`bX䏾{B'3 S @@zt(}qۣ3 iC @Bzt4?B'r'9PL+J"?]|tÞQL+I"?ۉܽ?]ґ $=~3UH7(D&N$! Н Ӌoҁ &=z3mv"OkR:Ĥ0@o&;Q@},%K/'m" e`8!1傥@PztfpiD}pF!,+FC/&W!MvOa?O/BcY.W:D0@_K O/=){7ވ?ZԟD"PrY *=2Qbm'Ov w(D&"a1 Е)o]|>Wg~~rkLvϿZn\C^AA!0tKaOl}Ed݇ )D.!QL%D҃'AېGey,c;~^fPr_׿\5dT9 .=zR/ozQ3?˫]Ro,D6ɫdӏD!0t KObGQ̩n6l/x?=#?W"I G1@GF|QWn TpB`"P(ȸ OxT#3C?Pq]D"H*1@?(DnݿXmz̟/mH![n#m"Hg*1@? 9Cި?6%=Tld}_"HGJ1@7 9q:T'|P!~B[?O)D&N Hcn"O{,zYۿ{!A~?l$ q Ѝх!uw! a/UL!@YՍC ՇB` ЋB](IdN4"w QO:}T$=:1.~s."?wn=W_\] " Љ̼՟O9&XtJz(aBd7oQ@u<PPüutH!8PXBBYtLz,taBɪB䢕ձ,&<j]Y喙Bd|YKIf N 9uxnoZl he4P`pjw yq!/Ӡ^(Jf OF"x )DF!PhA}HO *Do}gK"eҡJ оz'TB]o_~Oo 2Pph.Ap>׳m]@tTz<4o ?x_Ճ;w_v;Tl.{7dF!P&8*OTHM"A?3aS IDAT[(Dʤ@y ct "B!P"8S[g_ {BL:oT+=Z9~Oߝ_nQH G4@F"yn;I'nQH G4@ _dsbBD:lT-=|Z<."5(DKg 4@ۚ ?D!P 5ꖞmk>/DߟC-5*Mk>P<O/?/>QH' ʥ4@R AzA вA_/or)DJ 5@g׾9MN>4Dz\tXhBBcY.D:e,@zT4L!RJ!,"2 =ڥ)XːRR@},eHg eHOkf)DJ)D>"B5@$R Q `)U|)(y *AB`tXh _J!0F:^,Gzb4J/!.$=$ȗRKmK)DvmK)Dv˒MK)Dv˒MK)DvNK->{{UP,,&=Z40?{g5k{UP*+'=)U:V,Ozr4H!RJ!tXhBB`GPD H)ҡ`ҳ= R ݤ327@s^ nґ`9|)DzZ:Q,Uz~Fb(XUK JoH "P˲f<\ BcY,'+=#1R@},ˊG8@c$R eYtX hPJ!tX h _J!0\:K,[z4E/.%-=20}K<>MN>4`$t9ВA;ǩo}y7M!0X:H,]zD!RJ!0X:H,]zD!RJ!0T:G,_z4D!RJ!0T:F,_z4D!RJ!0P:E =ڡ)("ZK)DJg9@;R a i AB`thCz4C/$ ZK)DIV9@+R ! 
AB`t~hGz4B/  ?szTF!,+N-It6&:ǓR(D>e})ІݻmQ@},@[S %:䅃=VO!,NIu&$o/>,s$XI'֤:@ 'kn}3QL!,k IvN ܹVB&М`h 2~]GqڊtԌBРhh ɦ8bӓF^-BРhh dՃϋm!"K&; '7\T4oR(D MJw7kWz[V:24*=o\߼bhBZШxXqAM5qap R k@#` QS\-V6U]!bSU@C`;d} @C`Y?Ts{hZR:*4.=md'^Q\-62ȯorAD!ptPh^z, 4`{#nvQ@,Zs@҃`F'C1?T̺,Y]r\ˡXH=O .|_:?D!5,+ Hz>{m_~~|gowѣGXHG= L MTwt{yۡXuH.=r)DJ)D>e a\ R i\ R qX R  qX R mT|)6|ЍX*AB`t<Hz, _J!E:t$=J/,$ȗR\]I}eK)D.Kg>2 "@gc`R K3Hx^ E`НXOol ҹ;DeA}Ν@!Уt,Pz,PA?"=JG?T(D=J~oC"]J.? O!  òJg.?L Ї|赻@eJn~`C\!5,҉S8'O:~|[럼I YI@Э `i%6oϿ[iz eJf\b<1sy<9O'9)D>eP:t,} XqC2/X=}ů8#:>,̨ _GN4б)`aJ gbDg)DN@'e׏Ĝ;/n-" S,еI`Y)D.zrI(D^IGΥORP;7?yьBt\4("nT$(DN@'%6QHOKR𖙳O<̺39H"@L$B.wM$RlcK7\zB;B`qARq齻;*Z:KugY7$ ~3n\_?s7N7ypf)D#ҧ}ifY1⵻H'O12o O|\CGξvY  `)-"խ_4$oG/ξY lOK16ȟ yՐ~ڦx)}FI'!93 e)`!' >D"P!2 }+SB$oo+DN6R}D"P#r>),CIbxɗNTȲܿ?gO Pxl#W(D>ޥ/8'}ZXiJWޚ)D>ޥ8'}ZXѣ}Կf ? ȗR/ }bXAB }"@җ\>5, _J!t/}%S"@l>9o _αQ[л5?[O+ ?~q{uޭ'9PK_UP ݻmQK_UP t{:`vAۋ˜F77(DBPA:V"Lx5S}K_pr>;we#JS@NW=zVf"@W\-} ^=~z҈+>%P=K_pIn#CÓV>/:P=K_pYj "W<s<͛%G c+}>MTm\4]oM?oBXJ57xڡX{z@%7FfLM(D>徤ILPQaSw\wa7oU@},}I_pbówn|z6Q@},=I_p^ȵmǀ4B!,$} z)DJ)D>~"}V" c WK|H-U6U]!bSUv/"}ָ ~̽OB!t*}0@Y?Ts{hZ:`V#M7APA/٦ XWG_~{=z Gk{v>kTi`lkУ=H5):`7@""@&}BBO]RRI_B R ;zv>wG/`ws@}R ;zv>wG/XJ_0FPBcY+}I@u$R e9=J)D>>Fb(XsJ_0V PBcY)}9X3@e&H ?w wWߗeQ]I_0ZP E?j#[ Е<!u) O߽Tt4 'ky O"u) GZ%x#BIZ@U |>1M!t$}%@i*wC:jD"@GWIGj2:?;ׇy\#7Sg"@?JHj2:?8B}[F(D~(>Tdl?|}~r"ޏKn"@7WKJ*22ȟ<0sі~{f"@7WKJ*22^s;ED!"}'z G\xTa\bxpSݱL{X XK_0i Ó ̬Y}<D!,p`b @ %W"A]FZڗn`b @ F33W>sÏl`rS @F'`ČBh_ɥO-7ĬVo|}gO_w(D/A76ovM}'?bо5;3H\Fb^6U"Orh \`6S @Xqε!w?д:Ib& G\TP-K_0I K/ZV`F @ xX0а:JfF'J a+uf>D ?>.pːK"@W*};'>ERJ_0 iT?xsY$DzJ:O4Iów BcYN$}ҧBo>ER@},ˉ]T)XH_ @N"9XH_ @N[fM|,ˤXH_ @θm̜PJ_ @ E")DF؋ fl_7"Oy,ˤڔ@`O'AfV?jY Ц9{>OT;oA(D&/؛) d\ޝ;wZݤ]W"@WM2y2D!`s(}PR-J_G@BBhPJv"""@WU)ړ0`'AGC|?I!'}]O< |)Мe9{>"@sW] ȗRI_s@ _J!&}Q@@|)К59 "@cҗDO?{'ȗR@},"+r"ҧ~{wܹpH!,K/ImթrPX%O@[bxNpu٭:P@},qbҧ =%1zKZ|>-BcY (}دöC4" )kq'!}E!z}D"@CҗDOC{58}rڀ};_ߛk *}ثAɫ]T:w'yyn"@;OD48?lљl G p'"pW 0 cbn N"G7p"d9rx<}̜SU޽z{?LFzZ"|AF:Dgo_巅l,1HpҧD2|f 
L#K"['}Dd3cf4>FylA/v}QC1HgpH#Y_>ۿwAD:B<|۪A$2 4w{ɼ " 0tA8Kywa4>Hg1TD97]/I "U` @pHAB:}G& Ref4>J0TDNW *|X:yJ, Re~<K'ozI%3D "ЏQ @3pHAX>*&}N`2@?Gc7ݤ/ "U`x @?pYQ "qo1*0t` p E (IjB2J'k>[P "*0t`0pE 'Mr@2Ij>]P "pҩo*0t`@?E &QzN2I'j>_S "`҉1*0t`P7E %UL2CIi>a;S "Haqo*㱼.XKc2@?Y4*㱼*ZJc2@?˫I4*㱼&\Ic2@?k94*0t`xZ IDATS#E  #^H2Hh&>fQ "( ~*0t` sE  "BF2cHh&>h{Q "I^*0t|`E  !FD2#Hg摾i;Q ">*0tt`&E  JB23sI5](Utp`2E &}WDұO*\:40m؁Pe~nuCAX~&Qi Uctd`JP1TDk Lc2@?ҁI@PezKe&>pU|Ah-V)UtZ`ZPWDa@"_e:Kge&>r5|Ah,Z(UtR`j3PWDAɥ@"_eJd&>t|Ah+^(UtJ`zSPWD!*U:#x"_eJGdVvS "@S i|A)XCg(Ut,`9E t'}WDnҡ*L:x"_eI'b>S "@/@ Q|A%XT|A }pE 4NJ,E2 /P "@^:g҇|AK^\2A2q _HF;(U .zK>E 3/>S "@Z:[.E /\>R "@V:E E d.\>Q "@T: E D.\>P "Je:U# p"@g2ump cpCLܴFc8AYL]%}'nZ1 X.ܔ>,e~y,1nK_J[Vh 21 HJ*A2) JK*N2! wHK*pJ2 wILk*pF2x wJL+*pB2 p wKM*pL2h HM*pt'%|A8_:C҇E .kA p"_eNN@2gKZxXxWDt'[7Dt''6Dt''6Dt(m 1@?s?< OIP/MNc~~,q>_1 )fY E )fi# 9E (ey+ 9E (e }F>WD,T(g*pt!xM2IXI_R*pt)xE2gIXJRW*pt1"_eNP?P "Iv>S "9S "9v>Q ")Q ")v>E !^`7 B2gHgWMP "τe:gCAL'WQ|2WcH0@?= JVoM" "|e:҇[S53cih3@?= ;KV:pthݥ+A -Y`w `3KGV8@D "ҁ2pt`C,"_epWDC*$}b)UH IY`u|A8R:aGX"_epWD*(}f)U8 JZ`m|A8N:҇X"_epWDäS*,}j)U( K[`e|A8H:X"_epWD9t@S.CALS8E1D)t<O.CALS8K1Dt6Ӥ.Cg2M< ,jЀA8@:gX"_eNp֤WDs)+}y%)U`X J_^`I|A]:ҷX"_eΤpVWD3)/}})U`gD G2;KRHH_`=|AW:@FQ "q2X"_evNj*t VWD=(䤯0E )E '}(U`G$ I; E2;JQHJa`-|AO:@VKQ "ϰe:@XK1b~},)ҷXɈ3cΠBl DQt52`ch  X(t:k D1tXptX"_evND$P ".Hd`|AC:{@ B2;HGOh$}E(U` 2E ԥ'>*PK2kP "@Y:v@3 ,A2U 3E T3'> *PP< P "@Q:q@C ,@25 -40?E &4>*PNTDS "@E:l@[# N2 m40;E  X&4>h Dqtw53cZPs1f~y,1Kj`j#4 "0e:e@sS LmЛALLh/} 3JgLh/})UI Hk`b|AxR:a"_e0WD% !}y)U)x Hl`Z|AxJ:] '"_e0fWD'%#}Y)U h Im`R|Ax\:XP҇"_eΕ0WDc% &}))UaT In`J3~wӿꂟ "ҡ>-W;O "9ґ>-_|GA8G:QЬE? 
"(`Hg" A7cΓ0Ӷ1lџů~2nc0 ӵ1l_# t~,aFt6?>Oߘ;UK "O2&`X#̦icZL2@?t80'~~|η|\a~?$ Kq`2=C7~̥_{T5HIZs_4˧F~iߩjΑ0!2a7o~T5KH\S_0/o~Iߩj01f2a4n77ߩ~{U ">L&,򗧏ۃw/>tD;#$L }΁LX/O?7O0k?AG:AȄED^v/t):0 A^D..")sH_t`'o7wɅ <%`Lc" |/濽 " <#`b" okh <#`b" r_c0Y&1ai͟~u~xEI'GH?cw{ޟP ">&,4,K?y3]Iv` o.x˿z{AxW:5dҧ„EgA "@L:4tEק^f=P2`:̠Qcˏ?kxw=Jw`}~.m7//oU2`FLMcѥ o>[~`~z=SJx`|mÎ^> uWᘟ\u z,q&>4]}#"/} /:GD.EcN0ץ1ex_KD>.3"L+}MYOw_n_n?dՏ~/pD "MJ_y`psmoW?O{|䫯~7/1tR<0I/z㳟/?2r7 "-SKz`l?n|_ "~}{A!`nK m"G>>|ȥA{ܵD)&>-ƅAxpU:%ҷE~?o>qi7_WDk旾*pE:!"_eH'DXAR "e|kH_|`X|A,`  K2!"}Q)Ut8Uo>0*E \ΆWD V*pA:Jw"_eJBXKCR "OLBXKR "OLBXMR "OLgBXNR "OLgBXOR "OL'BXQR "OLAXRR "y֔Np*tU30E ΂tFWDWQ`*J: 9"_e~΁tƢWDc ,.(U;Kg`(|AN:a"_e6XD)U`NA"_e^ ~|A$Oґ"_e>I?t$WDoI`|AhFұ"_e>Dt,WD{*sc}k'``|A1NL*sc}΋*cŝq)U27N*c՝q)UX]:П"_eťpQ:")UX\:#О"_epE:$)UX[:WCН"_epU:&)UXY:ץsМ"_epC:()UXX:7К"_eSpS:*)UXW:H3E JG<]4WD`Y+**;З"_eEptdR "t @[|A֔wҡJ2W:5])U2 q)U2 q)U2 q)U v`*ƥWDtt\,ƥWDtvX.ƥWD`9D<,*&'Џ"_eŤtQ "t@;|A֒s!F2RQxZ:F(UXJ:OKE +I9 $^*,$tZQ "tJQhE2:!(J E HG8,'F*,#tQ "tv@|Ao҉C2" E:Rm(UXC:;I E kHg7`/T4WDtrvkFƥWDtpvgHƥWDp˞)"_e~v,ө ׎1"_e~~,ә ~1"_e~ "m`X|ANlS "0t^@"_e٥pt*L.c3WD`n%20E sKG5()UZ:I K29 8R:iQ|AfiIR "0tJ@"_ey3pt*L+åWD`Z/7E J3 (UT:Hg E2 8I:t)|AfiұQ "0t2Γ@"_e~e:'+cQ "OL2T`$|A>PlC2@?2Ca(U 8 0A2@?ttnNWD`,0 *b)UJ:JJ`g|AFRAؗ"_ec0tvWD` -e=)UG:CK`G|ANPy؏"_eQ0tvWD`L!i(UD:=sHg`/|AƐN,ҩ؉"_e!0tWD`$l](UA:63I'`|ANM\؃"_eҙ M:;P "^:1IN2@wL(p:E |X0dvWDtZM;*Lg%`Vټ)UcNJR "e:(KGF2@? (Uh,PWDtH&;@"_e ^:|AJ$`~(U*DhWD #)UçAL"`1 DtWD -)UhȋtVC#*4a!|Az`:&"|AZ`zf"~|A:`"n|A`ڈ"n|A0Dsp/E y "@'dI2@K4Q "b"_e4N4wQ "f1y|C2@= 4MH*D]C "@ьO2@ҥ= t I*$D) x"_e{A!(UȹD&I x"_e+{ȇL' t\nP "rm1mp"_e{A#*d\C "@#\WD bI&E `\WD DSp"_e{A%k*b&' E 3cF'2E {g1Dp"_eD~ D2C "@C\WD\!h.P "p.0hR "p;R4Do(U8={A)/)U8]{Ah*/(U8}{A*)U8͝{Ah+(U8˽{A+*= Sk|Aqb&*E x`1E3E gxd=pQ:X/*= V'|AbK+[|AbK+A G{t1`3C "@{D "p'p]4_R "p0h>D "p'0h "u83{AB4e2= !E yn1&-X"_e<DQDN2QC "(a VWD(`zѴS "p0hނ)U8{AH4q*DD3L2*{AJ4u*DDsK2j{AL4y*쮸DD,K2 "b VWD`o=  '`Q|AvVC>le:/`A|AUC "! 
֣WD`W;!`DQ "=0t(U.{AS:b*g= )`1|AvF1X"_e쵇DQEF2NvC "q WD`!`\@kQ "0h$(UÞ{AY4J*`=  -`!|Av`^4:*DE,C2@{A]4**TDE,B2@{A_4*쿇D D,A2@{AA4 *TD)DS,@2@!{AC4*<= L"`z|Avbfj0;E :j=Pl03E O:l1HG6"_eDCK2SC "Lұ WD)sJ23C "\ &WD !~.0hpi)Uxܱ{AM4*<= L'`R|Aubo0'E :|18"_eD ELI2CNC "fWDg!`JR "S0h(U9{AT4t*= *`6|AubMs0E 9m1:"_eDE#LE2=C "̢fWD!`jXQ "s0hy(Ux{A\44*=\ QWD!`z3P "p{A_:*C ")ƧWDbVy0"_e {AXJ4*C "ZƥWDb~0,E _J!`5R "b0(E K! "ƤWD31[KIE# I2k-S !FWDb{1Pph )UAz l&I 0E K!^N:-a4|AA=@pt (UؤgDe#! D2 AXX:0*|A^3+KB"_et4A(U趇Dť!A2@=ďV͆0 E u@L4 *Kom/,CrƠWDX]z`{eM0E %KKhF(U֖>.^[:DES" @2q ,=E ۫K'hT* Kl//@aS "+{\tȋEM2qh`*Jl/1>:fFL2qhj*KJOm/2=zFK2U@m)U<޳t"+E z{ǻיmD#4WDXNzxBӹh~*I؞th$"E Z[] "o$tWDXJz긏Atf*+I/w2\Ћ"_ea!^qZQ "#s pY:PB'|AUWl87JE2%@GL (U֐8th)*E c=Es%WDXAzxQh6* H^v:it͖Ѕ"_eazyq O P ".n a"?BO!_K Ba^p!t "\a~=C|-.:neu)4tPp͢Æ9LM,"\a"=C ZSt0)L>|-':g 4EOaB@2̈@`԰C!_K 5=msjإ%ZDG tZ.jRp i-R@й5)k Dr4}G ZZt0CME_:Æ ZXt0GME_8"t ""\ha΋pL<""\daދp\T"\\apBl"\XapR脛"\PaΚ.0@褛"\HaޚN0Hě"\@an0Lܛ"\8a0X"\,aP tNJ Z.QtMwF_(:'!|-':GX? B@",HӣsIF!_K }5`9y(k D Df,znN Z.At4YB@jDf.zN Z*EK$i: (k D,@`g,B@B5} JL",VӿWp*Y<|-cE pj3vN!_K 8љ5}}^褝eS0FtbpM/G_3wL!_K B`~rpIBg,B@BaAMWG_4SxJ!_K B \y|-Hbe?}7( @zSd]sDE_~Z40m Z ~kަ/Z;0] Zt}Nkު /ZB0Q ZdK}hެ.SZG0E ZT }iޮK.ZL09 Z<|kޱ-ZQ01 Z$K|J4oZI -,|-H=7.J TLB@dK{h޼,SZg0 Z.y/L\hA8|-RETh+3ZuJ!_K D<1 <Q,Nt1Oe.C%YB!\t5uS,Gtϥ0NtQuR,Ctϥ0ZtmµQ,@teP%DZ(k Df.t*D]p"s]s8 -\-|-,E\mx WG!_K 2;:Wy/,Gh PKtεh&Zp"]smw< V4\%ڋgggO@ds]G_-s9^IDATX˲BW϶^p8'貜(/@e -wBٞ~t-.D*&'P;}} Ъ*K-ҍ}-V"S]=6Chx -Ʃ-I.ę!@*#,D[x^[_9+Hy54DWLJ[}E ˆ,?~Ѥ߼`ѥ7SrD_ H,LbE\lw|7Kl!]s3aH5#BVZѿࣼņ@EL_}nK,J;x -"&f>_-8B[t}Ŗ@ EU }B-tX\zk~o0Dh cw<;"#fQߪ ZB{i#[!+ffW2]- ô6mt[ ]$@ee|wq,(oC %&9DB;^2ſiLİ`HaA@]tZD`†>7 a@@M`C0&iXz )m!`IDJ[D`F0t#:`Q-Ik&OC0f,M ri`[` AqQ4S`C0flȗ~9c[fj`F0t-0^q,(oL 03@{3}Bƶ&`[` Γo|o>sY-I35F0t#:`t&[2S`C0flHeLy `LM %"y -)35F0t#:`Ɩ=ZԴ&`[d 3vVw|ʉ[薷&`[d kW~|O-(oL 03@dg<~.8ܣ0]&`C2wZnsT `FLM "~ފ7v rK DW|DNcȃ_I7^-Xn 'vU/nt"G[˱@@HG #Dt"@: @HG #Dy^<;;{7xe-?ݷISW>tS/}Ẁ*`>~l!T L IN/qOG ݳ=O ZS3s&b`"L)-ũ>0r@VCǟ_lDI N{fr5-ũ@*:ŞgE"4xyb߽6O/-ũVD c/Z~&I 
LSl"L囗X`b% $S>t4Gu3S"ffIg/~闗X`!@):{2t Dy5Y*)o,N@!):V Bփ"0/]wE-o,N@pqzE @mUa"\]=޽{5-ũ.{/H|DyB8_Q2*:"0+`Vg-?\|GbThP!%f&{-?m $4fXݗuӅIrx5|M `qF $4fX*&"̪T,0+ݓcS.b#"ШS/>y.D DY鞄tgl `qlR$4nճC(0+ݟ=65)o,ȁv@9t|, Y{Y DY鞚T[3n h;X`F!n"7)U"DF ;9Xqs"gg_D DY#.6c%5tmx"F3|Ơ :̜]f~_}鶻0I`V"#nM4h@B#ڝ}ˋֈd DY#ۃjbm~w&@ ̊#:vָ D cX"S%fIy `q;^@BsjKD`"|9<@J:g 0+965)o,N@ZY!\"f{J=Xҁ@ <3pLEl"#f{J=Xҁ@ <3Dyi_ZX@ +C ̋@߫8@8Wz'?<;Nx}=~ȩϦj:/8@d@F }7?(W>oϛcY)Vœ;'=[a?H} %C#|fvד87v n?TvtrD qȇ2:Nݝc>[tm W|typWs<9Kmkրtzi~XcȐ "F!v[9S{GPtM ru3ׯڷ*(eѝGOj;iNrT@de@F'[骙U|B~8ODu]yH+dSODZ@d@/ӑlZca/E  6#J':ԏ-;E2uVo ? i~w{=[zll 2D ȽÂxܸ?w~bߩot|N:*\ܹ{{["}75"}=YNARF"C_.,@2:kmͽvڼu]IM k߾_vm~6. ͷGztIw/OF!'rظ@dpoD SȺF^cÓ?6M*vbyp3&N끺{_j5?ҙب@dxoD f]wޤSl`t?Oh ® {_j=u,@2:zĦHʃ{"/tתÄϥc@dxzDީW,@2 D۝Bygv rXbk=#)h>>iN߇=1/4Y[(h\ *7 :Ee ]g~' D7DVr0vr>Usa}Z!"mzx_];?9ST"`ú9󮳃W"kxƈ D/bm#O,@2* D n돷gs7~~[ؼ&N(ﱝc{ϟ'_7m;<`I"Qi Yr?)ST";YB^pg'_,l^yown]Tx}Gna0U4/;c/@HG #Dt"@: @HG #Dt"@: @HG #Dt"@: @HG #Dt"@: @HG #Dt"@: @HG #Dt"@: @HG #1uIENDB`bayestestR/man/figures/logo.png0000644000176200001440000031351413451541672016303 0ustar liggesusersPNG  IHDRX" pHYs㑤" IDATxݻoVWKf,"Ac7D#.\5.tXD@c7 Py"sr>Vdfoo/ytsrȻ=~V!! `{ !O[VSvK{G ,?ʯkwBM??S"c5BGT=G K,BgoY{aC ,?ʯoa`;Td?]p.avH EONN{aN$'%{(Baf_;¦@uJ0 ,?ʯکZ*(h ~Ӈ7$T=j @R"')=کo&0/i+_g̼ޞ%2,?ʯ^m_f:hqJ0Ӈ~f/ hJ0Y>eNe0i)TdŒݯγ4%%T,?/_vz@sQM!~Ӈ׽HKKeأNhJ0Y>Nj}SD"p`I_:J M ]%鷵61X~AO 웴5rs̥2y%/T!xǺ%,?N4i/lr6=# `/)=N뚄 Sf/ J0`Y>en3ݦvMB3vxOA{aQŇw|(%ci+>.Ev\* )%#EOvv+SD"ޔ`{I_:#9A M6>`@%i+${aP ݯӇT=@#J0oݯB&%e0(Qdtthbq/lr6=->5B2^oJ0)_|0 ("/~Vg{a`С,?BE@R@I_uoOK{a<6t%t"'SfNs;aN$@P@(BI:`i+lOhJ0hci+_3[ M? /%OR\D~`f,H_JE m"~7&k3Nݯk>T VdTw]u/aE)Y~9}e j-{az`B,?B@kQ@ H_uf.H| )Y> 0_n2̉DhbJ0hRE_H{Dg4մ6i9)ɤݯX~]6^`m&vZ\*Þ{:hJ0hEOݯ{@K&w^<%˯F MxR88J08i+_mq*P>+|*mC2^#%"/nu3;^%"ϦPPS)i뺌20{aG`,L/i/̉DeJ0EE_!j %"OݯKv +@pJ0i+>!G`Ľs)`,H_C } p2͎oⶠ0%{ˎF>yBO^ wN7KCj*nI ޏ *J_&3\\ Wz^rkT ^'ej`PASiC:<>5^)7KRf/ J("/ݯLN45)YQN+}as/L7㿛S_(l*~P깸X?}^hx1Sf$K Rd?uP{Y_hSN$ oKe0%$EO ݯX~^iPq/ly؃PD"/(^~]!LgǞkVq/o? oeVVؔtJ0:V'%? 
hݯ/aUmz/Waw%@RqW<}xS/BߗCzdD∽0{a((EO~СO^pdd){akakdhhC{at%!~ō^F}ک׋C/Qj*n NݯX~]ҔwvjkT F^aΔ`?~M:}Hnݯ'7FURf/ Y~9~e^F6BW?5NmυWoL{a%mECqvgG_ AU،hJ0Z^:}xUt= -Sq/lmvԉD;hJ0ZZqkeW,e/;2qtؼ??ZT e)hIE_!L_GϽU ›&VVؔ)h)EH/_=_ta{5~Z h%J0ZBox1ݯޫOBJ;8b/*RHZ%Mxe}~ȲC [ ['[́&3̔`4˯3^F=TNmX F%, vB׼ez<˶O$ 61X~ , ݯI)sxx9|vH<㰽0žK f)Y~9>̼t l~OW«_w?/fw0 UdT~4wz<>d^ 볣ǤOU،8HJ0D:}xUt= v@ [u"*⾣0 }WdyE˯CKY5qtؼ?QPT `_)7E_!L_GϽU{a}Yx4QPn5mM `Y~"_vh(~\\~9}:^wNrvWV5%{&~Ӈ7L{IXUzw"q^U̥2쑴+J0D/eᑑeY6;OvzTM mJ0vUH<}?Ǟʩ a¨`5anR+WKL'v:D{aT&knPA5!e~"}W'7FUs {.1> K_S!L45W^f/n/SJ0[gң{q!PX o=&Ľx_J0*KcuMj鹸z>ߊ{akN$RT 2%Y>e*/p({)/JiaQx^U(h !__} Ž߾,YeVWhH _*D*~P깸X?}*}y^%ejD]IW`O^ eǞ˯uYޝH=)=^~u ˊD{aT1ʰGh}J0Wdd*~PבeAq^hؚ?v*ÜHhaJ0voH#q+_cOD{[;V/Qj @kR˯Kގ2W/G=e>;j/*^`mZ Eݯx7O_/wN#¨b.aϥ`-y/97` J_ FEl/ )XyD# WO;zq^ 69X@R4t0~]>鹸z>dkTXu"*2^@R4"˧? ^/o ?N*2^@Q4"/~PF}=ֿ?^r\ԔYMw08`E!L깸X?}쯸a{W=O{aO p@W<}xP{Y_݉3¨b.H|$-8EO ݯבeAA{awNÞ*n2̉D}GE_H{!gN#q+_cOMmXX o<eVVؔ `ݯX~]7ez< =_,9}-a}v^UĽi=%J_ 9S/BߗC;8b/*R\Z{G G,H_i'/C/h+}2lk礊o %.K_:/[M~h/e0*{aa.%.I5RRSamvԉDx0{aD  ,J_'^/ o ?N{v0{aH ,EC]ݯ^ :T [\xq?(~}q^)v!__=B [+Ψ0QO^eǞ__zw"0K'I :%@EEO ݯX~^ h(m96 *n2̉D `%,Nv=^֎fiHpYM[aShL 7W$#c%>;j/*j髰kJ0vO_/uY݉{aT&kϥ`RdD e_GF쉸O$n͟0U|K?%~mx|o^/տ F%,%W,uz빸z>S2^N:FgSeWCfgG_y'^،N^L_׽6eǞ_@Kyw"L}󰇣\*mO "'SfW,e/qtؼ?v s"h[J0-Y~!0c2q+={*+-Ľ߾,YYM[aSڑ h+EH_,ŒӇ@[zx}/l{S +)(v^2G?}|ew'GQE M?`@+|"}e}2/##˂:J [ ['=ŽcңL-H hfJ0)YT~Tس?` `;x&yXKe0))SdyE˯CKY좸qtؼ?,V0{a@SQM !__} Cq/o? oLմ6%)Y(WdT~?vzXr`Ž>+bL MߕpД`I_ @{IXyw"q^U̥2쑴Dd/eᑑeY46;Oz&KZ~S*~i$>_cOЄ֎0{aT[ i+_8ez<"O$ 61X~ ,`?(=ݯI)sxx9|vZL<㰽0žK KJ0`Y~9>̤L#]~핾㞒*n_+  uEMyH~zq(ώYM_H mJ0`פӇ~QEس?` ŽQ'b. v Eͯ)__:@{awNÞ*n2^`)BaeWB8z:P ›!Oմ6%)C()D*~P깸Xra{㳠T-J0ཤݯx(wz> ]?;8b/*RHZPY>O̡O^OY+͎Bo^ؿTJݯX~Ӈ {쩜mX F%, [i+_ez<ٚ?U?h/ ^`mǟv&>?`W? 
{i/CL245{b{/.帀f50ϔ`@]gSu^"4wz<>zq(ώ5eVWa3 HoK_컸6;D"U][{a :XSv"~/CKyp` G۩ J (Z IDAT !__} iĽ߾,Y(YMW@R@)D*~P깸X?}*}y^FΡvޛ2cWh N$Fs {$-hJ0hsEO0vh(~Ȳh9q/lmv4l͟xTq;aN$@S@J_:i$>_cO@{[;V/QA M'%#RʓR絬h+볣¨"M ֦H ڋ DL9}HCG?}|h O$ƽ#¨b.aϥA m ~M2I#]~QWƝ¨f`/ Z ZXgޑFWӇЩ^/O${aI@R@ JcuQR$[T8a)Y>e*/p({)//mpZ4Tq;E - !__} q/o? oDEt^%4"O ݯӇ{ak}Wz%GZ:xWRܔ`ФW<}xQ{YX|w' T=4'%4"'ҩ_4wbuddYPK^hؚ?)R0'(ݯX~.4O/r=v^ 65X%,hJ0hi+L'狥QkY>xxXFq/lb6@Zp`pdC:C_pĸy^U̥2카(ݯB hk`#/h+}ai{aTu3^|C ,~i$~^y\?}4׋Co=e(&k3}Nݯk2Lť{/hr[T8a`(`Y>e*/?6~8٨^*S*r ݯp A@{aߟ ~9 )~`/  @'B3v(wz..O!}y^Ž`{a7`W<}x]{~@zw"0K'I v vI哩ECq+~uddYP^Ɲa۩ s"v >P=3˯rv,͎7KC2i+lJRa`Ci+_dH+OBKN@{xXFq/lb6@Z3J0xOi+>!;EaXoN$ T=%"'W&797` [+}2lk{{aP *H_://M~׋C2^Ľ H59QR#[T8a%"˧?dD#_l `o ?N *2^%"/~PF}vU [\xqRf5喽0OJ0H,?BEsq~`/Ž>+rL- ݯxzgAg|ޝHy~-P Fq/lr6=#,ڕ vbuRRVO0{aT8ah;J0JSi^F/ж~<6?Tq/ahJ0BC~Qk`#/cĽυWTq3^@ @K+l*~P깸X?}Љ^hx1OZ<1o/ V%Yϴu R{Y_B:x&yXKe0Z Sdd*~Pס쥠$m96*n0'h)J0ZFB3v(wh gҐ(¦$@P,?v.y-c%ë{a+bL-}vWR4;%M+~Ӈ7e~"}0t `ޝHFq/lb6\Z4+%M/:7, `ĽѰ5RTMl@3RTW1H<}?` `^f/ VSvKX4%M!~Lť{/}5^ i/쁰hJ0\SiC:<>g_,Hq8lpZTq/a8PJ0LWhk`#/ֿ?^rSP{>{a%ECqb!#ώ*Iߔ`"vK2cB/&DaDT `(EO ݯX~^ Ľ;aE^9%{ !__aSY۷7KC2i+lJR%%{i뒄iݯ/>hmzNWaw%^PW<}xC95.+6D∽0{a`7)5EO~СO^pddYPm,͎*[-J0>XiH<}?` ü^f/ VSvKX|(%;v5)R_nkT Ft")%O_NRr~PO$n8l/{ `'`"/Ӈhk`#/J_X\xqQ{Q{a%Y~6_%F#qC(gG_Ɋ2髰IP [v{Y_{{akN$R\ܧ@%5e2q+_`^Ɲav*`"/f~Q&~/^ ]~fiHYM[aS)"O ݯ/+q/lJ)SK{aw%`OސeW'k`]Vw'GQ\*I %@+|"EC><2,+] [ ['OߤI s):T?h$>_cO{[;V/Qj*n 3):L׼=ez<@Sښ?U?h/ ^`m: CikC^/Z<㰽0žK 3(:@{HF'_핾*n# hJ06VdT~4wz<>Vzq(ώYM_H })P:}xUt= vhq/lmvԉDړ Y7~Q&~PRVqtؼ?qv*%@(BaeWB8zh{q/o? o<6eVVؔڃ Y~"_vh(~\\~9}@{ak}Wz=eji/쮤Z EݯvR&~^}e@G{w"q^U̥2쑴Z Y>O4LבeY@fGIP7i/_h-J0vbuƻH<}?Ǟ ڱzf/ VSvKXC W%E+O~{xxXFq/lb6@ZO dC:C_D¨j.a%м`M*~M2oD#]~.^ wN r0h2EM{_WOPD_I2q/lr6=#)ha,y\\ W=5*͎:HSf/ I(@Svr_}^EN㼁08@E_!L_GϽ۷7KC2ڋ08EH/_=־_cO-mX Fq/lj6}KX{K GW%SʓR絬zx<ώ 61X~ -eik2! 
E/h#Db ۼ?b/*R\ZK T!+t l~ml{/l9m/nƋ2v `Y~6> Ӈ&G\\ W5*͎:HSf/ (v)_Tqxx9/CKy@c/lӝ܎ vF ,BE7{!+?Ľ߾,Y eV{aI PQ'ReWCF^w^9QN$ޕ@5J0i+>.+t=_]*{w"0Ke#i4hT˯##˂v$͎۩ s"o(BȇFW,Ǟ oke0*XM[aS_J0?I_$^yzX ^=<gGQE M?SW<}xC9C_ظs:lFs {.-%@,&W& 97` w+}2^}^`@J_:g|&^y\?}p^/0{aT&k3: 8a)sq)^}b h*[T8a€:JSi^F/д~<6?HTq/a€:B_vhk`u A-!.j[€NZ'B3v(wz..Oa{QwN O^”{~m݉3¨b.H|$-)Sdd*~P_~YV^Ɲa۩ s"h+J0mY~!ݵ>Ui$~{쩜v,͎7KC2i+lJR@P-/~פL'%>;j/*^`mVZVoxEEaXбޝHFs {.-U)TdDʼ e_l˰"o Z h)i+_|&^y\?}z8T/QA M h%J0%ӇqLť{/ OQ'q*-A 4"˧?^/摰p^*MM 4"/~PF}q/ls/H6J 4"Ϧ_Dsq~־ ]h2N$ 61X~.-`?)}SdD e_GF^hؚ?陨⛸ o/ /J0`ϥݯxi$>_b^/տ F%,`)=vbuMʔ鹸z>¶O0{aT8=W`(|*m9}HC_l M?N{R0{aSi+,t l~ЦW«_{bL^`(l*~P깸X?}@{a볣Ǽ6eji/lFRnPݯ뒤Lس?` ;x&ySf.a€vT˯CKAtqtؼ?UN{aN$;[B3v(w^o~, 2i+lJRRY~"~]ŒӇW¶WzDZ*쮤`@Oސe~Oՙ/=6 ab"n1fb6H3&XLFb4Ww3 „^ݑ l `,QRM$'rdw[mOO[6vUuw>>[u=9B{LڰsXГVE5w#ttИ:EG\Jc3iyE^鉴r~G/~Y€NrcFN_ƛ'уŬnG Iů l9r>>9+ [9Y Ffyao,F`-4g_Z=Ͼ# W^ߗ_;`/b0I ܯѠ ;gޖ@_KKJWm%/ P!ИvVEG5zly@a"/liz"]x@7sY^I#K Bcj{VEW#?L['g~Pvyz"}ynnNe0ya0`,4"/ܯ(~ml\2VN-z ]ycC/^Ίa`(XhL=R:)n"klr&m>tXPzً/ΏXte7R0Se/_t_[k~i}@D^I/vtya;s#5~Eßt_OΦ ;j_^8~cSh/ٸR ڈi.*iтzQruuh}ur&8P[v4abV{`A}(A d_I7Gg~0TVNmIF"/خo,>E0rn7y_ hx}/T"TBcYÆkH'v.ϼ- RJ/W6XդИvVz̵=zD#C7٩F E *"k}/z1rôurFtya'H"_^T"TBc*2%c=W+o3d&/ JN Jl1xJ/ܯəckyap!,+츑RZhLݗ~Q~m9r!G]~t,/F G J$և?q]&rFMv.+Vũ%И:G~=77V0`vyz"PӋfya-(",AׂNə4r96Y FB  r"_ IDAT8i׀nFX9&Q^=cNi`rn7y_P""ѫ_dya1ȗ"(_ N'v.ϼ- Jűʡt.x>bQA~  og_o:ܯѣgjxZH>bv*중SZ ^l9r>>9+ **.OOhH/f0ya0@`0 rE~mIT\;/l.%x9S`(A-4O)E75696`f"/N_wif1&%/ L d1u_VEGȹfC"/KG]iZ$HA(:e_GƒnF,~mعd`Z$F/NeŰ-XE0XԱ쨲/:ܯ(~m?o`HE^鉴r~x9+ikk~Eh}ur&8Mv4aA5w~ń=F7GgӖ'Χ|nzfwZF/"/خo-" z.!m~B{/hyaW//^ʊa-N rƊN6\N[y[j/W WG*yaЙ"BcYnjDѳևfk0tya;qH)-ևQz͖#蓳rZ97]"^͊a`pq_}iLظdh-vӋ#E^|E ZůSJ'~M~MΤ͇.+ ً/ΏpY̺\ c%E0Bc꾬%"kˑsևE/=_5t3Hb)1ܯh}#w݌YڰsXkH<(/^ʊa1Z #E0BcXv$XEW67P@D^鉴r~^ôHd(14ܯ(~t$ZnI#?0N@], v|܉ Br{6݌M[8*iizB^cNi;E0j+z.!m~B{/Eb]yc0zq*+}d+E0j):Rjtqϥ469# ѫ磓0HZr+K'5zl!@}~nY F"/]s'N,DZZFӮ(l9r>>9+  +Z$ҋY1L^F-4wO>9+ @+Z$ҋY1L^Q Y6WN7y_%Mk\z&/)_rhfC @ E^+v3r*"0zq*k )10 /:ܯ8i [~@>^ôHd 軅Y׃FN"+_#?0N5rmnG<=8?bvHo`MůU=:{qM7YVq#ERc1u_ :j~myևͼG 
>e~n"ؐrO},nSg҆KF BZ$F/"/خ6SDz_rhKilr&m?o( <=VNЋ >`C$^ XY>:9# Rxd0z^0XCld_Qzzǂ9F@e, g7 V)Bcx!ݽo>;rhx}i. EV Vc`5И~ ;gޖ@-\8^9ZzRԷ/:ܯ-G5[@D^Dזn沼F^jb1=Ѱݍ0m@Z$L_^ŦSY1L^XM(Bc깬&"+_ C#–_=E/gyaZ$V"X-4O)E7mG`Zً/Ώ f1 ;nKSe/_ty sf^nSa?7RգV!YW>ɰEWڰshmZ$F/"/خPԱ8z)n6&gҦ i^4t\^X5(\ů>tN56Y FB J*},n/胕{mуEJܯ>ͧ{}GQH>ya//|Jd1acǂ6\N[y[ ciC{ 3x>兕"X ,4=zD#N7٩Fx`Z ^0mZ$ҋS)ŠVTd~E7ůK ya˯HWRЋb(l1xJ/ܯəc%yapKC7YVq#/E,4ˊ_r(r9-.H~qԕ,/F*`~Eß/"kٴa iH//^ʊa1Z6@ cqQlspycya']Bz,/S5`~E`~9*ZnI#?0Pv4abV{`"Xe_ltm~)f/{mу ;kěܯ>ͧ{}G hx}/)BcYFn~~q,-r(]}oO/ڃQ[ԷcM~=l} ύt>t ;iFlև/#?L['g~7Z$ҋS)VOlSu\DW66.+"/l x9+ "XSNI]0V@O"/N_7`te7R)uИ/+~r\3KC`-"/KGeya?7RwvYWTQTGD蓳i%XVũu`XhLr&ri*.OOXzrV h}E,~E`)^NΤH@], v|܉ Vܯ!WCɍiizB^cN95E,ևt i3rD+FNeŰuĆ~O)5Jr( ;gޖcio†ИvJ,rFm>(ύ7[$^xC7ܮ'i>%x9ܖ#蓳rJX97]"^͊aCV"Bc/zqur&ml\2^@–_;ы#6ya--4O)E75696`J^|8}q~܅Ŭ^mjW[hLݗ~Q~m9rN"/KG]WZ$n#U"XTCɍYڰsɥj"0zq*+.U"BcXvdOEW67PPi.8x9+UEb`YWPbpL9 ks;0ya Ž;BE,+{%x9ٴ-TлzfwZF/"/خoVq*Urh}HGƞ:# "1®_^8>hU~O)5Jr( ;gޖciѩ*ya/-4>|/ܯѣgƛ-}èM=kɲTi`Y(~=]Cm9r>>9+ `VNM'HgbXiJY[hLE/7NΤK ya˯0刳*c^X` g~Q~MΤ͇.(^|8}q~bV)U^X)` RJ'~M~m9r/=_5t3 +E^XE,+ZAeP@Z- 9ZSe0_t_qkyP [~@>^ i{l1x`?ʉܯ(~(ks; yab1 ;hVr\~ 6zt6myև%v4=!/^D^ر]s'k^rO^HcOIv.j_^8>h И:~5C{._uXr~^4jH H,c}S+wm"=l}@}~nY F"/]s'NbZZF}R[[OO{ -٬׼Sdzm}R[wOW+os#E0_>?0TNt6Iً/Ώ( vWTɵ>"/l?:]=;-MO r`CjSgP']hiH//lH) {.5n??C@EPtC<=VNrE!N`D=ϼ\a†"r|- a6bSͧ{}GdDX]y}_Z~a1Eڰs9m}m_p9-:?7'ҵwk~|9P#?L['g~Z$L_^VNU\~EkcҰ˖'Χ?}ôt}**rJ0C}&gҟ>q>}*h`0"/l?:]=v⨑E 4ԙ͇.4Z$V!`qϥM}(Q].OOVbp/()癷q2L^X)Ԗ#Ue_P.ѹm?:, +'E{|w~@ʼn dJb~@DG6iC{]P+X~= ):mSsiiz"]x+Y͗C=9a:9# j&.OO/y' _Qظ4t; v^ҕ7PQ~M4ozޢ\N_wsr\3KC. #Qw@d X~>9ۼZ- ,`qϥM kg{鉴r~8 `}N4n'yfR"XlcR3/lfDyawONٔ}s_K_0۳B 폵:UiѶÝYV>j⟣xC@ z"׆)HiNs5He˳XrzZR:a햄Q亱u!ŘşoW89OO2g 4E0ʭVBŮjLi|cELM})PnQ#YA?Nub*,O2p֟4@Ϣ%e;]J0ۍML/)=~JHia ) E0`}ڧ(E0ŰM"R޿k"3  FḬC4v +E0`F&7`@>FS<nJAg|EVط "P(=p"]N (dJF)@'ھQC@["'L!>h@dIitJQ`m-Лv!wRAc %}`nQ>cڑݸM[lJVRZhmC' jVwtiIG ?Ն 2ePZ:*t-m l'! 
dZE'T HJ 1{% Îi2mZ(vʼn /o~A`=N%OH z@[[#8~oSb˔v~۽j/t/Q7+{1PGQ@ hxeC"pgi0zt J"(;E0WSٓS%x!"Л2¾4)BtpJ#_S)-?h@i)ۿJb)5E0`Vٓb_~boۣ|JK X+N-=E0`-tqpJ|">s\(SJ`~L._ϊ폹|"~ v|"br㶔F."7Q,n "+sD<"rE0: -`@|V11(|`R `@D.صtsE&`@-?#{\DnגO kP WAF.);m)- Ԟ"SȰֿ[p?h9T믋oK>:r_J#ְln4gmCWԗζ eqpY̵cnmtosyYj_9y/Ͷy?}d20H`X7@`VQbWptCyv_*>}U󥶞ފ{!?W/k?\Ea6{@3q4{1>'cqn _/n&ѯŽϹY*:͵qş&(`- VS^Ń7՗~?F|?],L~+~ޞ=ew頵n|mqz,.RQ )( 7<98J76nX9BA3@i5ot9n6zt6>9[TR)׏/O <5He*JxX*xCQ OAbmJ)OK5D}Ɠ2,E{>|2+⹁q/ ƭ- e-)2luk~/2s;m<$*OUuX_dzPx[.ӭޱ[ob ڮ(O umʏ{7j֢2hcjw=3s fn(e["EO/E"qϰ$&(>zuc1v{:, ,łNg EÐ,:qmqcJgE{:cWKA3W|v `0X_/yu[?5~g}л*VQWcS'0 EvIEŬaۢ`,l^b2Z=~gFaPs fn)*eCk@깅"+Ɨ( U5j.Er'OZumyVEDcn!Wd+m[Wa1w6 Jl|SP_kaan1 Xkcs E0O|ymXdNڧ{XV#acV8SqX_EBul73 m{SJl̡?>y ,#s-anX"o` =;n'Ƣ{me_㋿S_kc]Bl"[!wN7X k>;ޚj- uڀp3tfnsKgEt/{ŗۉ1c779__U=glWGE>jIb2*^ء{Pl-3w"6/ͺ$D/iH|ש;QR9N#/.G1[;(t=B|ck{'A_:gRo斵1e-F?KW>35}F}[dD—޵O}?d/8޷Uϊ}E"q{bM{1wcYXN[ҝem-l+U#Nzj?/Тn=!(b6U|@\kxji>q5w§o!i_M+$X3qTM;{ )m)O*ܒs-1XvրP:,}j-(Ew|R֦V+Ødo+ bEЮ3RSaN8mwF)+JV麊,*w#2ZbN 1Z6Ti!)ㄦEbw/JsQIuӮ3Z[;X96s`T[|*o a0 /*RYq?;eވjQŕYi'Toj<[62UZ\t|)=oS&Ѯ53?-_gn)7Er:n1nvGII_,X@A Q+]첉x-9LY iT㵮Z{2a<,?=^{ܓE]FEHU0UAaK3ފ^,VEx}wMTyU^xl:s~7ﭝT9ܲ:1tfn֫ȜϹv"LVlb@F*Ű(JXG5!㾊"E*Cy;e"ǪBOٵ['fb-%>7A7^w/RڌE3VUHuNk`y›nsژ[ܲ:`'I 57m"_z-\Ul߱XvxЈDu; :n|:OPCNi"OėҲ PT,E/Z شR؅Y9sXU[owZP3em-@/)0VA"e2;_"EC[)ױ-b|{*+~ut[t E-Sf^OJ 1vKb~vEC8X8*jJU*|k-biMUNj,],-cnYs[;iID ˺}i? 
U}7`|/6tNܮ%bW) E~]He"wNYMOgUؕ #eA[.~~1pTbe|&beU${n20w17h+糨61W}M)1\zZP~(K6ӭILucf1tx(*5b<x,tZ}E"Œۋu@`E-VFتYBe<8׵eq|/ -an鍹-@6Y EۿΥd3NOE.Xxu`t-G," e?-"F E2Ŭ"vv}-扢4qHv`[ҙb^/w[|Zc vr,>YYnu[xpe\.HQp5^PV0ő2~aD?sX)ۢP cR 81.D2x;3Od,~?o "Fdd5 .n hGP"X-GԳEj Ѣ8LmVD\9+s%?3G);ʼh1hEBgf}4_2=("9u1Pdž[gn`}bCs>8F(}% a1e4S/E+I=(16}ZVE*(y;QؘUPiмuF;08[bnXP|ݔ7IDmE0%@4A(QZl`ç`Qpw-V;'LhsSvǮ<lb=q˸e,Sv`[eni1N|G+Z5Nby>Q^G0¸(GFq tرwFA* `-'Ṵ)b޽)Tn;#;1O,a`|G1bn.?NߵN~Xt>Q>>$mSD `Pwۚ cA߹layԏ篋qs+~OxU*sK9[-wq["X9PcXH."cV%"XNsʚ0HEB,[ND;c!.]URUeq <-ؘ:G>i*sFFs讙V@k[ 0yN*ENTQ}UN뗢Z!ZUYޛKֶ*mF`0-bn[~'EKkCz0dA=qQ-(rD'H*YKj =,_2hpFy.TElyhecd>/](d&ԅ\-2Go϶?];Fb,ӻT"] 1Aǂk7KPĽݺuNNUNCU[ܒs @bs{*AX\[`}ځEOGUڑZd-a(|yHC F8?E?L: 6\ h W$yֽQ["Q2Eʼ 1GqWuyN]eq IDATu.vC[2xb!("X}l6! VcUǢx"ǡEXS`;skg\Y"+R |[ntJIUM %E0)*:fX{<۪V`u|x/bV/TylYhUi2Zp|=`nsK>-_mJgu E0%N-_ы7vgI0jѲݫE"~߉ȸww_~Ay)[ƏP'0Z[_(s1COj)[g"цͮ9eszC[nxx"~}s1*ܑZq -EEz[ܒs 0B=_mJJ]>Lg?!(Sy"(y{ ZcuvYo=RFt R-[aϾCJo3=6Q+؁:*Mc'U'^D+.YfI@c91,:%6IXVR=[?OC.6"kE0î*NJ_:(3~hXf󔭍*g˪TM(E؏ϻN-cnɗf_VIj\+ƷssU MoW1.sޛ_Ni[}? (:e$>Bf{RiZHZq*G+ĜU{;gK{5]W>M;._+ [ؒ苻._;Qq 9#^TDv/N.|xvN@5]{lΞPS]_fJ0x=p҄<[ز=c 1Uaoca !Ԭ\4JnŃg]Q%jaͿXmZ[!yk8Xn,BU (o L5aI_[rs/b.##W@F|'&mQ&XوEOfXjIӭ/mΦ=cjtTw_.ôu&[A)-fl)ag #+!IFmrs!حMCL]J8wm७dj򼭻خtO}U5ٮj黜aߖRJRW# ˿}5݌-0]02] q^ވ7v!ضt<bupTll^ETD6'*bڢUܧ_qmS&6c mfl&V_p]gnFAؙ]h Q1{_Lgˤb?im6ldߗ&(ukiTUWg/quy=ݘ ]dlcd-_]}'?:6r^m ad"p\^^0Q#c$Wu8M_Vr0b:J72_N*ǵ5t:-cl~cZVƂ͒0hYԼ\-(lRX+&n_dyMLb 15.x+mͨ6};cq˝ W,[c eEW?k+Is-rA8!p-}0hx}ZD͡j[!*R[7ĵ c1YY{U%c`2//}j{q-kKZOH_nF~9Ym+ $݋U]0%"@Ob~/BRðpcW('p[6(_݌H.Qӭ6&.ќ:&&+c~Dp?ߥk+lelk-@ߌ°oJ鷟+*lT 3$hE!b2ե%>D7 1WYŲ_j KJdl-@_Ψ ZGte`EŦя=ZQhc\C[Zk[!n+鲘,i괙TU<۞y0>#EfFl/&کGSثW?kyw5 \/V`w'+ K[׿\F-"3$TWSV_H{O鎥r{sLv\胨Vrڷh2ؔVK[c ga\H &4)T<,7 16TȺ})Tp=F􇱅0}VB~'Y#VჃձPJt+\-({keO_i0k"]#e`w $b0^p`\bjUZp&[!FhA ]ԅ 7qӀ6V29S ]N)-hC<~T"l#nJO]쳨NFVL t+\' ]jJg>=G`hO+W5(-@:o ryo0B0 \&OT* gn ([T'yMhk[! z"v1^b;&eSCط-@\C9[F5Xg)^#61c׿k)ƸO*&n59O>ۺ x`+7<V@9-glb(.@l&`m7*q2 ~r'V0ۨ[׏Mo2_hų=1 @jbN{܌-`lb)0%B0iKm מ>Y`Uq{˛l+ܛ^7! 
Ds]~1Q,& ܌-al~9 #wgZ|KvJUo?/sØl iBh,W4BD?v{~ܻľ5[|o=?l'hPx͊UrM,X`g"7ۛd&;o}+VÑIxPfQmӭמmwM[Z@ b,~ |-# x!UDdB0t bo/bX!ʱU|wrVٍ=~w(M,4vm[2[6gxfAKD& n4<|2YF=q*3毆=AV-a', X-Tb뾷g2mY%帜ݏ|!p|ZC#>G9tQK[!ڇ!Vþx`2ZWEgelJN`GhG`1}CӔOww,Mm2O6}`Zі h]v2@Dqmպu1 ^sPrTBEX\!n;L[qS:@VB&Ԗ [42U  7PrL=b0-?fvV;مVMWƪ_F %LKջˑn1@-@)I<$|hhA빪!6#iR|٣V9Q{+vľoc[kF{9=(-gK\[jB0ʗ+\cok_g{ R9i/lb}f!ފ&*Ml'm,6 c L`Z"2!p\+*~oRo[^)G3&[!A ~rTZĊ_?0\ S\mic 䒫1(V6*תwnWy` Ҕv^:MB̵4GվPϛ_KqКn(hZ7hj_0 |Za4+[%XUܷN5>shcd+ĵ`MﱸD%ם|<^.ˑ2@-@rUݑ&|V6'4{s,>מmwBܔ6 <&[!=׽̦Ԃ/=E4idím25 (Zk,O;Oӫ&]w4ft!fB`<1Y-r c h/eZ`o\|{X6Ʊh9h(}!z!0h_{*-. .96IZ`CpOl?vWVXmB{j ͓\Ou1iMKTalv1MQ-Svp4{2a ,Z5-W8jq["i;B)MW9'L&g*SZjdz`trm\` ,JãD-1rlxqM]cUl96('tIT= i;Ghc 0k3WLuB0b!X&Wix`DeaM m)"M;oysdlf|9/s mE59WX60c5Z^bC:mg֢"@i96(sgw7k/glv2cAmAF{ܥ:l 2F }Vlӛ ٛ|Ojܹ9ndlf)GiZIF{D>7P=ts 96̱!{ WY$WqX)L&׾ wD%-VƖr[ /  hq¦m+Ik,AFsjKXD mҢ456g]肨TR[mel).6DEB0%g`ErhtLcC?xnzO*7ʻ }ɱݏ܆IO[-`?49bnAF;E5U{~w }_(OnQTtc>`^\ss<~BOYjwclvվ`܂ p0&f *%A _Z9ھwb{͸~$(>c Lƾ`FFEUQ}FQ`ʯoB72aZx?v֡W7Hퟴ}Z\X &*?rK_Es?Scs}P}velƖ!cx-09Q!UQ{۽c Bimʽ*K-`s弖X=}?1د%Š}ӐuH_*c %0[&alX_X7B*AUiVykW.~cRm_yڤV51^u ԣs]?s;q bL U[]ۋal!7ceR}(~.LE=VB(V}qXMK6o76Hot֑Jj]5|lJ}/ׇ\sUӤrEA•k[nƖ[`r B0L Q5oE1n=nr{(&u:-B=F]sjkv\Kc\\'c5ߵZ߾:&xqat IDAT_}f>S',sOR> ]Y4{~, Z2Yy cK[2=\}_ A (O05r0&--'^` xAۈ֖ߥL);A2\7q WrkwUq2e\c\mzԮfNXvkIS8qNrf-`li͞e-0l-;y\CFu74suP*ʚhr+f'M5~.&KoUIYa)o﹪v/uܳ{k-|%kx,hoêѪ:|0mȫB3wWL8Ǣ][4kω9s{U(}dl)}Ɩ2̆6uж̜^ycX2,Jh789~Z푲ÇmNnUjqK۰yz_V͍'Ѿd^Jn%&b@Ic4{Fc}@vjWu3ן6Mxv%,*Yf?mzMqnwei0gl9ct[fB[EUxyhk.-v9-F=0?ULf,0=V&v8qK[mas}|4nQnMRm&@Nq݉wwcvOڵ9;Ѳc=n\]Ү?=[]1E/ih8Q]4b4gT|p>W?<-2L-c34BXcXΙҦCz=}-W+g^{ڛAE?}E;V0 |mo.r-D<жfw g.°]_ޞ_pfෟ4Ÿm?u%,M$qu߳4Emg5Bq眴s-*^+0fE5八gvn\+\ҰEN׎_e-2clɧoc˧.6;K_Kwíh6v76P FyIrV#Aׯ\W1%\͛^BQY{nmyZ}ؔIky] ^%.">[?1מA;xƢZm&"ha~uŪ6㴫b*ޯ{g`VVǟ%=u')1+&;r4*m]6 Ɩ-1alɫc ˵}BFy[U%VKV9SE3])+.=u;Z%z+1Y|P^[Z*l~[{AɮNVnׁ'/7̼'цIU1׮47/zz0!8_KIhe~\D%Ɩ[ؒɱI\Oc.!=h(aث&K_]#;=h@(CUqɪ{W7V2]dfۍ6.y&SS=õ{mZm8Ɩ1ܚ-?^,؝kl#$UB-`#Q!艠bk)S-io&IfO]վ%OTΪ]մtqD7{gcK[&cli7cK/V->re8%"Cp,6#O] BLlN|v lVɭOBlFLVFG{Mb(ݻުj^Ŋ}glic [[:r  
F{lkB0R"n>j^hs}v_~S>2ciUMV,_&)> bv0c PW,<҆}-9!eybeHT+Ӈl$>[.cm{ N\W,BbMVn'|/vU]3el\J\1k,{+Ɩv0-9zƈKB0Z-BA `mU3n̦J\V6JXybb W1_PhAkQ̧mOv~R&cK[vflD%XMZ"nV *K.`%=|$>?7ފǴ}rmf;=ܪt1Q1e(i-HfcI 盳k[bl>cK5OtvkB0gAn.n֢)VwiopsQ7lu`S=nV97 l^u!&Or~V(kڳ>fS={9dl)ی-[?ehkB0+BqShm;uYF?7FGxMt]ȴg`<\Z5F}1yꇵ%+%QX|l21de[2te|'gKĽ_pzJF5w,5s}{Hz 1}xQǰ n'V.ųw%(uh|MX"ǰs,E4cK[&glzDaB0ibV>}+V'ExkK=Пx}{4Ň\EӤ KTcQ"6:a=l|,b8>=&~iy4̎-elr=GK NT 菸i Ӧ[ïWY|b5dn¯VڶᨤMn|o ҵfi&,NPiSi{=p, ^lwuݝZtKLPOw?:ڿogq]p#阤ڊ_ɸcf*1i׼~J^\R?-2)T|'g=Eqn[KN˵|_#&OPT~X ݼN5Qxp&>Ӎ [7mLdǟ]rF-Vi\YcK/ dl!X45|N#ۑl;B0a2ʠ|b0K`MI7nfGξ)`B[ԉф;klgi)&2cwq넭psl$>K-b>c 10k`J`;mGFeq2>4- flb/Cų?̸ :9{ntMq/[%c 0&!؎`Ŋ+4iRo `FYV33LuL0CB=v#/Uo=tӌž|&Ip= ;pS> V0!ps h`BB0`g9֞SĄ`>7&& )كU`W&&nk>{QHJ=0"!p )-| )׺GU`L x/4*0L O50T0B0`kكqiC3!Rє~ @^Hi{멾}fD}ͷALW.0#B0{Ҟ?|!/t#y7})!K*6Nc;fJ}k>ɴXR@3`7K?Ki=y?Mi>r̜ `;x(.g:ƩA- b|9ؠ ;r`h#`R_ƛ6W x! YJx# P~!LD  ({e_"m}?,gRz'ezOmOi.қ_QB0hRhtiCB0(Yu#eVD3eJ3)vSk|메~\ k 4Ji)}Sq>`Kؽ0 ~Srᵔ~P/'~K).b`{B0ȥ-mS:@vͧ )hJiߡ=`78?6tLGJݬ}oKmivm2`Є;?_}=--~\ʹ╴V?=)@ڿN1`0kXKi\ ='N]I+/]N/refeBJϼtyt>pfvb@T}-~ٯyO]J{'}RZ?|T!Y;IN?4uDu+aXrܜ fm<0w.~tts_aXr `{B0gRvfs/2=JuA@- ЖjK) ?* Z$FF{\KMO?̣l&tpʵi v!؅RzaFKY{kGj y);kϥR^hޱW^p477<7Y\\\_NgϞMO3g!*NV)1`ДhXZ@OΥKixtAV_E vر666&z-/r%!zK;DhFA{m=6,>8qrl}}==iyyyvAǨ #!4p6N Z/(hӟ'{AѣGmw8(E6յʹ ~@%bNQuz+hZ:ښ題0,6ƅ$zIMzywT6 LEZkPFf?Ǯ }$&?3܏I|XOBEUa6 Ξ 5U 6K@:Kc@

~5ɾa=sxc=֩ld Wc%@NW^g>= T|e}:;A8O]? B0Һ0LfiiHT=i^@3`@kXV8GTձ9LRT*Xz:y dh+!J`oeeePWG hPzzUJHGɎ/:"d<mv2NąP`7W-bGh!PAo ^N-DHXZ/BI ;~)mV~Qt1'{߉:{n3=r;zhZ\\tRkcV-76E'u;t'hXz˃TB0H'N]I'k=L+++N><ڸ-"P6!Pq2uC^^T4IKiuzVL&Bֿ ( (J3MQw?+vaB#t B0(GW/Z]"[4؟<"|S-.W # "S[ IDAT  h\#u̢N I4G:I _j0 !Ш UYPR5 hTݽTG5B01Qf/nP N4FXwJ' {T%P= S]R S]R R}uNC̔*[ V30!03NxV^%fnС T3~>cUj0$B0`&6.TUj0$B0`TWjO_!fDL݉SUTTU~YY-U'P=GN !0U+/^**T(Obի`4??_왗.Uvgިf V 6!05ǎc(`Nkg^<.!PsWW=zQ877Who0>!PK@":ܠN:#B0:D071}Eƅ:A+g`@]B0W8p`tND}b?:`@%L'kSƭDUUg?:!PIvt i,T',]yѾ`@uB0ϧtTLUi LR`hoW'ؠb߸?:@ +9јtСʿ+B0`Hj#XД:`ϼt9wj!0hX}?0iBZ t z.PڸPDo߾22S vy! [yz``/0ryc3=Wm;`cw TNӴ*+/@ B$:ѭX=I=vxNPf:gB0:wv!  ϧCU~Z"@ T8p`'V'@ NYj6.`WB0m表>cB1|O"@ `g.N! z&`ؾ}B7P'B0hX p g=/>@ϬT=PF|6ϼΞt.g`#D P}<(bZ^^T@ 9vzpAS b}n@8U=PF|FW6U@ '" 9!śO2TB'8J*tu`'B0:{-ANEbM=XU,,,XuBەT@_B6ʕAe$}B0踳6U9rGVٷoߠU@쁴}$h:᭖B0:uAIVj IVg?:(>sss^f:]Jh!tTLDhHE%cj0-˄`Qu&Uu>u*%@GiHmiAt :(:`tIB0*!tЉWT_u;qʾ`UB0:GSߟ* Y'oV^<د"}9Z" H"k+Wy f3 /tٳg /5o}}=9s{ii)o{ HT.~rVJJh!z?84GpmtA" ":}{?o ;ylqqq?Zwկ~o J!t :N+DU`AWTt~666x 'xb(Pl3mj6j8ЬҦc~=}-JzIpK1}#8NTv8qk˃*iU)ƿSK5Mw'T@Gm(kQح Kb,@,*' k՛B,[;9g?^xᅾkDѣG+au*W^&e@M+rEN}NdJ\h7)gGVP M}Zwoя~4}OGyᅬ1B*F-KfL-b; j(D!ܴݿ? ٳ?O<9WqӖU@? ŴBlA A(*ATk> F3<^VBV K!mk|i9֞Ͼk%$sJѼcUߴ-,,\x͢«iqN81ZC Ǩ<Ї>Tsߛֿi'FO! 
_w**2j@']]-G /0ֿ9xR @`;zB;Dh6BUG.cn;v,(6,ZFťN!nC;DJwfnW3 "؉k筂YaO҂au]FUz+^?g0gccㆿOpmȮt!`K;l;B0Jwէ.UzTLè&.l(zj( =Õz[:OH!wә7oǨ&q*AOZ__V>*"-u@`;z0` j~+*bhя~4=䓭B?"U ?F(ZՉS!3!u"CPWmY{g Br+/] @ԙ:WPD}!_T NQ  Zdڭcoɓ(°}iZ"@r= ĉlcc#OU>enE cGj%IDH{UG -"n[~gǷyViB𫉶 Vj1Q$FZgT>}z3@'맛;UZ".uO ZbZgE!WDΪj]gQ6jXZvDZ =`h---- BQUBTkP,* Ţ,BZ#v]B0ͣ{&mAO<1qY^^~/`aFijXQ(V}bT;vl =Z"@w  1 0+~bU+-`hS;UJkY>[YY~@KD&!?{[yމXV]RRQ{٤fے]Ybm9HPl[   Ĺ.z8((!/Zl!oE\3\d9TѺIg#2i3@!;9s#S3$%!%_>C b'D̺q { اh{UI7V@",iIiS+T0Z!;*<>LIe&09B0aBoT *lX["*&C",B00a|Dfyj pN'U= gϞʪqcc#kmQ+i&@ 1#xg}\b%=:gY_755۫01,*5`P]g}7<7K}J 3 ݭ~MJWi"Z__=r\F&QE?,UEkJ} S?)v $X(EI֔qb/WT>Eu`]g3*/sD%fG5T *svu NC"ܯ}QaU"$sJ7|UfTܶ\N*Ks:`5yVQb+Ĩ* :XcVI{ҹWdVEലRk( "0|B0 !T@y[!F0rT19[/Zʝ{-K)¸$s[ĒQ ` 1rU9- "ԊpkPzb UYI֟VW%%` 1 ƌ*UQbNXIq&Z-nmmeV?ܷD3rlllDmv̫E/ R]ar6-^ʕ+WT/ֵͅ,I8-`0A;wi;</FH2;;;5 [? ?":?,Wz ģDxOQ5WG)U `JڦU ,bgAaQ tz;;R.[ߧr~vh.us`҄`0A%U "ʙqQ]'ZV/E\q1+Gu >յ,sШJO`r`0![Ա abZ+Ga/^znk{g#'(jC5XI'0|B0BɪplnvEE"%׹,0YB0h3W%U`MMnK=VM! ulY%"L &bX%XNN KKKcYϸE{a|QQִJrh{{;u:q.Ԣ-f&GPr0Uiޛw5HS":M[~&4b`Z"@=`osX۫:M!aT5]N[uS=%"L lAJ[`{{{icccz⯛k儡QYX %m-``0f7ۣUb6TE:; s¢WT~#1k+́9|eBqk- bΞ=[K.nzqQ/!Qo4 *q)&3-sB*)mXR  ƨ;N/8NlcrDXI`a `J«6/JmmVWW* IDATF*Rg!kcNV|6"\jo ``0&q0`JJé噙ύub-``0&%%NEJlaaU`}SǖEs f'1)9Rk6Vž|XSu9!X+JZ"F%0>B0:B &rɩ 6J;LNPUuTT %" :q|W-4iQl}}}s`?߂HKD&!AI '99mk J&mzz:MMM \E["FU9"s;%@+`{^ޡwWjPؓl4Mn+6V'g?&+ WqTmSr]*6=9-"B|Rg%goꯤ289!X< fggDz=ə]ⓢ%bSDP-``0B;wi;; VJJ**ٓ\UTmV &*i{Vyu$FE+ē˩gٳ]n. 
F䠻!X`mۓ\Mn>*Q!`DV*ooo/\pmh%"T Fdf~ФrfffZ7'mjl FI#RrmT=t:lCG+ 7`0RB0VgBz^̬g٫_)Qü]XXS}9!֖kCHXFG#QDJ%%%@!@I3!X=TE?U}Φ}ؽM; ,`;LFU`D\}7SV*, }*OCz=Z sdDU`l馝Z" ` {;sOWL{P 6M̕|`J qzzzs.JK*HurrƲ: lggUW%-|I*B08޸aXNӖlkkks<^Nִ=,>_ ICRrؘ!ɕ6h.XA &!)9C+ĔYŢ"rӦU+^JR-`x`0$ޮ"`aaxZ9Ee[T~*9OC~Q:sssj,)[ WrpT ]U‹hVquV=h9BϺ&`QyŬf WY3խb->'šI^WT;333XsUG\jrZLNMM}:|rR2͹QQ~N8-U @cu>1LVutɩ8Z{^~tʕ,oѻ>MJ?@k|rO]åK&v 9:ibX_IP?S)EF@U-믿b8]p3_ë:n} %rUJB)ф`pJ%u>Ή{ljvw[%Q5L$EIqUQj{m8sΝ 'c<':G*-maT=YSW^M7nHs#JQ8o~: !} b9mX)YlZApB08 'GmaT+‹똞k~{!AQxS3(t{"z{t+++{b\îaRrATj'S(9nz6JhxϟJ£h;xhWloEt5;mEڤ"cYKQTؾU'#y=qݖ,*~qcݹk+h=nkk+5{QPO`N#~sێb_Q5XA)iB08j>I5333yjT ~dz"dəu\TT x뵵C*R6GFQ^IA3^z3քUwƍOB*^û;ʫ{{3&Any?~3#|/|n=vPhG_U>M ǡ8Z"55 sfqr?mkW4 !7ShNNz|>Vq5_y_2u崳m ~BJ*4/ҥK'~:`4!(EݨEX|Vyh9A@8q*dmmā8Q'`hјNMMzn\ݕ\PNJ|1eī]w˩ 5Y\ TDŽ`Pd6O[EǙϋU]iqYy`n ,}\UYs'! lq{/:&B<(~rBWWcYO,vϟoGT@ !d*9nrX_TT]|x>VI00NqqocU]-,,l9h S4 J St[Z^gn87==}E|sB߮5jo3&D ݲVm9䏊9YUWT2^0UE]}m{YAIM-  2TU(DUQnkxU"^zi *3}{*6}*ٓ[Z"@.!dXnQ'vvP>>^ 2=8J1*/*Õ̙۾U `8pΜm RAznI00W`?*u ÞUms_QQ5Õ2/74)i8jd=X_;|ᇕag9nުWiY`0@Ɂs+]r[nmme"T*rîᤢ +W{vvv0%?;:ރ vWc{ܷޫԚ(wf#tMias߿?Ԡn" Fnܸuŋ h89\%Iʽtٌ6qQ7Hwn ̶j]xl:s~>wi't11bNn,}\1[9Hk{{cTG￟զ0G\ma0DɫWnĵ߾}q Clƽ  KKDLhX"#B8􏪢A5;r/*B"\V׾vx뭷z]Go`t%;ZZx7}_(٫YVB0ݢ,WIG>Vbmoog[]HExmל4j(B\~ə]\wanvZF[7~rr?st8 s;hêj>mUE9AM?;n>a_SRFYΚ-\QvΝv0+»f0؅ ?mQrT`pU`'W>Eu0+p2r`NXBW\)^O?4ʮiڛ\Xkf 6)YzoKG(9`6IDГ+55j-֚;,;XSNNU($Wixi Gt{`p?>`*VI"PfQ- cR_FV[sc4.^[[L~o ?$`98^ϓUh!b*aP$‘\XE5LMe"`vQbW?ʝ]5/X_Tş魷ꭵo17mRKub)!̽|[<{oW5uf#_ݯsƭ$̦p':L*QTvvP>C;;;J{Um G|<ٯu{OWŴo']V VX*l]VQ!IIEXMQ);əX!*Jk#MV|DbUh!ƜVü똃7QeR}F~i;ttYv!Nr_gô{/?o߾fHJ išWlj`[ g933sh5bTr:{/oϟK_=cZFvOUta0v;,DuST,E@4j!,댪3V0^gu[E2~﹟k_L0X ኊd/U֟UExI硍S1Z &h.Ms !|l|<1#,B?UEof/;M%`iw}_T:Q  RJ;wi;8B G_vƍ`"9pqXPTF+qَ_BvJ۪OmĄ]zubaXoijz'ïK.9ۺ@ X6~~2 ,xh ;Qv0 GbgN!*Z@{Z! 
&'*bN{ͰF3{zhq "f1¯&bIe+4S.m~3 d6M7Ѫh{{{{G4ƟU _+B5_S?qֲֽy32'oVwPFpG]cbf=.\Ⱥ[zӟovB0Z-7`Նʽh55FֵF/6o`q`>4J0K+9(6 A%?4J0Z-Z 477M@ >:Asq爃8wZ%%RR%"m'J *RgffV{:w%`8sā =;;;ieeW3gxߏϋ0T^˟{M#JKi C_~tŴyuഊJUpB>ѮE s%a0 ٹMۙSSSZ!T :/_f۷S(SX2D@딴 W_=q _OmjNh.ush'!Sr ,kh_pʕgL{DJ&*UsDkT@h]"[[[5ollZ (UB@̯+CM"닶>o* >B0Zey`<)fsM* ,8U3j֣^%, ֈ !Y__檂VVV"LOO*&8Xf-pUҥKigg+4 (96 ~5֣-"JB_&%Bl͉;VM# sssY0~ h!y;ʳ> m, GI`, %Z5_TEUն*2S @h_.Q:33{Թӵ ۹戃y۫,ENE%TmV @;h*dnU+++XUWkG' 6;Z!1N^&#~vMMMen{F|B0-z7n'666jZ"M`4 F+9]\\ahVUu: +d]/+!VrЫ & Q3,*dD`4m!?Q0\0rLOO*u$,|]\fl :( %" 'ҒmhIXuPL:z]qQ[Oۡ I4l57s?ch& @#mv|Łq4AIe4 F\gϞ!gm!%JB0&O333YW{:w?$q@7vsAq4Iɜk4 )9-9(bA\Y!Sr+kZ^RZ" ٹ qjj*={GEfggkyu oUB0ڦ\I%-ԁ (i\{-..jƑ+ 6;B0)XeEqzo~!?^KG>?N?՟Fg֣ r̽|[<{oW5uf#_ݯ8}~n^z.+_\Wәo[~_e޾}[;DN%B?~*-gl8@|/|-{eh]D{ /U{ϝ( u{ig{#Z! 8`PuB0-%>֋r; > IDAT_Y/ÿ~N$D&`9I G>?~S>{mb~a333iii]Ԣ1 sy @SC2.]: j~?|ǭJ*kh !jfEuֳDz1+ߝ}G*0h.XE*!Z]]MSSSsb=+++c]7;;1*kN@iiĬª$ ϻ  ZsPiE7|W_vt{}24 [@@9e`Pw_돾x?]Vu 2JQa833Z"B0jK+D)"...^? F7bmnn3(Ec.ԝ 8e{a?~ҽpl诿ٿ󴰰 clJm3zP[MRGz/~+3R!>/_N/ bq*i{:w`Rmoկ~Ս&ۥKҍ7za;O{aIgEEY|_׍7I(izE):*9=c={6" K{/O‹iN?/=gޟi?Gaﯧ[oW\z]u z[Oۆɝ/a۷S,BavOo?~Ѩ׈AŤ["\I%WϸoԎ ٹMۙsjbJ&s*\E@/=77fgg{d$Z"VEʯ;>~W,y; ,מ1TnEUYI%"uSu~+y_Db̨-F@ld`[D5XP+%~8BIz#@VﺹG,%n{`'!~3A+DZ"^`FB Ξ=}PB0jٗXJ*6`Ԍ `bkFE.ԅ Z `4D`ԂVQ1"@@-h0in.gTj@]VϺD`T^I+o?pC ---eԅ `L{DenN`TZj0zΝ~k>!{Ъ"锵D4 Pig_qN%T ʊ8h_ϻ%"M"JXoȍ8%-h!{zn>S%"M"BgfT J*9XşM`Z"PeO;TQI+g>sxvůjpD{ @rJZ!> YI5T Y` 4v6o#-[oɕDܹM_jQ @A&M5u'RrR `]7 P)B~KQP%B0*|ni@ V?rFLKDL@eB`D΄`TVդ%"u%B&-+!"@5i@] 8MKDHiPmZ"PGB0&N+Dj:0QZ!ԃԍ Dn`LTAk$i@V?FLԉ = }1oj0iZ"P'B0&fw@KDDD=HiV^f@uh@]ymK@Uh@]*0EKDBؕB\P5Z"PB0.Sy-!c q5JB0-!cU y'7R,..fm-?!c"@3&!c"@3`Q0NOm[Oۅ1(i(^KčuF%/|]`lT06c3 @i@ f`s @Dܹu_!cw? D̵~S5#`,r>Bjy0 B0"S+Dz) "XKDEi\Z"PUB0F.sZ!ԑT =\zͽ#-"!#U QPOZ"PEB0F*3fiP_Z"P5B0F*ܜPgZ"P5B0Fs')7J[" 02"\0FMȔ%Tu*`T`Dl q<0( [1 '!`$r6^MRi(  !@;D0*B0.4~S{4MQ5-!!`r4{&Z^^ξ0B0.@sL֕E尖 m8f^M%"&`r2^M}uZ"0 B0fAJk2hҖa  MBhj0-2!C{ynn<{~<~t̙O^IqQEPFn"@+Φ|0 aDG>ȓ{pZJSTI0+͟@{;w.Zxnקpjq`}g)-i-ײ~S5!r,"Sq`  S=SIKĨ,ap*g_Li%{ Fiqq1WQ  8V߰mVR f. 
Tr[!m!TNF1GBd툖 b 8L˵򎖈 bS<0xE-` "{$翜K'D?P$8D)i[  ({i)iqQJȶs;$Z!μ`_8ZD!-(% ͼ*0QR z]@!r B G\;^E2s;$01;;j`%w˹.PT ۖ \9!)7 MMMe}n#`W*oxɍv?yP E`ׄ`0%U`@YKď `B0kB}1W%"L Dۼ|]A*0N,Bvp뙭n8ݸ(5 8R0nu`Sy>prinn.oxB0z-3K8R欼c.pk~ٔΛ`sGKD&PGA` tZ\\~1`Gr !;w\ p4!O{w8^ ODz﫼_}`dF^k+ Q*[yGKD$SV mSZoN\{Թ%"&GA"lZ\\~`8D6??UKD`dKB0&$fZ%"?& |wp6^W_I{6;0`׹MwOY2==]5I;M| *m @ ZlAAṹI`rΝ;fffaT NbA )`"˵~KKDw)ͽT=:k?z:s'ꧤ%ƭGi4 `pAU`T|^&hw^ :JVifB0hs)M=@u`i#h+!@ |nU`TtZ\\^j0U`Ϧ?vƦlGi{FB0ٹMV*0ܹsiff&{u9-h!@.' ,;B06Ll4Oյ{Թu7ZF"7 M ٫T >B0Y>pٔ E(=pWD;wiV?@=D655s1h!@K-}'(]`-zmpعf^>s17;}@3Z` KfP3iaa!{9@3Z`O/8@D\Q{& vvƭ ljj*{s27!@-}'*[yGB09;7 > rcNfl04XT?|}?S@ΦkX&h:!@^|7bJ P%-׮ԛ @s *2Z=-'fljj*ZrfPOB0tu)-@sل` Css)M=@sDܽU `B0l MW3?z4wVWR{yJb~fy` ӹMw@S;w.d_]MG0+ >ț}1B0lyy9⢍pY` xq7H[y'7R@D99^` xq77umzz:?>Dh!@^4b lw+i{6;= @}<^zйM[wmiinn.moogGh e+ nt^qhjk B08Dmz`M!hwzE̾y!-AXjA1dU`UIK{ݴ1c\mANNiaa!U4 VUM=N^%-7n=J;w &Νnh٤"m!T.?!@;+2TEaMV}?& ͟.l&' iTsxim0:#oqS9tFwwwص+S{9!@H!D jnã0z!X\TI B! P?`u`?W[!pPu_W~a +Oqq;' W" P`mݟ a;IX7ةpeҨNaqH8Żi;~w~ 400P<_6?dX[`BSJ_s `P׍{Z)X\TI Booo21‰ӺLQCoVZǃ!z @-twwo)͍mV ⚥Jt@m~{Ka8Ew'+CAX\TzzzBPHV.!@.0XR'#ȘV)W*'SeI1io`T vԌ`$Ȑx lB[`mj 6@2$zvK#:t({zs&\t,Y"ȈF/`9Tqb*eڹd #t>B'ˡ3tuu~OF`!Ȁx ,W,m0ld@[`ݟr [wwwշ! K} +N VB`Co @ Bv `+nY2@rJz{{3B0re2]y.0 ` 6]*/';,p V[`PB0ԧx=cB0"Xfn@}Oޙ ã8 ,3`V `oƍUNv ` [`ɴXfB0eGfЛ",'!2Is Sny/X,`&Xn@ mB0xchd=B[`]rPO.!s m0,1 &!r m0l,!1 =B0%4#` H[`Ϻy6@@[`_5}ȋjo `K -y6@vjgCn@#ZH7`KCPci^ *ejKPCرSC-! | W(UԖ ++|:Y_q Ƅ`52|844ݣܝ.nnߚdG[`C~/vȑzeONCo΄w0`lmܸ1W5 k/O?j:t鑿r2Zn}C2{9:Fi=Q9CB8YiM}0;`VN\X,S7nm@(mʞ0s4{wO^[~ҚD 6]1 (/EFU@\n ?*m#K)~@M vxbC3_ ]]]T4^@#MS:=.T:jw"f]>~㚺ns"#J|V8e'l2ܬ, S?.FqcݥuT:ą\\,oxvlc[`@zС+E{f^yVBEx;_.}@N_ >@z=?vjȧX+{J_.KVo*}@B:}a?Co΄ws[?lܸ1Wwz6Qn~ֈN%z\hy0#:bF\?TBקx^tttb7n w˭γt-7U z"Vn@.n*~M v lXX?@~ml a/08===+(^  ^k.k X}a.~`2sU{lp@=vj{pmI,رȇm!j3:t(ۊ+6ԃ17/;^[̖l?cP500S͓Go;ȨmaG]qzlXt'Pz S_`5)n͋=.Ŭ! 
}a  %:0`i`B!.^  +~l H+MTwJԃPxQKghhL}g։ _G<+#`?!b!^ (@6un z?T`ڣ^S~#!Xľ@ r( ;"r~3HQ(/O }a _a; `ytttk3⯭EvR{>.!XO1Lŷ\rvJ'ZD`L701UF @Lc^ue2TA_~/Epm'57ޯ~?B:?b_XD + a`Twww8|pUo2~_,޵SۓmzMӥGʢ:r+;Z?>[V|vַԏ+WP,S?]N|_G ,Tvt):`u*H}ay@N|et|t0`mܸ1 U(NZDX-l&s0OXf "@ĵYޯ_9!ˉ  &.}__ Pomԏ:'whdO?V!X\{k/5Y" TE{_G<[!"_`9?Q'aw/,~g "PCWWWUZD+{J_ʫKl!=AhٰzvhlW-ٿoWu7ȉ/011 znu4X'7]{k_` JE~O_*'ȅ";5GEq80~ O :\cAk0m-" g)|/0s@A~)>.4Y߶_ZZ]N|߫ȿGvcQݺTb U|s,/EׯԅE<ۥ @ޯhF&kp @i;}T &??7pƂ"𖵈[o^<&a/`0b@ȋfJt.'A|5kPhooO㋄z[<ԓ,~ hz'wX PĢ@ğW>OAWq-bVۥ @=_w߼S;  oR_XfUnj"K/ztcޯkomw~ܕ{}a/ |d:Uaο?XP]uM`ԕƋ{_@b` _X ޜ N_ kO8K wZx#|EY{&~U~_vC!!X̸1^-AX8V4'2'˷2?ٶ-[ VϬ޲&ܷqy?􆰡ѧ@g>FGG??~l>AdORN@o)fgVOK 8ZadD\}f5-_q¦͛\S.^~?w#uK!xoc|au%s{ ϟ 7{'uR<`xc˟W5z\!u406Vҟ5/ݡ K!=֎ `$W[GG!ؼ/l(er+;/ eQ; rJ@Zh?Ϗ ݝDj.nkI_>N& NŠ{ } +ofN,)=`~'.`f.z0*y9J]|Zyx+E}a`M vC4=`0hTM|W?{&yGiKzVuwwXwP4+=`塞;s:DXP?: 5@M\?Xz|/҈_Blx30/Jb_X끱R_,TW7CRЁk^`9%pRVm?6ޯxk}è_sExt4 ;/rb?<X\-8ȳP? {&~Uz-Pl^,"K^F9/,~}a 5t]O+)=~^z[Fz}gwn̨d῏_[GjZ^x7ㅾ/P8z\iE0zs&=olE:Ĝ>&a'Oj([ bW  /% 0L_ež τ{5(;Uִz_ռ rJsʕR?XX] {.n W.ákp)G1K^"rb_G<[/ ;o`ñ_I(6nN8Q=vjtKՇ'{_ƑXUXXl^R`6c_X/ 9KcE9~g“4oG?5Zh quT2~ײ%]x7I_XL/;- h`݇o ?\vo?]!YS!=-57Ǜ ō^WK~QQ1 "-{6/ 7 +7Ogºg>ph ӥU;|p|(Bh{?>(uwwݮǛ27x}g=$X5xt`b!ؼB_,?/ ZB~)l8x&)|rnPKп%,?`h)]bW\}xN4%eὬx悰Ivx ~{0̊D|%c olp%,?`P'NTvU8 SNmSo |5xt8 D6oיueU}aϟsF9rp{>U}=o^ zCCC^znuȑ5y|w~cb~ eiR z0L_ež ehqɠ;on{?jd`$G_ \3%itûd6oW$ v~Vo4+:t׳_7~9YВ `zzzɓ'5K0z{Vt~q2ie: sAXGv(kw"Q_@2Baߓυ>dKOB0X+W066Vԟ S 3[%W&z|6oםa(+m8x&Ad\V -!,N :::DoGֆUN bW n]ꨨd"IM6oכYHYk0><%B|⍎nU0xSȠGiI~Sw!X 6&]a/eq }㬾0t8vjZ,rK1<<O9t<$Ȓx[;쵵΅JFqUN]`󒾰x+l6YW$;Z/ `O5?C=``$ /BooMaHs0VڍzH_*CyI_X veU7O XAi7yU=P!X> Ë/Xmᣟ>4-ţyV.Byㅾ"q@_~)l8x&)|hV(X j7;v+X~knv'|~,/,a3pȸ{+aV$,,`A[B0X0vj{zc/I¯ѼM+w!ؼ/l(ЕGDV5,4ض5m>t`E -!,pɪ߾ `i<5&5xt(m6oדȪ᛿ ;.9#Jjolrx,`t\XCP{kgτoo3]*Hr<>܇` }qEb0*}aWaIXz ,` }gm|),B[}Idb#LaBn!Z](HP+!$`$'!,/011Qjo a͂0~00sd, iR /u=¨$m8x&Y7;R`%1::Z&X+_.l5q*H¯~Ӑ!ؼB_o 5|PA`A[B0X> jĞ@qӏiH{i,~Eb {)[gE=`A[B0X^ ¢WN .ͯkoĪF*Cyㅾ/l6YW$;Z/ 1labD8g ,`au0y|/(&WC~#/,a2Ȝ՛B /m'‘?q I+CxMՇzH!;xt>Nvㅾ/=XR) 
4oު,rK+gAءVq@É_s;|8+!XI_X gA6 `-6xS)|iup=zH#~ l)2ʄ`U/$}ayЬvLXȕ+!tީC=`A[B0Ȇa!ݹʉ2s-LpmJ&|b` 0^+QI [ 7OPFߙ-U W5ŰgCu`$AX8v؂ӫvMj Խ5p$ËU!%}a1u=TOeղBiE0^LO`ۚplÓ35Ҭ' ȌgVB%$`K( aخ>Ijb~ _ܮt_t^* 2ktt-!'~6twZpn_1=4#[I_X@neQnT ˭3dHSB0ȴ+W'O.aZkObޯ'[&I_@pC賠}J+ժח+?YIB@/[aB]?Xz|/KVex3 QV k=0Z`P@Ej_\ rJu-G bqQOí0ku0 IDATօF%I5dR!ˠB_o|0*Yȇ W&C| ^\E?x+l߬ ï5xYW6 2*Yد/4=AhY}a ?Kn-,`ZK76~uVF|UFޯex#YgAeΕV$ u׳'/n_w')!pVĢVakSwkBw*$n&3[}PI1 ~eN0lWςb_؆g4jWn[ng!XN wzpɚ<[H~F|AӪB: ZH%k_ ϟ `0tJ\*CSB0ȭ'N°Z =W$AՇ1EJb}ޯ"CI_@\[賠{+a ތLޯ]aϺ|4T )!ZoE// (~MKi$רi!XK†B]> ʋ+[/a@z0:j_v) rJ ַ[C a[W"b~0%ˁ/,a](kvLXu(_O3VVISB0hKq+,a/K} A>zHc"$ rdW$ YAXHgå/FSB0h86„aPbÝN4bыBIbJ}gAe-.V$64-  R6009Rӧð&1a$B6]?Xz|/KVVrjיa(+ -]0(Ȉ7gJWίhۚw>fZ hVXMǠ3 '~Mtd" L*`97^7QI _ VN^Xo~} VCB$app+C]]ZPQ _S O?jޯ5dEb0Xg͓gKױS3U3 H\x;vlIFչ*>Jo,ޯko4N&~5!X/u$QQs`i8}; '+,`'adddIF{ͰxC̪DX7n+>jTR !j,B4^Nv5,(/m8x&IA ĕM7oW0ڳ'_DM}``pfRaV%BM7w[.[GMX6hE"~)>N_,RUÐ_מu2 Ɖ'J7Êm~Ua0qa ~ґ$ՠ` }T~{0L_T6lzs[_×+¯?^+^ B 144TaXtա糫BW 5._1~VcQ $-Η0.z.ZȺpGW _llps X %c z*twrf 7o W_٣4Iu´B0>) aXp(gvLXDCW5tj&Sc'sһ`$ʕ+a1 [aQakSٻK F>zH#ZapKiq'!w5^+QI pLXSЬhYu8/<憧[0#`$ji$i~eb ŠQޯՇ;9iKz.$㞒)QI˾ M-7͊\] |חZ kO@!X> 0<<\ FFFm!!F]?Xz|/I¯a^`T4^L0}aZ.u/ۥ+>1g4|6!X> ðx;رc>]7.6<_w79O*HzLJ`6^I056|y%nnki TƗ>I,;⯉΅a1|ɹS7bϞ 7Hi$+_(+_՛' ̹2~kv{g>ܭ z XnfX?i~ubwgS)xH(F~]{kՇq2XB_Gܷ/4Z+HJ7Fy6ga~IͰXI1_Y{^f.:*)z~PB0eםHe6<}לXko.ZLםW5=|,`JO8Q 2s]1KncL7w[9ݲ[GMQㅾ$ "N_U_k8N juk|W {!5!X> XL>J"k!5 śK9qEb˾¸mkJןB8lPB| CX#[W"b~0-jMƒIbV0eYy"q3S2悮[i+ oxɺK?%B|f~eb%@l^akSx1V Ȟh 5ef.~/҈`KEƒ/¨dKa3aMCʩ;ov_1 c,ݿEKoy^`$hi]b!J[C|bU) x(cy{VnoQαy`,/l0pĩ$0̊4 f׼x=`S>xM[ngMm Iŕ+WJaXM,'T6ws,ԧ WE#I5lZ,!jיa(++=7(u-OxQkB|y&MLLίVA+nܐ˜5y|w~S&ޯ!b9 Xㅾ$ FY/l7w\2e0nEW-FvZǵmM[)zAKFOB0QՉHnuWN1bɍA6Ϻ{&LFx,Id X1Ɋ䗾0ʊ}aWaI >)vq\!J W%{?Z@)ڶfC߿93 hT1tS,eydc:ک[;>$/_!+n #J+6nbVR}~ 7UJW{S\o@JOB0913NB]ɀz> ~M3\nٲƒK+NFf0L_ež τ{]A5uXǐU]1ܚf&'!N;{??vWrpl~7ڒmᣟ>4U-ţE=QW }qE‾0*YRpLXSpIg`iz)llk ݝsw>ѴžI>gG>m`]lۥGߙ W&CabT7bK>ȕc/_ !u' AaG%/,aK"Q65'Vm!t>:j '?Ԕ K` W0pp͆%? 
K];=LKiޯ-ţæEQ }I_XS|N΅ZQgVWݟ&܍.Bons=6Gs̑ qZKޯjvɥb~ 9^7^I0}a6|yǥ C;\|E#@}90, P3ag-C5g{/'_VRׄ`x/HF%/o*z|w~Qs'#I{ㅾIWKNJ=AhY}a@]7#^[d$ЫF a$}aV~N9qEb˾ Lqv[<[iޯEpBSSs̙>zH#~ n)0-5^+Q†gš‡f5p c/_4$! - AF^) "X NmSoEk`KiȄ`0u&}a]A9}aqM"ry~kkMM%$2)njz0L_ež ehqɠ%{&L6皃V . }qEb0*}aWaIj"~ͭ>iq2hZqB0/,zQI˾ Ÿ~00sd, ~=B_wܣ/Jb_؆gPï[L$/@)z0L_eyv0G 4ğQt`PdEb {ܨdoUuaZB/ `ב7?ʉ+[-]8-L߭4I @0l9RSs\n Y_{, j`ן􅵛'~)l8x&)|hN b펜4J_xB0/,a͔J}Yv0._1_!K†B]fK9}aqM"/7o W_٣4b!ӂ/$}a3vLXu.~MpmJ&Z}KCKlW$ v~Vo4+3knNGG'ՇM  A_qȼe߅ҊD}aP~,\=[i%װi2/u¨$ -]0+Ȩ5pVGD%I  Vx7 Q֚G>,5dP+_O?HHHY/X~B0X!Ɋ3uOھqV_~]{kՇ1B+G+lבwTz\iE0X^7n w˭&O%$+L0l3՛B,pn_{d 2fן;YR) 7{9Y}tIX>" JbvP X5r0._1_# K†B]ΉrZn}K7y~k_{d x' +8/ʉ}amτu|`N0gL$?OudW$ v~Vo4+5pƱEӂ :WrvTҲBiE0 WEcIװiA}A/u&a0ʊ}aBs {^n]裠$uJuna 5|X kqɬh8+_O?Ie_P`Ɋ~}aB7 a[;>$B/!x#Y߹RIsȫgVgL%$9"/u'a.K97Oυ{5'rc)\=[iޯ-ţ#KH/0}aԳkͥK)"z `sI_@ᰳ{+a7NmSoE#I5jZoB0hI_P˙SNS˭в|) y~k_QL¯A & aXSN k;x&{s sf.~/ҘH*C>"5^+QI pLXSЬXqknNAk`KEӂ#Wq@%-.V$64+Vӏw"diA1 L0}aZ.&~MdT2_C&/¨$m/CKf_S O?jȤ/NB0cHco 7O53uVHWIB0 }Ɋ&D%ΕV$ `nVZ}8s,_ze0lIQN pLX]sjMօG%k`KITA+dKsH%>/R:k<~iԒaS{+a5._1_% LrĖ}Ka̻y~k_QL¯TK,Xð)RS \n Y_{PB0` }ɚD}a6<>4({Vt{^4-`1`@M$}a!C&J%-.V$64+~pn_1_æԂ B_g z`,g}az\iE0{˭NJ!^_rfםHe6<}ל2l)\=;ܺ1QIR<:hRrnכaV$R֚G> m Ș0_zHH XwdBd@\{FޯQV XQI_PIPN\ز^0rp=zH_'L XIB0 CBL@$8Ԏp1`)0k],%n@<dV^J9uIvWxqkcn4)vf%4+%,0{a4)Y;_ߟ /9.6#-)N$NP C_[?{#NR/%(Rdxg^U^~\?h/ ݯۧjt(S !LZ hIEO~ IDAT'xA{a㋡{쉜Bojhx<$ݯ̴VZZ s"}"/ :V<}˯텓~ f*~-K ?]{z<txͧӇvF _]4O$\XWKCa/~QEܑ.`@I{a!HfDѳվ۷5=Xn`@*7N *n Yv]49{a_~m o0eCv oUEHyQ ;2"(VID7~t%@I''QϞ kވӇT1By`i/^~\?h/ 17vV{ejС`.2LgAc]=DR7ca}nUݯ̴: 6D"e0{a߷Ӈ[/ ?K{aS!kL?D"{f_T1ʯJ0G !|:\XPX _TQKiWJ0Q eX&%{a}r߾UNO`*„/> Vdt"4){qD0Ý^NZ*~`"ϧ2Ti$/'rhcojhx<)wk3Ӓ`(ui/lډD^ W h3a,~QѭxfM H{aS!kLzf/ ݯT~={A J{a!sx"R Z }a*j#- }bIF6Cb8zZj_~QZ:?!A */- Sd FCq/岠Pژ o~=(ʯYIpP`"'Y {a{a_ `ݯۧIS _4%M!HFG?{z6ں7!UM_)(h*EH_ TvwV{KZaF @S*| ;h$/'rojhx<$Nݯ̴hFJ0Z v"2WBS<}/_Tt3`vhZJ0ޟ®{-t=aۼ}Uͧݯ)hi/l6pΫH(Vz&/Z ¦CW$e^X,ÜH:©>7j*Su_Z2%mOSf/^XG0Nw6FÛ_yoʬݯYI`"/2^ Ž GFV*nH v/{a4>5!(-ݯֽnmG @*Dco/*z/?HytY&~Δ`"ϧg6Ľ=DN@KyS;F!G55XN 
@(|"aN$СO^E{a@ӋcpcQT#(( )=4^yl/ hJ/*O_@'QБ^l'{., ƫvʯ;)hi/,aYgAc]G>p vVo~n*`mfJZt2%-){a9<á셬}wޞ>8UJ_Ϥ@S@¦CWeB0'p*ύT~ݗ(T {Q}/ `7ݯcrL-_PdT OȊw6?^r\Tq#mGZ`P6i/2q/o~]Iq8lq*G)"O#¨D{a@qk鰳+/0a ޟ >P _]%=@zxؘEuGZa`K^l!)t l+@Y }/_Ӄ)iP.++D *{\XHe{Tؼ}:z45z<hg/v"~MfG Mx"q^ep({!+xOqaRŭ/hRJ0hri/,a׼eǞ0'l868c*`m澴)EYi ;h䏽x&hP}ͯ$EZ*f%A -K FCq/‘A_ݯφ e?O;}E -x"q^e^6d@Nj_oO(nz&/h=J0hai/,ԫޑ2='ЩNQ_T(_v) Y~> L (|,+:Fڼ}:^YK/hJ0h#EO2^ E_@[_^8顩fgi_>`f҉ĸvRgCWvbuoC!L6UdvSD{aѰ-)SK/hSJ0hsi/,ag5t l+C9rԎQ_Twk3SQdd 7+2^#[t/ J0 i/,a׼;eǞE'hZ[?T~=t%t6B8i䣞סR0hw_55Xt%t"/Ľps9p`vVg_{ʬΝv: eX<8i/2q/o~]`ݯO *ӇϤM ԥeJ2='׶NQ_T(_%WE (z/? =_. ]w6oKT~ [EO2^ E}/Ȋ`q+_ 'I7? GDb .%yꁽ0[,8}H! _@#J0T'^EiQD{aT17vV{eFZ*~`@ei/,agF#]=DN7ca}nUݯ̔`;+|2Kz4rxx^ ~:N˅*nvwK E5 R{i_t"p/OCiC |6B8'Iu蹰T2 ji(/뎴vE eX&Q{a}r;}a/=7eӇnPx"q^e^6dІӇ=/Uݯ3iE 캴URrD0p*ύG}iM "?Me0{a/hx1HT~J +J0`Y> L pddEV-$~m>Nz6a J0`_¨gCW4?v8}H! _~QDESD{aӇ;^2T~ 8EOe/@#q/|1t=@xS;F!A55Xp`J{aN$RJ^cen3v\ !\{Z/쟸_T1ʯ hi/l6pΫH/* 0_@[Pm%MyYt=a€vw68c*`m桴vR !4O$\XaPX _TQKiH ˰KHf_ G>ВvVo~n*`mfJZ@;St&^ Žp({!(%ݯֽۧaF3iN t6B)bQ+YMk{TXE/-S(Sd FCq/岠w6FÛ_yʬݯYIF t"/2^ Ž GFV*nH DJ0/{a4>5!(`_ݯֽnє`o˰q'^U^~\?h/ qk鰳+oBvRIӉ3r?Ǟ ojhx<$`ݯ̴ oY>0'i'/B0`Ӈ^8)T 0_ .#t= W >H<{hC=%@6B8'+'{., ٫vʯ;hL PQ eX&3جH_`OSf/^XG03ݯc~(vf%`K FCq/‘AAL*nIDAT_ߟ /9ɩF> `/{a4>5!(hSqkuoCK`q^U^~\?h/ Kڼ}:zYBvv `Y~>H<#c{a㋡{쉜Ž s򐧤LLK `o(X s"}"/ Ok{磊`O{aMc{a"C_T4NJ0}WaN#Dbυ%{a^>86F~QEܑQ6BO#]AY }/_Ӄ)i?%*5!+ZFz{Gi뙴ڛ CY~"]鹰\?h/fp*l>vV{e@Pt"ϧ2^ Ž=DP4˯C2ka`T C}h q+_ '=U܌]g 3):XN[aPgCWq`buo$u#Pf>0a )c/l:pQ8|l/}17j*j bvF,45z<ƞzS;F~QELI ?(EOIF0{a߷Ӈr[/J J{a&!t= N$~?Ez(- ^lᜤh䣞סR0xWw_55X(Ki/,Ľps9Qjg/l|6帰({hCPN,''Q&}ϡk`CViPtJ ;K{aO_ez.,O$  ܨ/xʯ])xoE,(z/? =_.˪ݯۧNrk{S,He0:ɋ^ؑAu˯텓܌b `Wq+D)s:@,8}H! 
_%i/d){qD17vV{;= Re ]`OX0t l+C97ca}nUݯ̔ J0Ti/_+2^Xk}H:}8QPͭxB{I K{a&mt= N$g~Q|*J `ߤ9G=Cυa4WKCa/~QEJ `Y~)eҧ7}.&6?^rӣZNߔ`"I{a{asؐ_oO x'wgb (8Pi/,~%pKPrDp*ύG/- Pdc/2q/岬Aژ o~=|T~)(h*EO2^ E}/Ȋ@ڼ}:l/l7 _4%M'HFG?{z`/luoC!L)hZEH{aez/?H^>8^?}۪ Z*~д`4˰3^F^Xb{"wv,ύC-3s``mf`6D"e+텕cen3vhJ0ZJ !\r{Z/_T1ʯ(hIi/l6p H/*)Xi/l*poeǞ˰ _?Ez(-: fC:= '{.,˰ji(/뎴tJ0H^X,2Hf_ G>ӜvVo~n*҉i-%ED⤽0Ľp({YݯO{_SgWWC ଶNQ_T1ʯ,4ah(^~z\~hx1ASf-~J  *(R*P pddRPqk/K7~@5J0(HFq/o~]?ֽnTwTdc/*z/?H^Xڼ}:ʐ2„/xwJ0xOEO'ȐF^Xb8F!yQ&~M f%G T9솛@ O{a P`^l\j#0=J0i/,a|wk3Sݧ=Tdy<8e/ [i``o(`UYCǛON I埦2^t5`(`Y~)a 3H_`p҉0hOw_v(Y~" R !L&Pdt"wk3Ӟ H s"ZT&&󧽰|z(I9oMʯ; \ eXݯ̔Qdy<8e/ ̭@S@ I{a! |*Z ZP埦2^Z*fe G -K F\5) Y&o>%"Om_|(%k^X`J{ak&(_%%/,?wuT~ WEO2^X`K'Vu鷵„/ (Sdv+Z*~F ˰3^ݯT<%4,L{a*-V_;=(fCPS_SNhNJ0Y~)e^)vhfJ0%YO$N ;Pwg"`@H{a+^m_=J{Z h9E{TZ*~-G "'Rf/l݌Z hiDb %w|a`@[(D EK-_vJ e/[IL P hKEOyt+#%ж^X,®y2ʯM3*%^l\v-_wgSJ0cXeki'mCS(Sdy<8!{aqkj6 ~}:R _G]mӿG&Y hEʰv [Kl,F  g[|//`I:8ق{a! _G EH'/6y6T~ %?(|*4YFqkj63? @SRH{aMr"VVKP =Xe{Z:8!S|"] _Sg(>P _m]G}oa`OS{akC ˊ, +ݰ`{ Hl6B7`{D/_{H ,?B8a `ɻUJkXwIENDB`bayestestR/man/figures/unnamed-chunk-14-1.png0000644000176200001440000017260013607554753020466 0ustar liggesusersPNG  IHDR .PLTE:f:::f:f333::f:::::::f:ff:f:f:::MMMMMnMMMnnMnMnMff:f::f:ff:ff:ffffffffffnMMnMnnMnnnnnnnnnMMnMnnnȎȎ:f:ffffېnMnȫff::f۶ې۶ȎMȎnȫnȫȫې:ېf۶f۶۶۶ې۶n䫎ȎȫcfȎې۶F pHYsE4E4Ƶ IDATx_u% @"WMIt7PP FT2Ya# @p?}O":~/8Iۤm6{tM:&`oIۤm6{tM:&`oIۤl7oOݟ/~MQ~3 OR%5~KKԤl7O'+Irb8I_{Mq?#y'7w#4?ç_ǻgȧC?W'"&`>?+"wgF֤l^o^q_#tۻ/_Sf[otۻn_y"pT/\!;"pC~oQL:~>BksU)]M:~/?vk 3zi<>WL:>N\} ?~ZL:v~:_x e- &`3_[~}oG!뱖~uׇ\n4>v'_hK?Bc- &`#WN+Bd- &`o.N^uQ%pl/MF!u&` ާzT?WD_(DΤlỻ>O~;Y/]t ܽ@2)|x5dU@Ioa:ԙtۻ/obɪ:p{߾yw~u1Q@I?rمg*DΤ܇<yï?:psOBB!o)?Q@I:PO!u&BL:@=ԙtz 3"Pg)DΤS@IXKIXKIXKIXKIXKIXKI:PO!u&BL:@=ԙtz 3"Pg)DΤS@I:PO!u&BL:@=ԙtz 3"Pg)DΤS@I:PO!u&Bc- &Bc- &Bc- &Bc- &Bc- &Bc- &BL:@=ԙtz 3"Pg)DΤS@I:PO!u&BL:@=ԙtz 3"Pg)DΤS@I:PO!u&BL:@=ԙtz X.tz X.tz X.tz X.tz X.tz X.tz 3"Pg)DΤS@I:PO!u&BL:@=ԙtz 3"Pg)DΤS@I:PO!u&BL:@=ԙtz 
3"Pg)D`=`)D`=`)D`=`)D`=`)D`=`)D`=`)DΤS@Ijt*`kPO!nB"pPO!eiB"ptPO!M߆(E&BZ щL:@=,M^g)D`mmC"pPO!ڤ щL:@=j:D'&BִyfPO! ڧ Q@IXDԤSbCT"h)D`%6D%&BCt"f)D`6G\m)D`=(]|+M:@=Zk/? PO!뱖p6\6"k .?TK&Bc-Lǣ IXK8t.N+ͤSgҵy<tz L82PO!.N-ˤS'JҏL:@=|.N/=d)Dtpz&PO!4"pI;2Ո&BKW1"NNzƤS;"҃-&BC(1"YJzäS>d-q PO! *L:@= h0"^H@IXKWM:@=Z®ҧ?IOo)D`=v>yPz,`yPO!뱖#҃tz X~ XܤSz%(}h&B3KBzH`]PO!IO}X֤SpNC+V5"RjQEM:@=^zV`MPO!o|#=-I>ϓXѤSp>JO ,h)D8uϳG4"N3tz &}L:@=g>yr&Bc-aS˞I f)D`=yb&Bc-aK鳞JO,f)D`=6yZ&Bc-a;雞HO,e)D`=>yR&BI_Nz~`%PO!yy^)=@I<< XȤSpkKc)D8--1"ES˘tz N"}s9UL:@=琾㹕$"&BsHJz`PO!)xn'=KI 7<&X¤Sp J,a)D8m V0"_~ tz /}s{Iҷ;HM:@=>Dz n)D86siPO!뱖pK黝 &Bc-g;IM:@=Zv6.Țtz XIl)=]5"k 7>Tz j)D`=n%}@ҤSp\{ &BJl.=b4"UXg!Iҷ:;HL:@=>Ez f)D8N҃)PO!1tv4Htz )}@ȤSpD#g B&B#J(=l1"PDgWqI:JDL:@=Ǔ>Yz a)D8y#PO!᤯sv9tz &}:ߤSp0Ӝ&BI_Dv7"k > Im)D`=^!}<ۤSz%B,'&=zIXKxQNPz`_PO!뱖r雜&Bc-'9Q]M:@=ȉJj)D8ANXz`OPO!aq#;tz #}AѤSpc3"Eg!L:@=>YBz `7PO!1q֐CͤSp CE2"B gQL:@=YFz`'PO!pFǤSpv1"@g)q]L:@=8I$a)D>YNz$`PO!@r# ;tz ڥoJޤSz%iTȤSP)}e#=IQƦIzZ`PO!@M&&BB .y-L:@=6] [tz kڤ'60"k kG60"k kgno)D`=iZIXKxZQzj&Bc-I˚N[tz X'k:nm)D蒾i\IKUzr&B*髚^ۚtz jzgnk)Dhi^IIYzz&B"鋚n[tz ziʥni)D葾i`IF_zv&B[H1ΤS"}Ks)tz J/i!=p3PO!@!Ane)D萾9$L:@=W4eIA@ 1"4HHz6&Bc- ͡nb)D`=>9<ML:@=Z4hIXKJ|p# 70"k _I_Nz&Bc-KJ5ޤS^o)DX\tc 6",.}9sHWtz ֖>9`kM:@=kKTz&Bf+=JPO!W3mxI`e飙K7ΤS̑^g)DXXd 2"+}0spטtz ֕9kL:@=J^z&Bee/= PO!2'rxI`U[H9ܤS)^l)DXTRc/6"k eN"=RPO!뱖NN$҃/5"k o!&=BPO!뱖a?Ytz X;J;̤Sz%CSzE&B/dN%="PO!z2'xxI`=I<ĤSyG^`)DXN:t#/0"&}sBtz V9M:@=IƜRz&BŤ/cN)=lPO!Z҇1'|xI`)鳘JO><כJT3 IDATSYiGi)DXI(ҳ4"$}sbtz >9L:@= IĜZzY&Bub-=,PO!:1'^xIXKN+}sv tz X0^xIXK*} Cz&Bc-91 L:@=ZrRS"4tz Xҧ0F"PO!҇0ڤS! 5",!}U+M:@=KH_AzJPO! 
G0|^ΤS ʤS I\e)DX@ϥ1"_Bz!PO!@^/70"ĥ_Zz'I .};M:@=iKo\4"/_x@z-I ,}C{L:@=Y^ d)DJ߽fPO!@TGW.tz XTW/<&IXK$}£Otz XL7/<.IXKN$}Otz XD/<%IXK#}“ Otz r.<-!I &}'L:@=1k.II`G^!>I`?^%@I`7s^'AI`7s^)BI`7k^)BI`/c^-D'PO!^ҷ,ZzO&BOY'PO!Nҗ,@zI`Cn"HѤS 7^$h)DEI|0""}W >tz XG,LzΤSz%Gafw&Bc-9 7^'xo)D`=֒J_pCu&Bc-9 'xo)D`=֒IpSw&BͥWF;PO!+XzB"l-}­w *D^O!+\z@!z 6>][ S H(D^M!҇+l!Wy5JbS HoL:@=JjpzPO!G+l%[ޤS I/g7"l(}vM:@=JzprPO!+l(^ܤS J/6"l'}¦ M:@=IpjPO!뱖FZW Stz X(*l.d٤Sz%GUas%&Bc-9 ;H'6"kA/UAz8IXK!}.ҋyM:@=Hߩp^PO!&g*$j֤S ;I5"l!}^һiM:@=[HpVPO!7*&lդS ;J'5"l }ŽIM:@=>PaW&BKߧpNPO!ͥSYz8I),rҤSpkv^:h)Dm K/g4"X4pBPO!/SH'4"V0p>PO!mRH/3"kIY |&Bc->K!$zΤSz%W)wtz XY&p6PO!뱖4Kߤ>Nf)D`=֒bL:@=7H!)ˤSp;{ L:@=G!+ʤSp3k+L:@=7F!-ɤSp+[KL:@=E!/Ȥl7o_W~v)D% H!'2_U[YKVCc踢_)DJߡ"p}s]!$ w(!Ƥl>D!~g(!ƤlA(DG!MPXEz8I~sm!rW # n"}*һYL:kA."ߩzOG!-oPXGz8Iӫ@.7ߩgB[H6pCW"ߩ?࿤($äl㿖oBTUp VGa]x{]!w*D`A֒>֒HNaBTU%}'%¤l]!y;U" kI I$g0Crm!靪'W_U!뱖INpg\WܽSӏy &)D`=֒6֓JN`v]! o><[}./XB6֓JN`vU!CW!/OXPz-8IUȿBFD!/OXQz/8IU ?>${*D KJ/&7[yN7OyWXܣҷ&,/٤lB_/"PԄ#t]S_yҧ&H)6[ꥪ_"_0/4@zM8IUO}?w " XCЄ E&`kݻתyw.w"kI ҋ qM:.D_} _Fշ XB̄U&`kW">D! TH_P"֤lB?>W! XAȄe&`kW"ou/p@!뱖4HߘP#դlB_};/1FzY8I'&I+5"@„"u&BKP%ӤS|ctz -}^BpHPO!K(^Yi)Dxq m; !M:@=ϕ>.Nzi8Iҷ%I/-G4"*Vzy8I97%J//G3"k'%K/3"k%K/3"kɺ%TK/02"kɺ$TK/02"kɲ$tKo02"kɲ$K02"\-}MB p(PO!$K/1G2"\+}KBp$PO!ҧ$@z8IJK ȤSp! 
^dc)DNCH/21"\'}G!tz ># ҫ aL:@=WI_pU0&BkH82pPO!57$Fz8I $ĤSp ^gb)D,}@‘tz .Kp(&B#Jz8I^ia)D`=֒夯G8JpPO!뱖&}<ᤗ#tz Xd5'Sz%Ip@&Bc-YLrJ50"kbҗ#Pz8Ii)tz ҋ @II*ԛtz ҫ @I)+tz  @I +tz > @I ,tz > @Iq{-ttz >+@IQk.Ttz >K@I1[/4tz K@ISN tz k@IXK֐>ҋ@IXK֐ҋ@IXK>$ҫ@IXK$ҫ@IXKV>,һ@IXKV>4@I!N#tz >D@I N$tz Kp*ӤSp_>SI/<&B{!Lz4"ܓd+@IugyM:@=_K߆p:ѤSmz M:@=_I_pBФSeg{L:@=_J߅pJϤS]|L:@=_H_pRΤSUg}L:@=_HpVͤSM~L:@=Z> @IXK!Yz2"kIP 3K?]&Bc-I߃pn2"kINsK @IXKr [oL:@=A84tz >Ipvh2"|tz >J@oL:@=/A@#&B҇ zPO!BM:@=ww NojL:@=說@_Ԙtz K_tz I߀ h1"Pb)Dül26=8c G4) FdY@#)8@0@RMN!I˝RazU{}|:׹O!Аt;.{ D%~@*]@"C)@*]@"D)@*]@"C-@*]@"D-@*]@"ےcK@*]@"ےcK~@*]@"ے#K~5@*]@"ے#KO~5@*]@"ےJ~u@*]@"ےJ~ ҍU DFt .=7JW'\znn 4tO 0lntO 0lntO 0lntO 0ln4tO 0ln4tO 0" h[ @`dyEAжJ=ptm.{=V@*]@"KO{-U Dƕ;-t+=wH7 ZV'VztO 0)&hX @`XISMаJ=Ƕ8҃0AQЮJ=Ƕ8s0AQЮJ=Ƕ(c0IUЬJ=Ƕ(S0IUЬJ=ǶC0QYЪJ=Ƕ30QYЪJ=30QYЪJ=#0Y]ШJ=0Y]ШJ=tM.{;`@*]@"JwN-&U D[Mt'=I T'OzvntO 0l(4hQ @`8YmРJ=pғtA.{Ѥ;`t=.{Ѥ:`t=.{Ѥ:`t=.{:`t9.{:`t9.{:`t5.{hm!g:`t5.{hm!g:`t5.{hmG:`t1.{hm':`t1.{hm:`@[*]@"ےIs T D};mt $={IR'Hzn!4tO 00)DhI @`YSВJ=0ңt!.{a'9`o6@C*]@"HrҍvT DF t"= H7Q'Dzn%4tO 0,"JhF @` !XHЊJ=3t3.{!G8`1v@#*]@"CHOpbFT DF t = J7P'ؖ,/= J7P'ؖ,.=JP'ؖ,.=JP'ؖ,-= K7ZP'ؖ,-= K7ZP'ؖ,-= K7ZP'X,.Vh@ @`ғt[.{Kn y.{Km y.{Km y.{KmA[ q.{KOmA[ q.{uKm i.{uKl i.{UKl i.{UKl a.{UKOl a.{5Kl Y.{5Kk Y.{KkA[ Q.{KOkA[ Q.{KOka{ I.{Jj I.{hmRҳph.@P @c[\T'ؖ,$=3T D=% Ij 9.{hm2sp N@L @c[CS'XGR'XGR'XIR'XKR'XKR'XOQ'XOQ'X|SQ'XxSQ'XtW IDATP'XpWP'XpWP'Xl[P'Xh[P'Xd_p|.{IfW'X\$;GW'XX$;GW'ؖ%=U D=%{IOe@Fpl.{hm^C=[ @c[LGV'ؖ#=)U D=%{HOd@Np\.{UId@Np\.{5Ic@Pp\.{5Ic@RpT.{IOc@TpT.{IOc@VpL.{Hb@X pL.{Hb@X pL.{Hb@\ pD.{HOb@\ pD.{Hb@ҍtEznDO @`%cЂt'x*]@"+À&[GS'X4!݊8J=:0fT D!=H7#tO   hFp$5kՓ?t2:&X4#ݎ8[uoWhm hFp$5kՓ ?vb$ؖ.= I7$f"?yݭ3hmАtC8j֪Hս7uF -YznIEZu!|@c[4%ݒ8r 23hmДtK(j溯> {@ hL)p 5{ɗo_DƼuF п4&ݔ8g:#^znKAd"~uC н4'ݖ8#[g@҃Оt_j8?Zs ]znL^-9'2̭3ޥ.Ar7q@s hR5pp}yq"@s hR5pp^DV@o hT9phu.?OdݗDFVW^yjսoZ![znNZ-yמ"rvjot-=rJ'!Ͻ@Ьt{jOڝ2/o>,=p K7(?{99_,=o K7(_z^e"/}@аtjOxW2}{V|@cq hZEpP>*sɓ6 D=%-iAk?F2<~W]@{lKJ[@M C O=F2ڇD=%Sg-q&!ռernyasό@8 ےңкtj֪gٖ[e./p 
p%G-y6լUtlx*p%'-y6լUo9yț DzSVDUbJt@sЁtpj֪M rek@S1 BUp05kՓ޻k|B N, VԬUOe@S) BUp0.{>,fT DNRV-O/y`ٽB Хt#ݮ8j󖙗>~꼢"RznWHZuw 9D#RznWHZ55툵(=`I7,f(=_I7,f;"4+=^]I,f3s?۷^DzI74QaUُOޮr'`E e_4,=)sWW<W<@҃~>|/>_n^{} @YA Xt`k'~ҵ ?{\T Ю ?h4@]1 A0[ @Q) C0W @Q! C0W @M D0W @M E0S @M E0S @I G0O @I G0O}կ]ƶ DZ;knK0@0t`8D @Jz:nyRs~D ےl&-nJ"2l'#`8 5k"~[}lm@{l'#`8 5kD}l5=@{lѥ#`@j֪s@ks0t`w5kգ!Y> D![;9N!µI ИT )YY '@[C0t`g5g&qs"`Pj΢M ⑪ DڒA96<'hJz$n,\ hJz"nfz"/DZ Y{l)$=K7@vSVmʷ'@K0t`75o!)@C0t `'5o hHznf Dڑ +7HhD"HBM]_<8W#d%fG!`x6j֪3U4CW"HOBmԬUshm9  HZ%9'ؖJAԬUshm9 IZ%9'ؖJA'HZ%9'ؖJOA'H @c[)=!yo)ypE-0A^-&ĥ'+m j'oՍu#@Zz*f;=/z"ŃE\ʖ D҃5jǧ9]-r<={hVN {I7FV0y@/ t@ ,= S[vzȫ"O)"ptkN5kDkfxь@c[%=(KZ枘OR3=3hm9pts.5kw\DN/yu: ؖcI<7J7GRV=| w

ۿD=Dv ǜ|PW@{l U D= d *]@"r` *]@"r` *]@"r .=U D/=$4Ie]^ D/=$4I]_z~`Y]zQmp&/;"KO6;JMnP/9qs?Z D-=,8v]pk2b$"8\t߾=yQi"ǖkvn\W8';HmYz!:v9䷓򐟼yr$8T0CupMrKӯ7_6h""8P0KypUpe^~2uJ4'WC{vt!'_No(J4'W#5呩o}۽@# L 5ȳfR^;M3JO43'W.DD pL`tಚz"/}~ OSz-@&wzǴ/##J3;(Nq<=[(.ݎ@c[XzCpIM 0J=K7ZU D%=\tO p 1ҭ``.{IO)GntFzH8tW @03 Q-*]@"r# q-*]@"r# -*]@"r -*]@"r  Ѥ.*]@"r Ѥ.*]@"r [.*]@"N(rU'8tpD 0ɟ~l,/=UfzV{e+@`yM`L5kՓe?vb$X\z48tRZ D~[g"KO&GnCY^"Uғ .j֪ O^~o[g"KK&Gn#Y."c:#XZz.8tQ\ՇW"ao,,=[/jʓ/߾y@`a zT,>[g"J% 0s^D%@`Y "|SWz̻#:#XVz&H7_rq"vA Hné>DuF Dné>ꇫo @L/>No,)=Ĥ0hjǶYzI7` IDAT>?Ǐ!_N2n|b 7?dU%ŤGtJ-'v?D^|lGSÍ+yOA $nC?]1ʼCN>H]"C"׈DDҍ`$>*sɓ=_fRI#ߟ~Yϧ''t'I]xr1o9CD?|??__?DCnyˮ?8޻7*枙"4lp@`!)V 0 ~V x?C?|=O,#=4!݌QV]DtgбYp- DAnY."*sf 7ggsDO'"7] j_ DAnèY^"w*sq{W1%:YlޡO'"w\}^__ D@nèY6eA?<ה@AfCD"HO HcaԬUOު{ 8dZ r1?Ż%f2(j֪'ٲe,cJ rs!8ЌtCE 8Ȗu}[2 *]M Dn~Z@x@C-`5k>_ݭ"[޻+8АtKDZyKo?ώxu^QKt =|4%ݔPVlx%& ȳ=ncY"q$@;1 0J " D<nCY;"W c[&=x&ݗPVlw 2eSȶMzhN1 ^L/?iJCU^ b[&=w4'ݘFPSF pA 0~裝ԛ,r@`?I ~5͓Tw#U"eo/ŝ@`?I ~ñwCvw1Ѩ7eߺ+OzhT=^p{*y7wwdF Шt{XjsLeR =3="'F3լUk D6ǥKD_ r"7#=p4+ݠ֮fZ] ?.\< f4լU D6?WM@D аtX/"g O0>@-`*]͵ @6d+f D6nVmj I@`дtXJph׈L>D жtXJpho.!O̖5nVm@_Sғ@m`*]@"s 5մN~3ҏw9;|"s 5մÞ|K?U@{lN [5մ"[ D=e'S@ҭ`ja"۲)^ ^50VhmهЅtXv@d+ǶCzC[VM;L @c[v!=at"ݮVtO 0KzE_U @`ЋtXJ= R.{9@?`*]@"3 {6:U DfH=Ilut.=[t%ݴ֩tO lЗtXJ=ң@_]`*]@";KOIm5i~Ako@`g7 F5'o /}~ UzOsPM;L @`W? 
B50V {7Դ"[ Dv*zSl%QzQwOM;L @`7O :5CV n#@`u*]@"۲iW 6.{hmٴ@ЫtXJ=ǶlYzWL @c[,=N+VtO -&:n+S'ؖ KO=Kput =L,֥tO 0]z[J @`(зtXvɯyW^.g@`$лtXvؓO/}~ǻj҃@}`Mja" ޥ8Դ"[ DJKwri DL#+Rl%(=EAGM;L @`^.{C{9zT DIQ'&=BCF @`~.{IZ9ZT DHnkQ'"=?GD @`"G~D=e{::ԞA]vo(hmٞ*/߮1R$"ؖIP{8'RO -V&V/22E"hmٜ65{[␟|"&ؖI nkPs^C^~kC&"I7v5?_xWv r׌@`ҭ`j޲Ns{^=~,ys CzjXtkX컳C>gO[}*@v`5k"[9= @v`5kiҶ5*=2Ttf:}XJ;@j֪GwF&=0Ut,:;n0o&=0V,z֝/@*]@"ץ>@*]@"ץg>@*]@"ץg>@*]@"פG>@*]@"פ'>@}{Oo@`0tQ쓿K!*= 'zT{w|u|yH Qz,Pt/=탺@`D`@СCҽC5w;C?5swa[fg!?@j޲'oMC뵻@m nݩyaj4@Zd[F'A?@wj֪Nc}sӽ_^$@lˤ@0 75koys?f:c,5!׏D}@{lˠ80)35kգzq3GSϫ& = ,} LYtz ȫ~R3M3 ҧԜEyt!"g@\zZ$ЗhsK̅Ǩt@\zZ$ЗZ r婪gא  Q`p@Wj΢kM\r1x<̋f"/'O]9".9H޼aDΤO=9"^33CD"gs'ԜEן2SEVK p*=>,~Aȵ^d"cj֪k\{@`8)t5kյ{wwf"!LWV];6p&!#=wj֪ 1.9CD]dd"ϥGKQmddsň#=\tЍ4}vǗ|kgVK LzTЋ=,!y5"?yWK L?'^ugȋd^$$/?u< 5w9! ɹ@lˀ^H:QW>z< q@d[BЉ0D""_ }RCx%$@lKI꓏_{ yVE -.Kҧ.Ԣvi$~n"$}ZB-'_oЖ D_,}^B @ +'T D"}bA @^k?פO tK.{`t/ }rh_'_>|w?3`t }rh_Ǐe?vº!ҧ^|nH@\?7JW{8'RO  .KzE s@[>[O+<{40}I!Ws^C^~kC&"`hol>E4f~]'_y*2]3`hol>G-;^=~,ys F-' ռeߝ] <{SY/"M,д-wŜOuC  ,M[OMYN=7@X>K'ZVVb@c[M>K'ZVV=52Ԍpό@c[K{>wI)Vs]x7 D=層pa5gѓ|)"#
[4fztCU7PUJ`Y5k2ouZDA0M|Ъb67J`U5o]|W1#~T3@j޲%"["DA0UШn{7'"O "`PoL>g.Sw]1أD!0]Ц4x+5!`L }hSM;\(^Oy~ԏ_6_n@Q=H5T;}ny/zDYI50V`@o&}hQM;L @P=I7ZTl%Ɠ^gմ"[ DA50V`8o.}hO @NK=K;S'RT D=a0GМJ=Ƕs)yl@G{<{JHZRs>z|W~k/~H`_3 @Cj/ٕ=k@F[<{KJR0sw}ח$X8P) "PѨ M i(K`HkQ%tةs~9/{{Ks&볥Թ"ix^%{}D?""g>@ẓ'-"p <?+/~W "IOx6^'Ө{o;F^.&DH_l"NQw}߹K}*<I/Y=4~k3%"p ,oz12rxWN!}FEMOAqŗ,BN!}JDM3kf+`u7 ""0;J/9=KU"~*^v`C02+|-3@rsM [c$_sNrsM agfL;Dx->Xz̠_򏈌yB֗Xz̠?3y_/E>\zL p~o3AX_j`s0Ɵnj /}rȫ;oԟ__7"7;;H/?c13Ͼ?i#K"^NQSAX^b`W?zSO`u} @\={|*K$`o񯿐C~ÿ{fYN  y)?d&`isݤW @X=7ޫ!77DMzTDuv^2Y=AXYV`G%U}ۏS"ҧ:J۾SU*W"җ:J~wO+C> K_*f޻?v`]C @RM?w 6'AJN`gET|_ Zn$}{),= "0FW:Kgr# @NMOCyGx-8@z]ʶ4%|H!mr "':H۞~D$J_"nb{*"'AXS@ R~xfkm8MG}A  HoeH ,)}p/Xo9o]D%s^9!uw寽C9J// "ҷ9J//U\ IDAT "ҧ9J/ "җ9J/ "z҇9K/OMG '}pH "rg9KJО ,'}p "jG9KJО &}^>ǫ "j79pJО ,&}^? "0C9p[7oo|8Z7K_zzD`>^GqB huW} Z> }^A[{_';Mօ Z> }^A]!U_fAx->J/!c_zCAXJ (UW叾B_<DOqkPu, ?/8`!K8T]?ⷨ_~wl=":҇8aEp 3~a_7 AXG ,TW~_=ED>HKo"#Օ_7~W["g33@\z>~x?3#H_ĥWʯ =38u}yL"*78H/#u_ _[U`)a/{9^[n&^LGLy Z*}w0f8@]eȋV٤7/D^$nMLy >Oz7쯮2AE]`F "@w/D^$ͥ/nO{){DK)V} "A-}o0Y]eȋ  `guݗ "/D6J(}u_&HZK+U} "A,}k0U]eȋ [ `Wuş{N> Ah,}i0UhOK_-TDw6K/*=Uz қ `G=Ah+}e0QhOJ_/SD`>^olH/+Tz: `7=A㵼JMhOx-!RD`>^kkH/,Tz. `'=Ah)}]Fze'-kH,}Tzm @#JО t$vQDҗ5.*=@{PChOI4^\{ "@?nқ `=Ah'}UNzu'jI.UzM @CJО t(6WDf5-*=@{LZhOzI4^`[ "@/{ `c=Ah%}MVzl'iJ0UzecK `[=A|YVhOx-_>h-6UD`>^/iZK1MUz%Cҋ `K=A|I"RhOH^e "@+ҫ `C=A"}D2NhOH, SD&'4KH3Tz  `3=A!}@BJhOzH,"RD3Ho4Tz 2+ `#=Ah!}=JHhOZH_#QD3H4mTz BK `=Ah!};RDhOZH$PDҧ3+Io5-Tz Rk `=A㵼 }9Z@hOx-/H_%WD`>^ ҇3kI5Uz bҋ a=A㵼 }7bxXhOx-/H&UDg3Io6GUz`j7}6~x\hO&~x\hOf ^a=A na=Aǂ8GUz`b;8GUz`^ ")TDy "*TDi},xLhO%xLhOf ޢ!=A !=Aǂ:Tz`R:GTz`N " QD9 ""PD)},px@hOx-?D8Ltܯ'|z ΢ ~=AD8Ptܭ'|z ޢ+n=AD8Rtܫ'| "(UD"*TD|,pShO#pOhOf >=A ᢛ.=AǂNj>{Tz`2pJО cAChO"~w "TC "TBv=A=D( pJО D 'nUD|C݁ "DKF=ADž"pQhO!] 
"4.AE m*=@{累 @Ztܤ'|Z^!GnBTzY_KA*EhOC݅ "03.CTz9_z =A z=AK=D`}pJО LA`хpJО "pFZhOf 0JVhO&rD8\t'\'DIt)\'yAE"*=@{'0ZNhO^!ѽpJО  "&RDW{ @Bt3\'iFkTz "ݍר "@=D #PD,A)E#*=@{VDGUz J`RJО 9kfDHnH7Uzϙ^KAiE7$*=@{D=D &#RD`>y-!1% JО 9k)0xChO浼"D$*=@{#0x]hOR!AE JО "L/)^UDz @RtU'! Dw%k*=@{qeD.KTz CxEhO"!Yu JО D"4ݗ/ "@=D ,0^VDA6E=A"EW&*=@{ Htg'ǻ"E& *=@{pRhOpwDMt|'GDXFt|'||-!ӉTUzς B;s=Ak@DOt =Ak)V*=@{g"0^xRD`>˽U'=AC=D`F NhO&fx';{"L)[>SD}=C] "ݮ "D]=C&ݯ`WWAq{ @y I`Y =_˝z @/m JhO!g8JО lb"LtgRDM"$ "v!D72p"=A~=DNΣ'D ѝ GhOcD'Ө "!D2p=Ax e,*=@{]{ @G DhO%WD73p=Axо=DnΡ'D ChOsD)S "C!ME3p=Ax E3p=Ax=DN'D @hO4z-!mEw4JО ZCڊ.i`}=AD- ,'|ڼGAƢ{X]hOy-xUtO "0.!=DWD{ "WD;CZj`m=A oj`m=AA=DVD"pVDCk`e=A Wk`e=Aa=DVD;Cl`a=A Wl`]=A݁=DUD "pUDC*=@{p3An*=o}Go+]o "Uv}6~$(8",!5Uz|>Dc"pTvo_[p{ XRDž_ψ|r"ܯ wno`I`=?_="?|@LZC*=/H>_-o̯=D`鏿?w1/˻ߩo^DZ "pTψ#"b"A3ky|DXFt;xt߯}㷈|oy Dn ̥ݿSUn!nq`9`~F^NUA nq`9`{Oq㫿/仯SU! q`5`{ \-CVb*=ō~W/~ÿ߷p-Ab*=.׃Ty? \)CZ*=.?7waG}<,ˁTz]m!_;bw>"6Rw9ߺD.AK"%΁TzD>[??}H.| EMD9J;Ȼo?gS[X"FRwG['/A nt`!`{3w"NQQ?赿 D`3ѝlv E!nu``{lD.K{M"^VQſ?}շL${XEDhDXQtۻS/f;44k)ƢXD_ݫs~5k!Kv` `ǫ.[f^pDXRtk;B><w_/ "0^KAv*=~?~K:"|z-=D`Qπ@~?"r]$|z-=D`Q)<᎟wD>G "k9D`Y ,at_+ap"9D`]Wv1 7?>z=~䣏~Og85M "]C>DXWtUz}?2r "k13c ۋnyJAxO|D.|u=D^!OVAtUz|G>KAC~"/Kw,v@w`7?Wƅ Oy?:t yg̒X`E4Wv>oz\ "~"AxI:<äVEtUz2i*"*=@{t 4{]@k=A,B>7I: "e1NZD=YhO.Jw/y*%*=@{pQ|a̓>V`/х4VDKKc |JО \ όҷ*'*=@{pA<3&Jڪ "1RTE>UhO_tynT`GɭUD`>2@3fJ_J[ IDATڪ "0kN CUSn}UzOL'҇***=@{'Z Ww*+*=@{'ZWg*+*=@{t1VLE?QhOޓsT[ "s1XHE/JО <G*.zTzt`LQ`#h'_JǏKhv*=@{th̖>Q;'Kp =n*=@{tl PKh'C|`L>P['C|`LO['O%c} Lj^@+=Ax'=^4Kp=tRDwEcu @'=AL:{l N(ыh'CN'z}TzSA& >*=@{0uD8UQD`>1c4xUz ܘ1}*=@{2<^7L_pJО DCS8qgWhOZƔuUD`>sc] : "pv1f.cEJО 'osR8X>: "pr1g*EJО oR8ZB "pn1h(E/JО WR8\FWDұcM  *=@{Y:v\cIx+^hOKY')D`v=AN,:2fM_Uz8t6}@BR&WDҥJcA [[hOJ+i(DDo`n=A*:5MߣUz8tژ7}@H^fVDҝjc5 !{YhOsJg덁(D/`b=AN)]9n0&N=Uzer`L>F fכ'|-ӑc- 9{@_=Akn73OQ* "0A1sJО L'یӗ($w}UzίephL>D j' Ǎ;0JО gcPȊ^*=@{Lol̝>C ,z?3 "p.q1x ̨'+Ң0JО wP|*=@{JnaLA .zC "p&q1zL']&#Tz8t۸˘=}w0JО Nç/PA&SD4eNc S\*=@{FliL?` [KhOH{'!zMS "pq19D 
`*=AN"56O0=̤'9H_0EL'9H0Ị'|vx-UoOƦ7WhOlZ#GHߞ0-JО D!}zD*=@{g242>Clx&mUz֯e:iHDO '9qヤoNN*=@{,-]360>ID '1c 㣤ONP*=@{,30>J E '-c㳤/NQ*=@{+21>L)EO '%c#ӤMR*=@{,+]262>M9E '!c+MT*=@{,*163>OYE 'c;MU*=@{,)]164>QiEo 'cCoMWb*=@{214>Ry=~|Uzïealj|  "0G_tPKftSD`>邱҇&l h'|ƧJ0MNJО yL3`n!@'=AV[+}fܢQD`1~g&L.z =A֒,}ePD`-|G&.z=A;,}d7pJО +INj=1`~+8\hOGK0'Bb㳥OLh zG "tp "*=@{#.1>\pJО HO>0-'*b/K!z "txz^#*=@{,"-v3>_&pJО kHg.E'b?K"z "tڈ$q*=@{ -4>b>W pJО L7]Ϙ>-`B=A#<7>cFv8OTzϭe:Yk|e q "0_tZDRD`>`1w%ϑ̤'|L>G 0JО ͥ{LL>PDt^(*=@{4}T@;_hOұ㣦oJh'z "Y:Va|M D`w=AKCϚ>)'@_Tqa%tTUz+*1>l JО mKAƧMRVUDt8{z^+*=@{tG7}N@OkUhOҝ0Ih*z{ "S:Sg|5 ]E/`G=AZJWO>&'@KJq$YTz()4>rW JО  šgNXnRDt8K-^*=@{2(5>tΎ8`UzePl|! rǪ "0Aw$v '|^-ӁhcH#8RhOZNAg pJО yL"0=AzI"'@+:0>y7 JО D^1*=@{4n㳧OHX@VDt=}A JО }D=emUzh#]&BƧOߏ1l'@02>~|ED`K=AHw#,"z[ "DKČϟ`уPhOY"g<ˈ4v*=@{A oGXG6SDtH }:Bg JО Dx=kTzh $3H_al'I"k<K66*=@{L/]$SHߍql'A"m<k^7&*=@{2$cH̕l'|{-="n< 9f*=@{ky㡤FXN6RD`>^tx(;umTzϗe:F`<;*=@{Gyn< ];*=@{L,"0EbEQ=ANs#}1JО JI>`I[xLhOYC,Hߋ<'!bqEXSRD`R1<",*z "0tx kVxTzRBLd<ˊ<*=@{L)]!&2HVeEo~=Af3$}+WpJО J7g>`aѻWhO\CI_ܩ'tb2㩤EXYTD`:1T҇",-z "0tx,;~TzL?Lg<?=*=@{%3LJE/ =AO&}$7pJО 3yJOe6Cc<^A*=@{Ld$Al'"@nUD`'Ap"@nUD`'Ap"AnTD`_$At"BnSD`_&Ax!CnRD`0|!DnQD`0|!DnQD`0!E"nPD` 0!F&WD`0ҷ!G*VD`0ҧ!G,VD`0ҧ!H.UD /6R23^F*=@{ĥSJp*NhOtpxL%zW ")}D#*=Aҽaz9B8}\'@V:7o+ xShOtkhaଢ'JО &A Vf^VD !:,} yE&E=Aҍҧ Xn^RDxxhK,z9/ "ptbe!zS_ "pkO^g<|.zU_ "p xB>WD`&Aӗe =AvwsDO1}Do+IhOݞcL}s x'Hs9>ySD`gxxO>SD`_xx_>D'5aI>+7 D,'~)a a/>CK N'n%ai>gWD`/鐰8pA؂ "tHXx{$zmUzI#,c<\*=@{#1h.^\pj=Av O4}/\pf=A+4}/^]pb=AvnK4}/]p^=ANk5}/^^pZ=A6.O5}/^pZ=A6.O5}^pV=A5}^_pR=A6Axz-7-a<*z9Uz @yn<.z)Uz& @yn<!zUz6 @yn<%z UzF @yn<)zTzV @yn<)zTzL,j<-zTzJ:j<\!zTzH,k<}\#zTzF:kMX @Nq@ 26t+,ڶzU D <6nZ.{/ܖa9 ֪tO 3t#ڼJU D`t@pkݞnD7XJ=̔nΰ]phT'yv|`*]@"0K:AÞO7q=Q @Hh8Wt l& >gq>.{. ܦa;8|NVtO 5t\@tT`V ?ݿ`e*]@"0M:]HoED:XJ=LNnppіVtO SC[6t\H5tL ݻm`E*]@"p\:mAHnD;XJ=Nnpҝp9VtO ǴOwÐn 6w.{8"DƆKw.{x^" "ݶm`*]@"H Dk.+*T D9@ 26t\XɃ5t3B@dl8 h+P'Qo #ݲm.{8(pg8 hݫt! 
{H7lD=] @H k{йJ=엎H]"A*]@"W:pDpіV'}!Cn+6}гJ=IW%A*]@"+06t\M~U D`G:ᨤ;5zttO O %ݨWmW.{ؒ2t\SNU D϶4ઢ tO Ov G&ݥmK.{KMI,B*]@"0c86 ht>IhEAO ۊ IDAT@'NCЛJ=lGeO?ZvЙJ= s@dl8> h"B_*]@"p/= DƆF-!tO wS{3hSt!cnf}!tO C"O )ݙD;CF @pҍP7^T D =02hwtp>G )ݖMECC @[9f8P h+!B*]@"ܸQÑJ7e@czP'ᶥ}U'ZvЁJ=7-=3p-^Qūtpң>S +ݐVtO KOL2t?$DEXJ=+=3pma*]@"ܬDJwc@FaEtpS> G,݌!ўtO mJL7t/DFXJ=7)=3pҭa*]@"ܢAKwb@NsŪtp> G-݈AtO I3t$EGXJ=7'=3pma*]@"ܘt?ib)#h T'ᶤi"cáK7a@ZtpSҳDإ[0 .GT D%~*p$aq*]@"ܐd?@dl8z Xh/ KS'v"cK_DIXJ="=s/` %,K @9pўtO mHn8 XhW R'&gz0t,Ftp #=b#YbT D鉞 G1x -a)*]@"^zLqL]DKXJ=k9p m,tO ʥy6t,LDŽ%tna 2sK2a*]@"ZzcnʼnW'aң<0t,OӄJ=乌p.`&U DXυ 3o7!tO Zx.e8n Xh Q.{V*=s1M7[BE{NHtN!i*uBP @UJ\pLӭXѾr*]@"Pz?ib)劶S'a}"caM7ZEOt:"cqMYEOȨt6"cÁMwYE;Pt2""cÑM7YEPt.2"cáMXEPhtO šGwa8 Xh' U DXU G7`=6X @H\pxЅh; mU DXΕ 7]}6T @H\pЉhK -U DX G8[6P @UH\pӭЍh_ T DX5 9YJ @HO\p}Гhs T D^z^ʆnDThtO B:6tSt&ڢB .{ֹP{*7.tеNñNT@*\] @GuZv:mU*]@"+=pУh WV'[9Fn.EUJ=J2t7)ڱUU DSzHᐧ)SѦtO B3ݟ*b8^ VotУ~mᠧ;)_tO B DƆÞnEWJ=I DƆnEWJ=IOMDƆ#E[XJ=}ImDƆCnEXJ=]I >B݋py.{zI~d*]@"t$=1t@KtЏ`Npn.tO B/c9)HO*DZJ=HO + =+i*]@"!=3,tE tЃHNҰҭ.tO B9Q*H7ND{[J= Ao$…T DX8Nڰm..tO ¥q↕np .{-=ZHLD\8_ @%K,>NV'a҃80,tPمsU DXB !/w<.{*=ÂHwKJE[^8G @eJ,ǰ$ҽVѮP'a3xib)5n6pJ= "cêH7JE{_8U @IyaY$`բ/tO  "%m$.{&=}/@dlX& Xh 't,{"cHHE`tO ’GohX Xh' U DX #  L.{$=zDHGM60O @%I,Ѱ8p#0Q'aIң7K4,to܊h? 
3T DX #7#t.{E'=zDUJ+y*]@"7xX&btO B'=zDJ7*]@"=yI,xaJ=Q[/7ћ%zV 50Y @Wm-VB=2LT'!g&=zD;P T DHJY= P T D:YKP U D82YkP T DH8"YP GT DhkLzf.VB}3<tO Bsϼ¤G兩?-U,uJsT DhV@#Q'__ңRDƞ]:g8tO BK^]ңRDƎ,VB=4P'-{)"cG@+>tʔW P+ T Dhd Kzf VB4tėM[C:jV'/*ћ%Z uT @YPc OT D9(ћ%Z 0R'fGohJh&^ÃJ=W5$=zD@+6*]@"\\L̈́lW'z濔Gohrh%gÝJ=Ws+Izf'fB6O •:YSV@3v"gp'Go5LU D_Cң7Kth%us*]@"\/!ћ%:}]4ji.{.׏Z ܰJ=tGzf]]pnV @9#=zDg//VB}87tO ¥\#=zDX`zqnR @˸Fz^2]R\dǹA.{.R{)"cZdZrnO @ ؋Fz^ إV@+[S'l|HK!:h%ԙs[*]@"/{)"c\isnI @\"=z/@dk P tpKZGo‹ Pͨtp˿VGo P΍tpkRGo PMtpNGo*KP-tpkLGoJPU Dzћ%ڲh%Էr.{f+Dzf 5Z @YY=VB.{&hYVPWT D8KBzfڭCVB=R'ሦ/ћ%j H @g5~5H,Q۵Lg5*]@" =zD͗#@+>tpH =zD Lg *]@"zHKqZX̚h%ӿJ=r/{)"ce Hw.{%_ңRDƂ PO*]@"<}OK!.MVB?t0OK!KNFBCݪt`{)"c JhO.{xO,V(@+iU Dxc)Oћ%Z"h$4ПJ=K47Goh9d@_*]@"7.L?ؔYeUFBtܴMAћ%Zjh%4%ЋJ= K?lJJ,+@+I>T DnUMUћ%Zh$4.ЃJ=MJ?YZVBSKW'9g6ťGoh+U DnLMyћ%Zh%4A\.{~?hS`zfz ,T @v1=zD,`VBT''#6eGo5 Lh`q*]@"7 =Z M,K @dmM,Q_kЄrT DV, =æ ,D @dlN,QKмT D(<ߦu U DV'l|M{)OtKh%4}T'Y6 =z/@d Hh!tO slCz^ 8@+YJ=:wϵy{)"c+YFګt@6%=z/@dl=dB[.{Υi/dpңRDֵ (4T'U6*=zD\Ztt)zqYzVBS V'N*6.=zDk^tO җl_zfV 1\K @k< Jh**]'/~ϯ@65=zD7t"4mJp55%K?K6yћ%РÅTky_W@dύmmuzfnh%4p.JҍOŷ'Ysb{GY=- =\nqD%Lћ%*]U7?> 2 26!=zDNB#Uk՟?3ۗ@$,lݑY" @@h@bJp ޥ_~|/@$%,Y" @Ph^bJp 8[<4~B[I,3 /4=Jp,g 2Gf`Iћ%r,FhbJp}?;gG+J?Yudң7K|XРŠ\vܿ.#5sћ%rZ,[h\7l_.\F)wY"'@WB3 t?sI[DN~Y^MKqZX @BMtw0;ѝň@dɪmvrz^ Ș3`5BJUd<obD OYl|z^ Ș`Bc_*]2 -],Ĵ`z"MK&ެ'EwXiěD M9i+҃7@d5r8W`-o7j qZLK&ެ'nAL\LYO2 }| i+p[*]]kwadq4-.>oGAL\R..Ss8WTΎ[9i+p[*]넜,>,ƹ\ޯM_8x8` ܖJpCw4x8` ܖJp.q_3 X}<Y0snK v<;^d4x8` ܖJp ;W 9g X}<Y0snK 0c_|WFAL\R[c^_ o_ҿNMDY0|]( ^<֜M?o]!K9\y?!ut\xb4{ϛ~ǒiN?W^~@V3m53_3{Lss76S:Ls?}G SYIw37%Ӝ~<-XJ14-4%Ӝ~D +R9?۸믏m}s;%Ӝ|~O"..㝝O?o?O<]$`ϕ76T`iя'?n4%Ӝvn#t }?{bш/h]K9\$t 9h>\ צ$`Ε;wo@VI/nw}, saON ߏSo~՛2%Ӝv7G?̾$`ΕMd&XJLw?_?McI4+C rw N ߻OiMcI4+< XJLfI4'+?^uU SwӻtX0in tә~bI4'+oܝr[?.K9\yg)SLl$`SΕl#թtt_X0 /ON 3Œi+wN Sձ7]Xe%?WF߸{G S"~yʒif+oܽ#թt^q~gOީwM, fuXJ7?~o%=W>|Igx.ko (4~Ēi+.Kw{7b]K{DT`}ϵ~ǒif+.a~6m5%}?{N^w, sŷ U`wvq1?w, sE +T9?h;3޿m]!K9\ U`/Jx;5BLs޹"t,C oO;5$`P y ̳?oU$`sP f;{{, sE +Tsk5 K9\ U`_ϟ{, sE;OIDAT 
+T'}3۟o{, sE +TZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZt@?|{/|]y9?;voy?lST?JWٕf~p_ݿ@ft@@?NG_̵W~C ND`JDz9ޭIll.F"7ƍm r$xwIݽ@t@@W/O .A6yIUC ;zCX2S>3s%U"pJ DxDAP.ټl.x6y|;nt] {#wS_@j?ԱaJ<=v@dt@@d>꩷5mYwq?{ß~ety'[ emƜ']{@!=vF 2T kع]+;LQʽO޿l r|cu͕jdb;=p`]*]p4(w!%A~w&&`>}yNr< D&l~=vr 2T `>f҉=Qpן~Z|v] wL&yNM@d?ٻ}5[S֥GMBRO-Fsh~͗7 ߺZO}s׸_ ?~ɏE/}r7R|8MF[A&3؉ԇ RcfF~7+mMNǔ3%(>6.'zv1{>=؇okwn~rB!l{@dUt@@d3#? ԛb{|c7|]<"bj/w&ݝ??zv?7)N D-XJ<vB=7; Do~}wG)ȌRg>\AaH>( ؛`{q7ngv~.e7vLC}knc"3J "8ǿ Dgw DۼawroxR=g9>wG)ȌR_:2^?L;wX0.];cx?r Dfl~>c5T7w[*.{CȌ1` 4V G۹8zmsߋq@dUt@@?>~7"3qhߙ\bk} 9@dc.[*.8IS?|칋aߙ\i t!ؽO D\ٿ[*.8|O_;Ȅ)]@+jpϻyO'iwbcY{ VڟM?d(緷7|/z"0] N'^G#)8+r؇ Dl~Z r;sOcY{ V'"}мǴ,{c"yZ 2.V >lr=a D`JD^7+wtkwb}^1LWh_Npx|DI U\D=1w~пJD}_=wq@dڕN"6?5MxP¾?8{̼.,A "owqݝ]vw'b|ՐN\Ru;kw}.7s{~cgpޙ3@d =@d'"Jz} Dv1/f-XJ Dy'+{:̹V{q聍ҌODOsms@dUt@@dÀ}4s"+&{~Ew"O&"/9{ V3w$&-o'pW {;ƁMo9coT @dg4~5|vcz 2. D6_Wk?>P Do~F rG=6z/Χjlf$SJ N?fN3] t˽:;y3|FA<)!w{ V3 ӣȜ87D U.^`vd?ys=F1?>㦣[[&.|0IOWkRwqn 2No~MA7ssWt{0Qxo}|aۯmr5t@@ ;?)8;e 37?'Ga;s٭[N[".8!dq × &x6%v D^[}Oc`g6?/\tc1!׿?`=*]Џջ_b~}7ݿ6z_lG6=pJZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZtU*]@k.JZ hVZT?VIENDB`bayestestR/man/figures/unnamed-chunk-5-1.png0000644000176200001440000031346513603762236020405 0ustar liggesusersPNG  IHDR .PLTE:f:::f:f!c!!!!!!"c"c"d""""""""""""$c$d$ c$ e$!e$!$c$$$$$$$$$e$$333::::::::f:ff:f:f::LPLQLQLRLLMMMMMnMMMnnMnMnMMMPMPMQMRMMMMNPNQNRNNff:f:f::f:fff:ffffffffffnMMnnMnnnnnnnnnnMMnMnnnȎ::fff:ffffېېnMnnnnff::ff۶۶ȎMȎnȫnȫȫȫې:ېf۶f۶۶۶۶n䫎Ȏȫccd d e c c d e!d!e!!! c d!d!e!PQRQRde c cPQRfȎې۶QRdP~ pHYsE4E4Ƶ IDATx}m}JI4u0׆X [~8/L6Aؓ'@KG28N M`&i&#ub3Q0rLX6uEKAđܿ{ys:Xg:w޵־v ̵7`l  B(D8  B(D8  B(D8  B(D8  B(D8  B(D8  B(D8  B(D8  BR^po~SoI}xq}~!Z?zgI[|Q!r9s|. 
=?7Vޙy@"w֧Kk3*Dz6gy.T,e9,l ͙G+ P|8.,DxCM ͙GpY(Dv}e<K ;9Mh^o<],"6|gۦټ+d]C_ycv'<ҿX!1a[pQ?rX8R,|t[ ==HyoE)D;ED+DnZOwB¼"Kx}wBd{?G!Ra[pQ 'Lxp~!Id(D*{.J!WDA!rk}˖ɮQT\Bde};^rf';VT\Bdw0Y WDI!Nc(D*{.J!R'>qy'ڮ'ŋ?ϼ=ۿsF՛[>P"kftz]w.c"]r{_{\./Q8FBKf׵{>!~Q˝W} n7M>6 חƋ6n} {6e[qO=g_Z];t΍t9/#sW;[?HOm;cQX'E!ya9?x>pg-y{t8|0yp {;PmOX =%RUt+g;h!rζZE oY Л]:`, =D8n魇{aBl[y׏a>yGzڑBm) H_7𼥲ŹuO1X"+Wr8;f犙[}kz ߮]?~(lD.~?ecQKqNjkW KIvʏBxooK7,;p'4؇0\"E]٬߾v6cq||ƢY~p0|BܔzcG݃ǮSof3 PR7kz޹0>Z!RE }|޶wퟸT8Xm>cQ,$;u1'?{c{C߱c}/rx6+ū>{-/B֓a9<۝k?Y\[O|p Sc"]ypWeW>;[劣8X׷?;`, ;oad/ڃޕ{wc{ݭX?olpB6TyaK~A!9<__/&9~.L嶿 " lΑ<^Gs#_ZFb"_ycߜpp`>L?="}<9͕ E|5vԞdJ_܁f捶]wh!R#"]{ݽFd ӳ|k)KX!rA>9p^~Iɫ>q}X=<}hA;uS*t#"vmlr/Vgj@!yMA:/X l]O1X" ^skzR瘯se}9^mwϱԃߖ +)-D>9xV#9kH!R .Pti Wp ?[S 0]9W!i_v^1s7n7?3OvP}Wt^^"Ol rpI9G mo_W_\^ۥXo]O1X"ko}c?o;GGW ~͙vLw6}P=wV|ɟ{>(c))FBU>[S 0=86z:rܯ W̬d> \RrͿ~m]eѿ9_n`) -D_t ջǢw}Ƣ;:%v(azTHO'\Ѭ5پp!RTe-D#;iBݧ`, =vߡc53C13t,}fzw5=NTM #cwsBt"5U"e ~sBݧ`,a1OvlQxcϿwyW%~w~dE!һׇW&e!y"~Bd~U"1 _RSn >c/DScs?wv;+fJϠ^²BdxysC3H.C!R G #꾭k)Bdio؃O~CGv{׻rpv sd]o)D:e!rC ;-,DJ}BM!R#QlO;?l:pp̅%o~bچvy˟(D޾) nBS}Bt1F)rhGzw?v9b悇QtO!RׇhKfV[v F/D>'+D`$ [AŝAo"GWT ]ֲ}(D.׷l؅HH!S 0z{w?"{Wv+7R c-dșc\~|nO1X"[׻Ggۯ'^{Ρ3Ǿ{lS+7-D֯>tx}f93ȅHH!S 0zs>}+fv[ocͯ,\9z=ܸHo}ƢCȱCywVO?ο^u,>7tv+K(D;^o_zy }[Tݧ`, ͷ̬Ύۿ4}WkBMn~e0"#ۇυTap"՟꾭k)Bdcdaq暙w޲\[Oަ;kTo~e0"HMp"꾭k)BdcuP9;;;1ny(K|_ZבҽJW .R_ } !RܨHo}ƢY[ Rvãw˼z~@s[y "}} /W2]B*"ޭk)Bdesܽ9dSֶG=kՏvZQQ=D7.D6zx̼֝#"UZn?cQ2ǔcڶcQ?ּ^O79[wpBͿ>/DN9gI=v *D*؜FHzh|Ƣ9rd9s{V{eTnMґF/EmR79 Ob5E!c ç-ܘ+|^^0x$yuHn/0U!rdkt~qέD.X79 ObE!woȕcdz_py},m'5Rfod9Ή|Zσo~ayapcmZ֍)85H ?fD"-!rF"-^1LB!Ҁ+f`" ,9K(DX^1(DNwQm(DNkYSTRʭ}^D`"5'|)DN> "0c +D| ̘Bd`. ɼC旦l Շ)DNڵkC`"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@_t,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! 
,i 1'01@sC *XbN`c9N!9+ "T0VОI`C t 1'01\A]B$I`C t 1'01\A]B$I`C t 1'01\A]B$I`C t 1'01\A]B$I`C t 1'01\A]B$I`C t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB  39 "c9N!9+ "TP09 "c9N!9+ "TP09 "c9N!9+ "TP09 "c9N!9+ "TP09 "c9N!9+ "TP09 "c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PP[A{&']d 9'0)Ĝ<ǐssuB &']d 9'0)Ĝ<ǐssuB &']d 9'0)Ĝ<ǐssuB &']d 9'0)Ĝ<ǐssuB &']d 9'0)Ĝ<ǐssuB &']d 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]Yc IDAT%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *\[z+h$!:rn.PPA!$!:rn.PPA!$!:rn.PPA!$!:rn.PPA!$!:rn.PPA!$!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!Bk So활v1@sC *(DRv1@sC *(DRv1@sC *(DRv1@sC *(DRv1@sC *(DRv1@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(Dpmaꭠ=.2bN`cȹ@!BH .2bN`cȹ@!BH .2bN`cȹ@!BH .2bN`cȹ@!BH .2bN`cȹ@!BH .2bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! ,i 1'01@sC *XbN`c9N!9+ "T)Ĝ<ǐsB s 97WP(D`IS9y!:rn.PPsC t 1'01\A]%M!9bN`cȹ@!BKB s 9'0)Ĝ<ǐssuB 4rN`S9y! 
-LgrEƐsB s 97WP(DIarEƐsB s 97WP(DIarEƐsB s 97WP(DIarEƐsB s 97WP(DIarEƐsB s 97WP(DIarEƐsB s 97WP(D`IS9y!:rn._|;'9w>d'eIS9y!:rn-7/o7*D&gIS9y!:rnt Ǟ>D!26KB s 9'0)Ĝ<ǐssĔ9Gq,i 1'01@sC"=79Vp|D!2=KB s 9'0)Ĝ<ǐssĄGVo^߿ˆ|>))@J)Ĝ<ǐsB s 97WPLW|y3,xCR,i 1'01@sC"\lDOY'gKB s 9'0)Ĝ<ǐssudȢl'_>zǻO/5uW%M!9bN`cȹ`Bd1{|/; #} ͐CR,i 1'01@sC"9l?>~䚙E[r{%M!9bN`cȹ`Bd13>W )YbN`c9N!9+ *DʏdǿCR,i 1'01@sCS"_>겘?o~J)Ĝ<ǐsB s 97WPLZt蹯C/cb EJ)Ĝ<ǐsB s 97WPLU|w_{uMg[W7/Ƕ')o O'Cw>7YO Y"6W"kf&"Bcq ׯ_>?vBdq*S|~٣+f~sy~W.~~O=S۱>uƯ۟Z3 rN`S9y! Z $9 э;zY^1sJ__o:N!{򊙾Bv^G}9s\_yg"wSfN!wY["g}/3bC t 1'01\A/1Y!rx~Dzۅ,')!\)-ffy@!sZꚙmUGdf53+fz WNu/D,.jm!rFgb~ٻGɪy ȼEƐsB s 97WKLV֣"'wSՇF[ Q{Y]1_8[rS՛}̬9,DzFۇ2+Dz_pw<}s[ۈA;ȧqέL.2bN`cȹ^bBdYhң#{e +f|Ni%"=̬/D>ܯrB皙3Ⱥy}vOx+Ox~:B<7y||YE.DoO>'~_g="f}g?suԝ|i{ӿ ̚]d 9'0)Ĝ<ǐsstℐw|/sه ]|ѯ;'7uWhK)Dpx#ٻ{o:\PM!bԸ<d{'M!;d'5ڒn G|~q"7f9|ƶy`,UBO\4C?(Dik(VJO-D`Ywc䡞t'߹{sys抙BŞo'?ϜbN`cȹ@!By/iO{9!9'0)Ĝ<ǐssuB s^:D%rAD%R`1sZgNM t 1'01\A]lQg1sZgNM t 1'01\A]lQ7Ϙ93&:rn.PPaKZ݇hDi̜|sB s 97WP(D0%@!9b1sRgNL t 1'01\A]Œш1ט93%:rn.PPaKZ߇hD6fNhI9N!9+ "T^ш m̜М癓sB s 97WP(D0%C4"f3'3y@sC *{Iш q̜̼sB s 97WP(D0%=7jȀz9%9'0)Ĝ<ǐssuB ^9hDJHz9%9'0)Ĝ<ǐssuB s^߹h!1ۘ993'%:rn.PPaK;/D4"34y@sC *xIшk̜ҌӒsB s 97WP(D0%]W+D4"]3fڛ<39'0)Ĝ<ǐssuB XRf1όA t 1'01\A]xsѨkD&abN`cȹ@!B9,4HZ#2y̌`(@sC *\[v"c09 "c9N!9+ "T.D.2bN`cȹ@!B" 09 "c9N!9+ "TH.D.2bN`cȹ@!BH .2bN`cȹ@!BB$19 "c9N!9+ "T~I+-"arL?όD t 1'01\A]K:e!Ԉ3#sB s 97WP(D0*DarL?όD t 1'01\A]K:i!Ԉ3#sB s 97WP(D0[Y(DN$|bN`cȹ@!BɗtB$19 &g":rn.PPa%iDLN癱9N!9+ "T|I"#19 &g":rn.PPa%=h]4"&'h@sC *L S3sB s 97WP(D0Π iDLN9N!9+ "TxI iv1@sC *L(D2.2bN`cȹ@!BT!2.2bN`cȹ@!BtHD#brEƐsB s 97WP(D0풖 09 "c9N!9+ "TvIR$4"&']d 9'0)Ĝ<ǐssuB .Bd<&']d 9'0)Ĝ<ǐssuB z"I`C t 1'01\A]p NhYLEU8D+(Dш\ *(D"W/DnJ"s*D]DLNrN`S9y! ,Dʊ iv1@sC *L*Dz#brEƐsB s 97WP(D0*DdrEƐsB s 97WP(D0ά⍈I`C t 1'01\A]„KZXS(DN$!:rn.PPa%U$!:rn.PPa%]!r.2bN`cȹ@!B閴PI`C t 1'01\A]tKZRX\F$!:rn.PPa%--)"arEƐsB s 97WP(D0ݒc"W19 "c9N!9+ "TlI; iv1@sC *LB4LNrN`S9y! 
&[bB 7"&']d 9'0)Ĝ<ǐssuB -iqE9 .2bN`cȹ@!BI`C t 1'01\A]TKZP(DN$!:rn.PPa%-o(F.Dl#brEƐsB s 97WP(D0Ւ 09 "c9N!9+ "T0Vc"W19 "c9N!9+ "TP}1arEƐsB s 97WP(D90b$!:rn.PPA!rhI`C t 1'01\A]4HM=9 .2bN`cȹ@!BHc&']d 9'0)Ĝ<ǐssuB ,iM;9 .2bN`cȹ@!BII`C t 1'01\A]$KZUN(DN$!:rn.PPa%*'"arEƐsB s 97WP(D0ɒVS"W19 "c9N!9+ "TdI iv1@sC *LU$lDLNrN`S9y! XҺjB!r&']d 9'0)Ĝ<ǐssuB S,i]51M!r.2bN`cȹ@!B)PI`C t 1'01\A]KZLLT\F$!:rn.PPa%+&"arEƐsB s 97WP(D0VS"W19 "c9N!9+ "T`I+{ iv1@sC *LB4LNrN`S9y! &X^bB5"&']d 9'0)Ĝ<ǐssuB ,ie-9 .2bN`cȹ@!B񗴶ZI`C t 1'01\A]µ1߱PI`C t 1'01\A]B1ץ5.2bN`cȹ@!B1cKk&']d 9'0)Ĝ<ǐssuB \LNrN`S9y! "G0v1@sC *^Tw"W19 "c9N!9+ "T}I+ iv1@sC *ՕB4LNrN`S9y! F_JbB4"&']d 9'0)Ĝ<ǐssuB /iu#9 .2bN`cȹ@!B2I`C t 1'01\A]KZ_H(DN$!:rn.PPa%/$.DJ#brEƐsB s 97WP(D0 09 "c9NsaS IDAT!9+ "T{I +҈v1@sC *#"arEƐsB s 97WP(D0^I`C t 1'01\A]Kz6B!r&']d 9'0)Ĝ<ǐssuB #/iv1@sC *h#fP\F$!:rn.PPa%@9 .2bN`cȹ@!Bq"e Јv1@sC *""arEƐsB s 97WP(D0^E!r.2bN`cȹ@!Bk cEi?{ciZw> ӸHd'`` NnfT>pi.ilLcJ JŘ2>:KɌX6>;"2sHXkyEb30fasDbuB}lW$9Xh c&.08(ᬃȤ)bCa˜ 39 "Jp :9^L`)>cs\ 3QCa˜ 39 "Jp:9^L`)>cs\ !A!0g MaÜ%H KD־H s&L`1̹]`.pQ9t RxEb30fasyv "+_DlW$9Xh c&.08(G:mpas"1̙BS3}0vAD cs\ 3A!0g MaÜ%H' DVH s&L`1̹]`.pQt RxEb30fasxG% "k^DlW$9Xh c&.08(G:qpas"1̙BS3}0vAD ;ҩH Ca˜ 39 "J8ߑN 5x9^L`)>cs\  A!0g MaÜ%HNDֻH s&L`1̹]`.pQt RxEb30fasv "]DlW$9Xh c&.08(C6+Ü ,41g sn D ujas"1̙BS3}0vAD "ig86+Ü ,41g sn D v`s"1̙BS3}0vAD "yg86+Ü ,41g sn DpAd RxEb30fasu6+Ü ,41g sn Dp#>?,oY"bs"1̙BS3}0vAD :냃H Ca˜ 39 "J8בN_8r9^L`)>cs\ 3 RxEb30fastッH Ca˜ 39 "J8ӑKDָH s&L`1̹]`.pQ™t`{pas"1̙BS3}0vAD g:ҁA!0g MaÜ%HGE"+\DlW$9Xh c&.08(cs\ Ad}!0g MaÜ%HG6+Ü ,41g sn Dp#YDjH s&L`1̹]`.pQytdyX ExEb30fass#ÃH Ca˜ 39 "J8ϑ "5lW$9Xh c&.08(,G:4<,uY"bs"1̙BS3}0vAD #CH Ca˜ 39 "JpЪH s&L`1̹]`.pQtͧVxEb30fasDk>b6+Ü ,41g sn D 2jH s&L`1̹]`.pQȀces"1̙BS3}0vAD g9ҡA!0g MaÜ%HV"ZDlW$9Xh c&.08(G:6:8԰9^L`)>cs\ s RxEb30fasqcÒ5-"6+Ü ,41g sn Dp#DjH s&L`1̹]`.pQ9tlsX ExEb30fasqcH Ca˜ 39 "J8ÑN"5lW$9Xh c&.08( G:89,{Y"bs"1̙BS3}0vAD g8A!0g MaÜ%H6+Ü ,41g sn Dp#\>f9^L`)>cs\ 3 RxEb30faspH Ca˜ 39 "J8ÑKDֲH s&L`1̹]`.pQB "5lW$9Xh c&.08(HG"+YDlW$9Xh 
c&.08(Ratnpas"1̙BS3}0vAD ":O!0g MaÜ%8<26+Ü ,41g sn D RH s&L`1̹]`.pQHbs"1̙BS3}0vAD RxEb30fasڏtxlpas"1̙BS3}0vAD G:<6aY"bs"1̙BS3}0vAD G:58԰9^L`)>cs\ #DjH s&L`1̹]`.pQBo DVH s&L`1̹]`.pQBO "5lW$9Xh c&.08(Hu "_DlW$9Xh c&.08(HǗ6+Ü ,41g sn D}KH Ca˜ 39 "J>a%Ca˜ 39 "J>A!0g MaÜ%tаAd鋈!0g MaÜ%t RxEb30fast|gpas"1̙BS3}0vAD GZ3fY"bs"1̙BS3}0vAD GZ038԰9^L`)>cs\ #-DjH s&L`1̹]`.pQB DH s&L`1̹]`.pQťo_28԰9^L`)>cs\ JmXxEb30fasD*cCa˜ 39 "JpvlW$9Xh c&.08(w4,y9^L`)>cs\ R]YxEb30faszbcX ExEb30faszbbpas"1̙BS3}0vAD GZ118԰9^L`)>cs\ #5,w9^L`)>cs\ #XDjH s&L`1̹]`.pQBV, "5lW$9Xh c&.08(HK "]DlW$9Xh c&.08(HK6+Ü ,41g sn Dz%."6+Ü ,41g sn Dz%H Ca˜ 39 "Jh=Ғ}A!0g MaÜ%iɾAd!0g MaÜ%iɼ RxEb30fas:f^pas"1̙BS3}0vAD GZ3/oY"bs"1̙BS3}0vAD GZ.8԰9^L`)>cs\ #YDjH s&L`1̹]`.pQB֬ +DH s&L`1̹]`.pQť]3.8԰9^L`)>cs\ =9xEb30fasDZCa˜ 39 "JhD6+Ü ,41g sn D Ң@H s&L`1̹]`.pQHbs"1̙BS3}0vAD GZ4-8԰9^L`)>cs\ #-DjH s&L`1̹]`.pQBM DH s&L`1̹]`.{y/G}Wӯğ!lJ- "5lW$9Xh c&.0;?O"G";ҪeaCa˜ 39 "~O0uocs\0 r5ލO~LjVpas"1̙BS3}0v`A/O_˿ 2#-DjH s&L`1̹]`.my̼u 2#-;,k9^L`)>cs\0 rҗ\'{58̥HV6+Ü ,41g sn DF9̥HV6+Ü ,41g sn D޽"ONWo Cؔ#-[V<,j9^L`)>cs\0 ~!{W䶷c?UY}F5"s I$I$s "o>/6\\je$I$)b=7y 2AnTX ""I$I3 1>w D8AUJ$IX E8?XI$IkD|*\Dz՟$I$i "@;AM DOv*Ca˜ 39 D 2#=,g9^L`)>cs\O"s:ҺEA!0g MaÜU "w@䉃|nQpas"1̙BS3}0v`Adۅ"ODuu,"6+Ü ,41g sn fD>s9.>Cؔ#-DjH s&L`1̹]`kۯǓ]h:AA!0g MaÜ}Ǿh:AaRCa˜ 39 s ">wЧ:,Fӑ "5lW$9Xh c&.K6uMD.B8,Aӑ DH s&L`1̹]`m|}̝wP=}~\pNpas"1̙BS3}0v`A53˅$ODtsH Ca˜ 39  "W/xV4iᜰAd!0g MaÜ y~[| pMpas"1̙BS3}0v`AwBi RxEb30fasȓww D=GZ&l`Y"bs"1̙BS3}0v`AdwDnO%91A!0g MaÜY'? 
D=GZ9&laY"bs"1̙BS3}0v`Aɓk~MS_"Ks[H Ca˜ 39 s"aS..U-A!0g MaÜ%8G H s&L`1̹]`.pQyTp!0g MaÜ%8G H s&L`1̹]`.pQB R:%lc9^L`)>cs\ 3)>cs\ #-DjH s&L`1̹]`.pQBˑ"5lW$9Xh c&.08(HKw "3/"6+Ü ,41g sn Dq3H Ca˜ 39 "J8a;ȼ!0g MaÜ%ti RxEb30fas:vEpas"1̙BS3}0vAD GZ"lhu9^L`)>cs\ #DjH s&L`1̹]`.pQBǑ֎"5lW$9Xh c&.08(HkG- "s."6+Ü ,41g sn DqH Ca˜ 39 "Jh8 aSȌ!0g MaÜ%4i RxEb30fas..~ A!0g MaÜ%8YQgH s&L`1̹]`.pQșu!0g MaÜ%8YQgH s&L`1̹]`.pQȹu!0g MaÜ%8[Y'H s&L`1̹]`.pQBÑ[DZDlW$9Xh c&.08(H6+Ü ,41g sn DPH Ca˜ 39 "J?`sL!0g MaÜ%i| RxEb30fasꏴz>pas"1̙BS3}0vAD GZ=log9^L`)>cs\ #^DjH s&L`1̹]`.pQBV"5lW$9Xh c&.08(H׃ ","6+Ü ,41g sn DP~ぃH Ca˜ 39 "J(?`!0g MaÜ%iv RxEb30fasʏ|;pas"1̙BS3}0vAD GZlra9^b IDATL`)>cs\ #-DjH s&L`1̹]`.pQBO"5lW$9Xh c&.08(H˧m"_DlW$9Xh c&.08(R7,_DjH s&L`1̹]`.pQB R8԰9^L`)>cs\ ԝy!0g MaÜ%8̥clW$9Xh c&.08(Ad6ubs"1̙BS3}0vAD ";Ca˜ 39 "J>A!0g MaÜ%TinA̋!0g MaÜ%Til RxEb30fas~6pas"1̙BS3}0vAD G0lw9"bs"1̙BS3}0vAD Gڰ8԰9^L`)>cs\ #mX DjH s&L`1̹]`.pQB6DκH s&L`1̹]`.pQB6"5lW$9Xh c&.08(HF6+Ü ,41g sn DP| s."6+Ü ,41g sn DP| H Ca˜ 39 "J(>҆A!0g MaÜ%ifA䌋!0g MaÜ%id RxEb30fasjc2pas"1̙BS3}0vAD G1l{9"bs"1̙BS3}0vAD Gڱ8԰9^L`)>cs\ KU߭c1pas"1̙BS3}0vAD "s:SlW$9Xh c&.08(AdnUg!0g MaÜ%8̭O9^L`)>cs\ Ae0 rExEb30fasDfWtH s&L`1̹]`.pQB鑶"5lW$9Xh c&.08(H["gZDlW$9Xh c&.08(H[6+Ü ,41g sn DPz-sH Ca˜ 39 "J(=Җ`yCa˜ 39 "J(=ҖA!0g MaÜ%iZ RxEb30fas*g- "gYDlW$9Xh c&.08(H{ƂM"u&#lW$9Xh c&.08(H{6 "uWM H s&L`1̹]`.pQBD~뾚9^L`)>cs\ # :CZCa˜ 39 "J<Ҟ`9u9^L`)>cs\ #DjH s&L`1̹]`.pQBLD!}!0g MaÜ%TivAҶH s&L`1̹]`.pQB6"5lW$9Xh c&.08(Hƃ "GExEb30fas..|A!0g MaÜ%8߉=i9^L`)>cs\ sQH s&L`1̹]`.pQ9W$9Xh c&.08(AD+Ü ,41g sn D r~"a˜ 39 "J(<ҦA!0g MaÜ%i| RxEb30fasꎴk>pas"1̙BS3}0vAD uG58԰9^L`)>cs\ #DjH s&L`1̹]`.pQBݑvDN-"igs"1̙BS3}0vAD uGڵ8԰9^L`)>cs\ #ZDjH s&L`1̹]`.pQBݑvD/"aas"1̙BS3}0vAD uG58԰9^L`)>cs\ #m8[D xEb30fasʎm; rx)b?Ca˜ 39 "J(;ҶA!0g MaÜ%itA$NAH s&L`1̹]`.pQBّ [D-"OiAH s&L`1̹]`.pQBّDM" lW$9Xh c&.08(HV "CD4+Ü ,41g sn DPvmli[DlW$9Xh c&.08(R7j DjH s&L`1̹]`.pQȜDT+Ü ,41g sn D 2}H"bs"1̙BS3}0vAD "srQ-H s&L`1̹]`.pQB 7ExEb30fasD Z^L`)>cs\ # DjH s&L`1̹]`.pQBՑAg9^L`)>cs\ # DjH 
s&L`1̹]`.pQBՑ"5lW$9Xh c&.08(HF ҲH s&L`1̹]`.pQBՑ"5lW$9Xh c&.08(HF6+Ü ,41g sn DPu}qXDlW$9Xh c&.08(HF6+Ü ,41g sn DPu}riXDlW$9Xh c&.08(HG6+Ü ,41g sn DPtH Ca˜ 39 "J(:р9/"6+Ü ,41g sn DPtH Ca˜ 39 "J(:A!0g MaÜ%ihDCa˜ 39 "J(:A!0g MaÜ%ih RxEb30fas.. р:T/"6+Ü ,41g sn D 2'0g MaÜ%8 RH s&L`1̹]`.pQȜDT+Ü ,41g sn DP3t"5lW$9Xh c&.08(AdNExEb30fasjs4pas"1̙BS3}0vAD 5G98԰9^L`)>cs\ # H"bs"1̙BS3}0vAD 5G98԰9^L`)>cs\ # DjH s&L`1̹]`.pQB͑vAr9^L`)>cs\ # DjH s&L`1̹]`.pQB͑v"5lW$9Xh c&.08(H;G RH s&L`1̹]`.pQBɑ"5lW$9Xh c&.08(H[G RH s&L`1̹]`.pQBɑ"5lW$9Xh c&.08(H[G6+Ü ,41g sn DPr|)[DlW$9Xh c&.08(H[G6+Ü ,41g sn DPrH Ca˜ 39 "J(9р>T-"6+Ü ,41g sn DPrH Ca˜ 39 "J4MZG6+Ü ,41g sn D 2 RH s&L`1̹]`.pQȜDT+Ü ,41g sn DP1"5IH s&L`1̹]`.pQȜbH"bs"1̙BS3}0vAD "srQ-H s&L`1̹]`.pQBő"5!0g MaÜ%Tih (H s&L`1̹]`.pQBő""0g MaÜ%Tih N"bs"1̙BS3}0vAD G;8 H s&L`1̹]`.pQBő"8(+Ü ,41g sn DPq3%I-W$9Xh c&.08(H{GgJ&Z8H s&L`1̹]`.pQBő"ϔ$=Mp^L`)>cs\ #m D.U$==t^L`)>cs\ #m D.U$==t^L`)>cs\ #m D.U$==t^L`)>cs\ #m D$=-x^L`)>cs\ #m D$=-x^L`)>cs\ #m D'=|^L`)>cs\ #m D'=|^L`)>cs\ #m D'=|^L`)>cs\ #m DNzh"1̙BS3}0vAD Eh X6+Ü ,41g sn D 2'0g MaÜ"*t5|AFU$II"Jp$I$qQȜD$I$_)!=80!0g MaÜ%ih `#_U0g MaÜ%ih `#_U0g MaÜ%ih rX#1k"1̙BS3}0vAD G=82HZH s&L`1̹]`.pQv"%=V+Ü ,41g sn D0~ݣmCIĬU0g MaÜ%if rP#1k"1̙BS3}0vAD Gڼ81HZH s&L`1̹]`.pQ6O";FYa˜ 39 "J>#IĬu0g MaÜ% ibȎb:xEb30fasw1xAd@1k"1̙BS3}0vAD G:< r@1k"1̙BS3}0vAD G:< r@1k"1̙BS3}0vAD G:< cz1k"1̙BS3}0vAD Gڹ\r1=遘^L`)>cs\ # .9옞@ZH s&L`1̹]`.pQvDvMNz fW$9Xh c&.08(7h 8!0g MaÜ%8AD"1̙BS3}0vAD HZpAIIH s&L`1̹]`.pQȜDT+Ü ,41g sn D 2'0g MaÜ%8i 2i9^L`)>cs\ #m n8711k%"1̙BS3}0vAD GڶpobcJxEb30fasFm+ ǴǬ0g MaÜ%iTӒV+Ü ,41g sn D0z]S "{LKzzZ H s&L`1̹]`.pQvM/83)1k%"1̙BS3}0vAD Gڴ ϤǬ0g MaÜ%iRp^SV+Ü ,41g sn D0x=;"{MIzrZ H s&L`1̹]`.pQw85%1k-"1̙BS3}0vAD Gڳ ߄'Ǭ0g MaÜ% iLp~+Ü ,41g sn D0x-3]"MHzrZ H s&L`1̹]`.pQw9OzrZ H s&L`1̹]`.pQv;D'=9fW$9Xh c&.08(aH;V"+Ü ,41g sn D0x #.CIOYka˜ 39 "J<҆`!'Ǭ0g MaÜ%\\ #.RQH s&L`1̹]`.pQ R? 
rP*+Ca˜ 39 "JpjyEb30fasD48Ca˜ 39 "JpjyEb30fasD Z^L`)>cs\ #-_p9"Ԙ^L`)>cs\ #q9"Ԙ^L`)>cs\ #q9&Ԙ^L`)>cs\ #-r9&Ԙ^L`)>cs\ #-r9&Ԙ^L`)>cs\D_^K͜aSƎxALScjxEb30fasA~f3w2v~"GeV+Ü ,41g sn "鯜!lБ֮8IzbZH s&L`1̹]`.:<~¦ i:pq'Ƭ0g MaÜ悁A䩏|/g7w2t!"%+Ü ,41g sn [KgaSt8ADczxEb30fas/xˋ~!lБ8OzbZH s&L`1̹]`.0зk1w2ta"+Ü ,41g sn =o'¦ i4pIѤ'Ƭ0g MaÜA쾝Ȳ^:3w2tu"'E+Ü ,41g sn 'N|`A/;Mw-G8 xEb30fasA ԏwNK$޹CؔAn8AHZ6+Ü ,41g sn ~zcgҙC99W$9Xh c&.0T"Ndy:w 2'0g MaÜAdD2¦8̩j,"6+Ü ,41g sn z z̗ S¦8AD"1̙BS3}0vdͷw~7s/ts)#GZ5  JzZZH s&L`1̹]`.D>s?c!lȑ'8DV+Ü ,42 IDAT1g sn ?5G߽ȘCؔ#-ZNp%=-fW$9Xh c&.0 "?P}}/;M9ҢUHb֊xEb30fasAۨ~{/͇&G ¦i(pHH$i1kE"1̙BS3}0v` rmT_R[ޞ53s)#GZ3 IzZZH s&L`1̹]`.2{/WDdHk6DbIOYka˜ 39 Ae0G_*s:lL9+Ü ,41g sn q2wfs)GZ2 t:I1kM"1̙BS3}0v`h9R^2㛪nV,"A+Ü ,41g sn |_|SՇ{?Rh6eH+IOYka˜ 39 Sz̯g@Cؔ#XDN&=)fW$9Xh c&.0LD|_6Cؔ#-"DN&=)fW$9Xh c&.0LDKed6eH IOYka˜ 39 AdmaS`p ;&^L`)>cs\1'Z÷h6eHCDN%=)fW$9Xh c&.0Lx>ןv;M8SIOYka˜ 39 S?eCAss)G: w"I1kM"1̙BS3}0vi9o6Ҥ/-xp;!0g MaÜ%LD CDes"1̙BS3}0v|+" 2'0g MaÜA_p.99W$9Xh c&.0 z(~>7U(9"GCa˜ 39 A/rmrjyEb30fasA%3W fj6e<8dMzV+Ü ,41g sn !ZAJӏ=A$XғJ^L`)>cs\DJ5hA YGZ~Hʱ'h-"1̙BS3}0v )3o_ZYGZ~Hʱ'h-"1̙BS3}0v ?wv"wLaSii"9Gc0g MaÜ Ջg^A g2w2HOr9W$9Xh c&.08(a= t8遟W$9Xh c&.08(aV= t8遟W$9Xh c&.0LD>8M>ҪD'=U0g MaÜ 6s)$Ih "1̙BS3}0vAD Ih "1̙BS3}0vAD Ih "1̙BS3}0vAD CIh"1̙BS3}0vAD CIh"1̙BS3}0v 2cs\Dm6AdN"a˜ 39 "JpjyEb30fasD7_DlW$9Xh c&.08(a> 2ɾ ~^l^L`)>cs\ GZ~$.yѲyEb30fasi1"IFa˜ 39 "JzOG8L'-W$9Xh c&.0T ">|}Bs)Si$]E0g MaÜW/=Ϗ|aSi"O'FKa˜ 39 Ńo<ӯ^xߟ-¦L=D&tO+Ü ,41g sn "u|Ow[C"¦L=D&tO+Ü ,41g sn & "o^-_|.s)S0%]#0g MaÜ)LJMmaS&ia"SKgFa˜ 39 >{{|S_GWCؔGZ~T.ryEb30fasAA;\W+"s)0vbyEb30fasA~y!|7ۋ{CؔGZ~dIWh"1̙BS3}0v ?\F;#wa+aS&iA"$]S0g MaÜ Gn6ͥff6e6< 2Ne?5Z*H s&L`1̹]`."1s3}4K0w2Hqn'鲟-W$9Xh c&.0ļIW1 MwByEb30fasA2?7FUu6ev< 2nu?6Z(H s&L`1̹]`.H";[8lD܊xEb30fas [8lȜ[!0g MaÜAd-DeAdD Z^L`)>cs\0qyo!r~̦MDZp"3Ca˜ 39 oz-D_C_=C9"/Ca˜ 39 AE2ן*s"/~!Sy?~o6AdN"a˜ 39 Akb[Ǜ/!lʴ#yvAdKrD^L`)>cs\D_%s9y\#^1s/Uv=9t폎+Ü ,41g sn kf|ɓ󊙿zeCg7w2Hrs+-W$9Xh c&.0LD-2·/vcdaSiӳ^"cn%]0g MaÜ pD "UvM{9 ztώ+Ü ,41g sn "}A^;,!lʴ#zvAdЋv<^L`)>cs\0eܼ>E!Uv]8 ztώ+Ü 
,41g sn & "Ofز*M;Үg}DF$]ã0g MaÜiȓ'~?`aSi۳"nxEb30fasAd珼yf6eڑ= 2y?=ZH s&L`1̹]`.(Dj6eґ= 2y?>ZH s&L`1̹]`.pQ¤#{tAd|0^L`)>cs\ IG辇ȸ~~,^L`)>cs\ IG~ȸ~~,^L`)>cs\ IG~ȸ~~,^L`)>cs\ IG~Ha˜ 39 |;_8 ݹCؔIG~Ha˜ 39 _zqK?m;¦L:G{D*x=3xEb30fasA!"uoZw9Tzf0g MaÜ)㯟CDAdN"a˜ 39 SO!"[ 2 "x=#xEb30fasA{V9AD"1̙BS3}0v ?z2 2'0g MaÜ W|9;M2>ras"1̙BS3}0v ?y刺rs)S}H Ca˜ 39 A-U|#3;MrO;DH s&L`1̹]`.H"o!f6eʑv? RxEb30fasAzyﳗW_7Cؔ)G~Ha˜ 39 bJS.*6g"1̙BS3}0v` _7Cؔ)G~H}^L`)>cs\0=DDi"FO+Ü ,41g sn &~ʌ/arO{8z~^L`)>cs\DMUiۓ^"c澞%-W$9Xh c&.0_bM9Ҧ'DC\D+Ü ,41g sn Dd6eʑ< 2`$]^L`)>cs\D}_;MpG8 p0g MaÜ ۪^|yWa6e‘v< 2=Ed"1̙BS3}0v` raS&iÃ1"C\D+Ü ,41g sn ")_}fs)(Dp"1̙BS3}0v` qqp#s)(!."a˜ 39 A^Ϡ;Mp9L a˜ 39 "Jp9Ltbq"H s&L`1̹]`.pQU/~p?Ad" H s&L`1̹]`.pQȜDT+Ü ,41g sn D 2'0g MaÜ '=mrjyEb30fas]C99W$9Xh c&.08(AdN"a˜ 39 "Jis)"9yEb30fasGZ~D"@^L`)>cs\ V? 2W$9Xh c&.08(!8La˜ 39 Ão~61׾ѣO~ӿ_/¦䏴Dx"1̙BS3}0v`hؑ~쏿z#}'&W_O+ߘ;Mis "S9xEb30fasAׯ^ܸD~>ķ[ dz}?AWJ;Mis "xEb30fasA/.v7(=vG>_~۱Ed6%9L a˜ 39 _rq`x{NG6=gBCؔ? 2{W$9Xh c&.0LD޾\"_7:=߿OG|ު_ӿgaSG~H s&L`1̹]`.4!W?W"ݻrC/ӻhf6%8 p0g MaÜ)eϾ 0)oLWǁ_yroaSG~Ȑs;]?NW$9Xh c&.0LD&ȃמ[ȋwZ Eÿ"7}!lJH{ۏpoyEd"1̙BS3}0v`  r󾪁_yۢ^{7V G~ȨsVyEb30fasA~ dg~c8m[_3?"g>ҶCDC\D6+Ü ,41g sn ;\*fwݝ\HR9= 2A+Ü ,41g sn țw^;\ȇN}}Ǿw搹Cؔ= 2A+Ü ,41g sn w~ vMD x?~as)#{l?AdؾAEd"1̙BS3}0v =\͛D^Qs }r'`שoM|Ac""m?R$I4 D'jR/ʥOAȜD\D$I$m7!xWD:9AD$I5}ȗݽu~{H'99H$IRȩ9{o,r%N"suq$I1T+)3S?^D~{" AdN"$ITcwN}ssf>soQFG~ȰHfa˜ 39 A"Wsׯ9iS!" "."a˜ 39 A;\[4Sfnz̎Cؔ= 2A+Ü ,41g snq "u{ A5ӓo6%{}O8 ;4lW$9Xh c&.0+LD^3s r~ y#6q\!lJHq a˜ 39 _xړo_DAjyO "WoCrGaSG~0 H s&L`1̹]`U2\ O}[O_}~'Ow>> iS!""."[a˜ 39 Sz?$cD/{}O8 s0g MaÜIȓ-"|<-|O}k l|da0g MaÜviȞycq"w}?ef#{j?AdW$9Xh c&.KLD^|? 
._sgطzwyj>;aSG~0 H s&L`1̹]`:o}<|_|x?_>xKUJea"^L`)>cs\0cs\0 ;rܼ3>U97w<ҾDD\D+Ü ,41g sn fD[{ȓOCؔ= 2A+Ü ,41g sn D¦$ a"^L`)>cs\D<5<%/ǿߙ¦$ a0g MaÜ rÇ;67XCyCܑؔ= 2 "^L`)>cs\D~uΩ;\⯈¦䎴0a"^L`)>cs\D{׿H7=Cܑؔ= 2A+Ü ,41g sn BFN'4w;ҾgDm0g MaÜ r/~¦䎴0a"^L`)>cs\D׿"¦䎴0a"^L`)>cs\D^5KCܑؔ= 2,2lW$9Xh c&.0$Dd6%w}9 s0g MaÜ rk\?;Mi3a"D0"1̙BS3}0v >F=Cؔԑ= 2,6lW$9Xh c&.08(b.a"^L`)>cs\ 99W$9Xh c&.08(AdND\D+Ü ,41g sn D 2'0g MaÜ%8AD"1̙BS3}0vAD "sZ "z^L`)>cs\ ԑ= 2A+Ü ,41g sn D:ҾG#D9`xEb30fasRG~Ȱ "v^L`)>cs\ ̑= 2A+Ü ,41g sn D9Ҿ'cD9`xEb30fas2G~Ȱ "r^L`)>cs\D.^~  i1"D0"1̙BS3}0v 1$8lTH؏qD\D+Ü ,41g sn D9Ҿ'cD9`xEb30fas2G~0 H s&L`1̹]`.pQBH؏qD\DV+Ü ,41g sn D9Ҿ'cD9`xEb30fasAdyş6Vs)#{b?AdXjqY3H s&L`1̹]`. "6w9Ҿ'cD9`xEb30fas2G~0 H s&L`1̹]`.pQBH؏qD\DV+Ü ,41g sn D9Ҿ'cD9`xEb30fasG~Ȱ:󼋒wBZg춍H$Ab#1ڔf6`lI H2-Q%SXs9޹}_GfUd@{8\/H s&L`19]`.pQOUѼ$n"^L`)>cs\ "D,H s&L`19]`.pQJ+D\DW$9Xh c&.08H%-+Ü ,41g sN DAZka˜ 39 "j RAD0g MaÜ5h8Ҽ$n"^L`)>cs\  G` ҭ}qR^L`)>cs\  G` A+Ü ,41g sN DԠHDD\DW$9Xh c&.08AÑ='9ts0g MaÜ5h8Ҽ$n"^L`)>cs\  G` m "r"1̙BS3}0tAD G^ A+Ü ,41g sN D ~yi"f "."+Ü ,41g sN D ~yi"D0"1̙BS3}0tAD G^ A+Ü ,41g sN D ~yi" "."W+Ü ,41g sN D ~yi"D0"1̙BS3}0tAD G^ m "r}"1̙BS3}0t}ߩT¦ď4~H7 H s&L`19]`.1ushT)#M{ A+Ü ,41g sN "77OFu?Ҵ""."W+Ü ,41g sN "/|_7Cؔfq a˜ 39  o_6e{Ofq a˜ 39 }͓o[gCJkD\DW$9Xh c&.0|_T)A$~H7 H s&L`19]`.1<on"k֙6A+a˜ 39 赃Ijou RAD0g MaÜ悹 pҷV3!lH"."+Ü ,41g sN :'Ƈ]]6%|)/n"^L`)>cs\7 |u:M iK=AW$9Xh c&.0"/D3!lJHS^"za˜ 39  "_~ᗉ|yE:S¦4 A+Ü ,41g sN D^lj|}-?:M iC=A[ "rM"1̙BS3}0t`A_ؙ|Lu>҄wzH7 H s&L`19]`.Xryq"6%| n"^L`)>cs\ r"kDCؔ&cD "."W+Ü ,41g sN D~1d3/0Wu>A"D0"1̙BS3}0t`A?<ڐU)#] ҭsq^L`)>cs\?cs\9<ۇkqH6%z?D9`xEb30fasA w|G+6%z?D"."W+Ü ,41g sN "'Ƈ'ٷo# aSG=AW$9Xh c&.0DN>FK{*aSG=A[ "r-"1̙BS3}0t` r1OU^sْ.Do A+Ü ,41g sN oV_ %#]AW$9Xh c&.0"̷Ld!lJH~7p?\ H s&L`19]`.DƿU[fPխ~-Bo A+Ü ,41g sN :7~z~[ȂCJ3\H s&L`19]`.;|з|OV¦8TrѲ"1̙BS3}0t` jT)"D,H s&L`19]`.1/oY6A "."W+Ü ,41g sN kS¦%D9`xEb30fasA _&3U)#]yA"50g MaÜ2/oUvU)#]uAW$9Xh c&.0)3L"_ aSG뼕H7 H s&L`19]`.HDFCؔ.:o mAE xEb30fasty3n"^L`)>cs\ D+x=Λ9t[hqY?H s&L`19]`.Xz7"<D9`xEb30fasAl^KM?TuGۼH7 H 
s&L`19]`. "Ͼ}M#]mARyEb30fasA[fv A|V¦t "D0"1̙BS3}0t "\ْG|n "."ka˜ 39 AdQA+#[THz A+Ü ,41g sN ?e[kClI#]e>H7 H s&L`19]`._<ljcs\>\6esE39t[rqY5H s&L`19]`.pQJ"ZW$9Xh c&.08H%-+Ü ,41g sN DA5"."ka˜ 39 "j RAD0g MaÜ ޯi]6AU"."+a˜ 39 A䋷^_x[¦Ď=H7 H s&L`19]`.pQؑ?s a˜ 39 "j;G|"D\D+Ü ,41g sN D t s a˜ 39 "j:9ts0g MaÜO~6t7|"ݖD\DV+Ü ,41g sN "uaSBGz6n"^L`)>cs\ #] =H0g MaÜ5"oD9`xEb30fast7\"D0"1̙BS3}0tAD BGz.n"."+a˜ 39 "j:e39ts0g MaÜ52oD9`xEb30fast7L"u0g MaÜ 6zME6%t q a˜ 39 A䋷nqf:M BoyDe "."a˜ 39 "j9ҥ8ts0g MaÜ5ROyD9`xEb30fas3K=q2W$9Xh c&.08H%-+Ü ,41g sN )3j RAD0g MaÜ rݪCJW=W$9Xh c&.08H%-+Ü ,41g sN DAu"."a˜ 39 "j938ts0g MaÜ5ODe "."a˜ 39 "j9%D9`xEb30fasDt't;n"^L`)>cs\ #] A[ "6^L`)>cs\D}?MkP¦Dt't3n"^L`)>cs\DxZ}vgנ:M /v"0g MaÜ5/f"D0"1̙BS3}0tAD G HAEd]"1̙BS3}0tAD G H7 H s&L`19]`.pQ.n A+Ü ,41g sN B?eO_ɯiO)38҅_ЭDe"."a˜ 39 AU)#]AW$9Xh c&.08AH~A7r:W$9Xh c&.08AHAq a˜ 39 "j8_mD"."+a˜ 39 "j8_MD9`xEb30fasttn"^L`)>cs\9}tE?i@ursk,n -yqYH s&L`19]`.D}OY6 n A+Ü ,41g sN f"ԓu"!lH "."a˜ 39 3gf`y?aSD*9hY^L`)>cs\0o=oT)"2W$9Xh c&.0D~E:Mq eyEb30fasA䋷׏/}?}wymHurHS D9`xEb30fasA䣇 pW4MurHS D"."+a˜ 39 3̇ǿWw_&k6<D9`xEb30fasA ۻ+?aEM3!l#y@9t "^L`)>cs\>܍_25CؔGs a˜ 39 '2CؔGsvAEd"1̙BS3}0t}}#{ 5c]:M9Y(n"^L`)>cs\<-_aSi:AW$9Xh c&.04"_uf~0OYчT)V IDAT4 "xEb30fasAd|8W¦=ҴsH7 H s&L`19]`.pQG~rvAEd"1̙BS3}0t`gn3D潟D9`xEb30fasܟ23l#{?9ts0g MaÜA韫^ry!l#{?9t "R+Ü ,41g sN YGْi9AW$9Xh c&.0"w3)!#[#M|?8t "R+Ü ,41g sN f "wǫCϦBurH3!"D0"1̙BS3}0t` r |oaSi9AW$9Xh c&.0Df||~"7XdK fdCD]lqa˜ 39 Aw=e|z;<߻In駟qT¦8TrѲ"1̙BS3}0t 2~rL F9T "R+Ü ,41g sN ?hAx'@9A+I$I'DA$I$-AD "nD\D$I$ R}KrH".9\?] 
0g MaÜ悙?eVwCؔsG|pvAEW$9Xh c&.0DV¦;y"D0"1̙BS3}0tAD i<n"^L`)>cs\ 4|H".""1̙BS3}0tAD i,n"^L`)>cs\ 4|H".""1̙BS3}0tAD in"^L`)>cs\3M4|H7 H s&L`19]`.HD~\!lʹ#> ƒHH s&L`19]`.=<{׿zF; cs\0g Ug3D]|qa˜ 39 3!_֯4!lʙ#yH7 H s&L`19]`.hD}~ǯ<v4Cؔ3Gz4nD\DxEb30fasA ~>@~'?h;3U)g2I"D0"1̙BS3}0t}W摯/MT¦˼'9ts0g MaÜA䣃M}r4lKU)";0g MaÜAnSeq CJ"ZW$9Xh c&.0D>9Ï du_"R¦8T "R+Ü ,41g sN ׀/ >E:Mq eyEb30fasAb>;W¦8T "R+Ü ,41g sN D^}~+6̑^cs\= >D:M>=9ts0g MaÜ悙?eCByu?x:M>=9ts0g MaÜAd?VDlH/x ҭhq9ԣ}6MϣD9`xEb30fasA 12rcfv MGz(nUyEb30fasA 913%Cؔ#yH7 H s&L`19]`.hD>4cTn^cs\0c}S}G$)W¦LE#D9`xEb30fasAdv_$r7<~֫aS" 0g MaÜݧ<|R@`E!dK>>9ts0g MaÜ9>|Rg'EU)Gz0n"^L`)>cs\0k_D?)䣣=dEd>>9t+D\D.+Ü ,41g sN "}̚a5}~<r a˜ 39 3G>U!G#yHAE䢼"1̙BS3}0t` ||; !l^cs\1 ҭzqH s&L`19]`.9<7sK_0Ou2yo"D0"1̙BS3}0t` #{ ߜ7aS&|A[ "r)^L`)>cs\0kk{6eHKD9`xEb30fasA䋷׏/}?}wymHu2y%oc"D0"1̙BS3}0t` |~_`_:M<Ғ1nȅxEb30fasA)ݗqߚ:M:Қ1n"^L`)>cs\>D:M:Ҳn"^L`)>cs\0s A+Ü ,41g sN D`H˞DcqH s&L`19]`."2D m:Һ>n"^L`)>cs\0̌~3D)34uuO}"V20g MaÜA韫^ry!lԑ>8ts0g MaÜAd{fV"G۵f {DeqIa˜ 39 7Ō|J@Ȗ RAD0g MaÜ"里P¦8T "."ټ"1̙BS3}0t` r |oaS&җ#n"^L`)>cs\0kyy|cU{-Aa˜ 39 Aw=e|z;<߻In駟qT¦8T "."ɼ"1̙BS3}0t 2~rL FMiH7 H s&L`19]`.pQ#}9?p鶢AE$W$9Xh c&.08đ9ts0g MaÜ58=nkD\DRyEb30fasLiH7 H s&L`19]`.SfxoE?y:M8nD\D2yEb30fasAdn!lđV?8ts0g MaÜ58n"^L`)>cs\ GZp m]H"H s&L`19]`.pQ#~7q a˜ 39 "j0q;"V60g MaÜ58gn"^L`)>cs\7cs\0{;7I:M?G=n"^L`)>cs\0w׆9/LS¦iH "."Y"1̙BS3}0t` #s _YgT)GZf A+Ü ,41g sN f ">CVT)GZf m}HH s&L`19]`.5!o_{uߙ:M<;O{"D0"1̙BS3}0t` G~??񗦩aSD*q^L`)>cs\0cݳǿVM3!lH%-+Ü ,41g sN f "!?8߽/aSD*^L`)>cs\><򰈬KDCJ"ZW$9Xh c&.0"w7Fu RAD0g MaÜAd7y}\R:M?=nD\D2xEb30fasA ~nu2~{"D0"1̙BS3}0tyяMlm"R¦i{Hu"." 
"1̙BS3}0ty}O>bd;j T)GZZ~ A+Ü ,41g sN f"ǹ߿6eH_DtqYW$9Xh c&.08V?8ts0g MaÜeF FAW$9Xh c&.0j0zՏ":,+Ü ,41g sN j0zo"D0"1̙BS3}0t}-cCؔ#~+?pAEdi^L`)>cs\>>$dl%1ry!lV?8ts0g MaÜACD?Ed "k#[V?8t[ "0H s&L`19]`.hDG5}nj-iH_D9`xEb30fasAKD|@}-iH_DxqYW$9Xh c&.0DpyC&~&o6eHD9`xEb30fasASw~ko?EiCH7 H s&L`19]`.5g#nD\Da˜ 39 s~80<ƚ~Nu RAoDa˜ 39 ϟ97٢D*9t+Ü ,41g sN :~G_MiCJ"/]0g MaÜA T)"D^a˜ 39 3߽Nu RA,H s&L`19]`.1|S 2Cؔ#~#?r a˜ 39 v?]b6eHȏD}qYW$9Xh c&.0"!_YT¦iH7 H s&L`19]`.hD}v+S¦iH"."K0g MaÜA䋷^!O~aSƎAW$9Xh c&.0D^+S¦i H"." 0g MaÜ悙2 4vG"D0"1̙BS3}0tCU?3DƎA "."0g MaÜ悹?vן24vG"D0"1̙BS3}0t}o5Cؔ#~qv ""1̙BS3}0t` 7kDCؔ#~q a˜ 39 s?x Hu2rկ="ݮbqYW$9Xh c&.0D;VT)#GZ8 A+Ü ,41g sN /޺9kM?:M9n1,+Ü ,41g sN D`H{D9`xEb30fasixH7 H s&L`19]`.pQ#~qv%H?H s&L`19]`.pQ#~q a˜ 39 ȳ\ n:n2t0g MaÜy?vT)"D`c"1̙BS3}0tAD D*9)hxEb30fas8Tr9RЀm0g MaÜ5p ra˜ 39 "j0g>nW40g MaÜ5>W>n"^L`)>cs\6G{&^æRu2|կ}"ݮiqa˜ 39 -ϾzaSQAW$9Xh c&.0߾9G˅Cؔ#~svUHH s&L`19]`."?{xPu2|o}"D0"1̙BS3}0t :|v:,"!lV9ts0g MaÜ 򻁯y+6eHD] "+Ü ,41g sN bȳo?n _zWf4Q:M>'>n"^L`)>cs\DaGVM3!lV?9tAEd>H s&L`19]`. "֏'?xY]T)GZ" A+Ü ,41g sN Bo/+6eH_D] "2W$9Xh c&.0_FV:M>>n"^L`)>cs\D>Z<!l؊ IDATV?9tAEd.H s&L`19]`. 
" Z/Lu2x"D0"1̙BS3}0t 2~pP:M<n7a˜ 39 =gW!lV8ts0g MaÜ !3||q<nW8a˜ 39 =دKuqCGX> A+Ü ,41g sN DAȸ:\=H s&L`19]`.pQJ"Jp"1̙BS3}0tAD D*9L(õ0g MaÜ5p 2+Ü ,41g sN DAȔB\9H s&L`19]`.pQ#~ p a˜ 39 "j0x/"ݮtqia˜ 39 "j0x"D0"1̙BS3}0tAD !|A۵"."ͼ"1̙BS3}0tAD |AW$9Xh c&.08БV?9tAEW$9Xh c&.08БV9ts0g MaÜ5:W!n;40g MaÜ5:G!n"^L`)>cs\Dn 믿:M:G!nW<0g MaÜ悆Aj:M:7!n"^L`)>cs\ CGZ> &^L`)>cs\ CGZ> A+Ü ,41g sN D`HD9`xEb30fas iHD\DZxEb30fasAdyw"Uu2t/C"D0"1̙BS3}0t 2\6eH_D] "+Ü ,41g sN D`HD9`xEb30fas>"תD] "a˜ 39 "j RA$W+Ü ,41g sN D``~qv탈HW$9Xh c&.08H%r\!H s&L`19]`.pQJ"AE>^L`)>cs\ "Dq}"1̙BS3}0tAD {A"."A^L`)>cs\ GZ= A+Ü ,41g sN D`H߾GDm`q0g MaÜ58n"^L`)>cs\ GZ= A+Ü ,41g sN ?|Ϳ_O?C:M8n[D\DB"1̙BS3}0tvÿ}Og&?6eH_GD9`xEb30fasA /ogo)?E iHM "."^L`)>cs\P9!o'OG!lV?|8ts0g MaÜAc>׈T)GZ= mHW$9Xh c&.0"y!_n_?:M8wn"^L`)>cs\P7v7?FCؔ#~s鶕AE,H s&L`19]`(D~sg_wnדP!!lV?z9ts0g MaÜvA/O׏ߌ~oF"R¦iH "."xEb30faD 24gL|?>JD iH7 H s&L`19]`DƏı:=71nD\D0g MaÜfAd`n?DsV[!!lͭ_~s a˜ 39 |Ȩ`UvT?y9t RI$IנjМ1#~HH&J"ͪ"I$ \ r$C?A$H%vUM$I~Uwnjc:"D*9j$I[ 2"Cv{:dr 2CUU$I$U"25"U= AD$I5"}o?hQGZ= m[?zwW$9Xh c&.0j}ٴْN{AW$9Xh c&.0\Oÿ}i?%ikH ".""1̙BS3}0t?9E:M99 n"^L`)>cs\P5 ~\_w2U)'GZ= mk(H s&L`19]`.(Dncrel_aSN{AW$9Xh c&.0T "_O4|7d@urro"670g MaÜ悪AdhI|ȭ6H'D9`xEb30fasA 2swU¦iSH "."#"1̙BS3}0tlɇd}}aSN{AW$9Xh c&.0 "s:"w{HLJܪaSN{Aa^L`)>cs\P6gfoDݱ|慄P¦iCH7 H s&L`19]`.D~{tw{'L%Cؔ#~pAEdW$9Xh c&.0 "w^_ٛS_:gT)'GZ= A+Ü ,41g sN 8ߞ;9S¦i;HM"."C"1̙BS3}0tUpyEdU/&qvr"D0"1̙BS3}0tQr9yiZGZ= mH s&L`19]`(D?~njȅzG)n"^L`)>cs$Q;<_~7 F"Df"1̙BS3}0tAzW¦8TrRW0g MaÜ5p ҧ7ka˜ 39 "j RAO]o+Ü ,41g sN DAH⬔W$9Xh c&.08VpO9ts0g MaÜ58>)nD\DxEb30fasiH ".""1̙BS3}0tAD }{AW$9Xh c&.08VoO9t "r+Ü ,41g sN DHD9`xEb30fasiHM".""1̙BS3}0tAD q;AW$9Xh c&.08ёV?n8t "+Ü ,41g sN DH߶D9`xEb30fasivH".""1̙BS3}0tAD i;AW$9Xh c&.08ёV?m8t "+Ü ,41g sN DH_D9`xEb30fasivH".""1̙BS3}0tAD a;AW$9Xh c&.08ёV?l8t "r+Ü ,41g sN DHߵD9`xEb30faszwnAEd+Ü ,41g sN DAb ;&^L`)>cs\ "DSX0g MaÜ58DCD9`xEb30fas8TrYPeV+Ü ,41g sN DAȂ*k^L`)>cs\ GZ ҍ1g-s&L`19]`.pQ#~q a˜ 39 "jpxO!" 
9Xh c&.08Vh8ts0g MaÜ58<nAE{Ü ,41g sN DHCD9`xEb30fasivH7 "a˜ 39 "jpx!"8~0g MaÜ58<n"^L`)>cs\ GZ 4H s&L`19]`.pQ#~r a˜ 39 "jppՏA"H|0g MaÜ588 n"^L`)>cs\ GZ 5H s&L`19]`.pQ#~r a˜ 39 "jppOA"Xz0g MaÜ588 n"^L`)>cs\ GZ 6H s&L`19]`.pQ[wP A+Ü ,41g sN DAHJ0g MaÜ5p R"1̙BS3}0tAD D*9$)T%H s&L`19]`.pQJ"IJ;U+Ü ,41g sN D`~sDW$9Xh c&.08V?b9ts0g MaÜ5?G0nAxEb30fasivH7 H s&L`19]`.pQ#~sFDW$9Xh c&.08V?a9ts0g MaÜ5?'0nAxEb30fasi vH7 H s&L`19]`.pQ#~sDW$9Xh c&.08V?`9ts0g MaÜ5?0nAxEb30fasiuH7 \D"1̙BS3}0tAD :AW$9Xh c&.08V?_9t"E+Ü ,41g sN D`H#D9`xEb30fasiuH7 [D"1̙BS3}0tAD :AW$9Xh c&.08ޑV]G8t"E+Ü ,41g sN D`H#D9`xEb30fasܺV?]G8t#"E+Ü ,41g sN DA7첼"1̙BS3}0tAD D*9\@q.+Ü ,41g sN DA%W좼"1̙BS3}0tAD D*9\Bq.+Ü ,41g sN DAEw쒼"1̙BS3}0tAD :AW$9Xh c&.08ޑV?[G8t"E+Ü ,41g sN DHcD9`xEb30fas<iuH7 YD"1̙BS3}0tAD :Af0g MaÜ5x<'n""s&L`19]`.pQ#~q YD"1̙BS3}0tAD :AƜ ,41g sN DH_cD9-e0g MaÜ5x<n" 1g MaÜ5x<n"/UW""1̙BS3}0tAD :AKUH s&L`19]`.pQ#~q rka˜ 39 "jx1"Dvv^L`)>cs\ GZZ Adla˜ 39 "jpՏQ"DT-W$9Xh c&.08ÑVUG9tsSݶ|^L`)>cs\ GZV A^uyEb30fas"%D.n"1̙BS3}0tAD D*9\Zu߲yEb30fas8Tre0g MaÜ5p rqՅKa˜ 39 "j RA+Ü ,41g sN DAU7.W$9Xh c&.08ÑVSG9tsW]T^L`)>cs\ GZL Ad_uRyEb30fas<i3uH7՝a˜ 39 "jpկQ"DTw.W$9Xh c&.08V?R9ts9T]D^L`)>cs\ GZF APuyEb30fasiuH7#խa˜ 39 "jpOq"DU.W$9Xh c&.08VP9ts9V]4^L`)>cs\ GZB ADuxEb30fasiuH7սa˜ 39 "jpq"DNU/W$9Xh c&.08VO9ts9U]$^L`)>cs\ GZ> Ad@urxEb30fasitH7a˜ 39 "jpq"DTW/W$9Xh c&.08VN9tsTݽ ^L`)>cs\ GZ8 AdPu2xEb30fasZ AdXuxEb30fas8Tr)U]^L`)>cs\ "DjUoy^L`)>cs\ "DjUoy^L`)>cs\ "DUoq^L`)>cs\ "DUoi^L`)>cs\ wGZ. 
AdTua˜ 39 "jpw "DUpa^L`)>cs\ wGZ, Ad\ua˜ 39 ]b IDAT"jpw "D&T7pY^L`)>cs\ wGZ* AdBua˜ 39 "jpwկ "DTWpQ^L`)>cs\ wGZ( AdJua˜ 39 "j;7n";$H s&L`19]`.pQݑV?I8tsV]yEb30fas쎴E:Aȴ.+Ü ,41g sN D`w/)"DΨnr"1̙BS3}0tAD vGZ A.+Ü ,41g sN D`w)"DΩb"1̙BS3}0tAD vGZ A.+Ü ,41g sN D`w)"DΪR"1̙BS3}0tAD vGZ A.+Ü ,41g sN D`w)"DΫ.B"1̙BS3}0tAD vGZ A$0g MaÜ5U A$0g MaÜ5p "U\W$9Xh c&.08H%5"1̙BS3}0tAD D*9Iua˜ 39 "j RAdU˸H s&L`19]`.pQJ"R]xEb30fasܞhCtH760g MaÜ5=w$n"Qua˜ 39 "jp{I"D+Ü ,41g sN DDD9UW$9Xh c&.08VB'9ts.d'H s&L`19]`.pQ~Nr W]N^L`)>cs\ 'Z AAu#xEb30fasܞhtH7Սa˜ 39 "jp{oI"DZTWW$9Xh c&.08~Ns Ҥ="1̙BS3}0tAD ^htH7&՝a˜ 39 "j@D9.eH s&L`19]`.pQZ AQu+0g MaÜ5xyi"DUr>H s&L`19]`.pQZ AUu-g0g MaÜ5xyկi"DZUr6H s&L`19]`.pQZ AYu/0g MaÜ5xyՏi"DUs&H s&L`19]`.pQ[կ)"DUs&H s&L`19]`.pQJ"Uy"1̙BS3}0tAD D*9Xu5g0g MaÜ5p b՜+Ü ,41g sN DAȚUwsH s&L`19]`.pQJ"kV9"1̙BS3}0tAD :s3xEb30fascs\ D 9]u?[yEb30fascs\ D 9_uCxEb30fascs\ D 9\ꎶ0g MaÜ5X AOuIxEb30fas8Tr %ma˜ 39 "j RA:T4+Ü ,41g sN DAuniW$9Xh c&.08H%+Q]0H s&L`19]`.pQJ"עQ^L`)>cs\ 7SD9tiW$9Xh c&.08H%Q] H s&L`19]`.pQJ"ף1^L`)>cs\ "DGuSc"1̙BS3}0t` RATWU$IY"j RATWU$IY"jRcs\ "DJuY"1̙BS3}0tAD D*9\궞a˜ 39 "j RATcs\ "DNua0g MaÜ5p ru {W$9Xh c&.08H%Si^L`)>cs\ "DPue'yEb30fas8TrBՕa˜ 39 "j RAUwvW$9Xh c&.08A<n"H s&L`19]`.pQy"DU] ^L`)>cs\ oD9,"1̙BS3}0tAD 9tsYXukyEb30fasܪ~`Nq ڎ0g MaÜ5p r{;+Ü ,41g sN DAȵH s&L`19]`.pQJ"W#"1̙BS3}0tAD D*9\0g MaÜ5p r;+Ü ,41g sN DԠYyH7 a˜ 39 "jPH s&L`19]`.pQXS2AH0g MaÜU$#D9䪮H s&L`19]`.pQXK2AH?0g MaÜܪ~PNq "1̙BS3}0tADa"D6"1̙BS3}0tADa"D"1̙BS3}0tADa"D6w"1̙BS3}0tADa"D6w"1̙BS3}0tADa"D;^L`)>cs\ GdH7 W$9Xh c&.08( A|+Ü ,41g sN DU q ru~+Ü ,41g sN DU q ru~+Ü ,41g sNw/YfוcF5H<ǡ,T PI8 X@(Ahg4vX m!scvرb&=K!u=wy.PP5,Q,S\<޾D t 1'01ܮP(D~@(D)Ddz[A@sC uBcBdBJھD t 1'01ܮP(D~?(D)Dez[9@sC uBcBdBjGC t 1'01ܮP(D~=(D)Dfz1@sC uBcBdBzGC t 1'01ܮP(D~<(D)Dgx1@sC uBcBdBfGC t 1'01ܮP(D(~:(D)Dit1@sC uBcBdB&gC t 1'01ܮP(D(~8(D)Djr1@sC uBcBdBgC t 1'01ܮP(D(~7(D)Dln1@sC uBO7wQ,S\L;"c9N!9En I iGd 9'0)Ĝ<ǐsB]M!2I!WS3툌!:rnW "}x)D&)Dvkh1@sC uB6$n ʹ#2bN`cȹ].PPݦٯvDƐsB s 9+ 5 e #3툌!:rnW "L?""FC t 1'01ܮP(D~0)D)DFL 
#2bN`cȹ].PP3^,R,SjGd 9'0)Ĝ<ǐsB]fXYrvDƐsB s 9+ j_E e !jGd 9'0)Ĝ<ǐsB]fXYrvDƐsB s 9+ jE e 1jGd 9'0)Ĝ<ǐsB]fXYsvDƐsB s 9+ jߊE e 9WjGd 9'0)Ĝ<ǐsB]fXYtݡvDƐsB s 9+ j_E e IWjGd 9'0)Ĝ<ǐsB]fXYtաvDƐsB s 9+ J߉U e QלjGd 9'0)Ĝ<ǐsB]dXYuũvDƐsB s 9+ J_U e aכjGd 9'0)Ĝ<ǐsB]dXYvvDƐsB s 9+ JU e iWjGd 9'0)Ĝ<ǐsB]ku e qךjGd 9'0)Ĝ<ǐsB]qvDƐsB s 9+ Ji I WjGd 9'0)Ĝ<ǐsB]qvDƐsB s 9+ Ji I WjGd 9'0)Ĝ<ǐsB]IrvDƐsB s 9+ JU e pvDƐsB s 9+ *_e e pvDƐsB s 9+ *e e ?֎rN`S9y!v@!B۰L!L!r+C t 1'01ܮP(D~)D)DnEX;"c9N!9O22kGd 9'0)Ĝ<ǐsB]beXYc툌!:rnW "TL? ""7w1@sC uBwaBdB#2bN`cȹ].PP1.,S,SܒֱvDƐsB s 9+ *e e 9֎rN`S9y!v@!BL!L!rS:C t 1'01ܮP(D(~)D)DnK\;"c9N!9o:2ȍkGd 9'0)Ĝ<ǐsB]`IXY1}s툌!:rnW "L? ""m1@sC uBaBdBt͵#2bN`cȹ].PP0 S,SܞvDƐsB s 9+  >ܙ|Y=Ms툌!:rnW "|z)D&)Dr̵#2bN`cȹ].PPY 2׎rN`S9y!v@!BWBdB$X\;"c9N!9^e I ds툌!:rnW "|z)D&)D5̵#2bN`cȹ].PpS e 0؎rN`S9y!v@!qO(D)DnC t 1'01ܮP(D8n%x2ȭ`;"c9N!9M?OYYlGd 9'0)Ĝ<ǐsB]w ""ƒ툌!:rnW "7 ێrN`S9y!v@!ϼ(D)DvDƐsB s 9+ xzsgWY٪gC t 1'01ܮP(D8K!2I!NmGd 9#a IDAT'0)Ĝ<ǐsB]ሇ.$G8ێrN`S9y!v@![ I 9m1@sC uB#>""sl;"c9N!9{RLRpI툌!:rnW "nw"2Ȧ2܎rN`S9y!v@!w'R,Sl 툌!:rnW "nyw"2ՇC t 1'01ܮP(DxD e #2bN`cȹ].P݉""[WnGd 9'0)Ĝ<ǐsB]uӏ)D)D6:܎rN`S9y!v@!v'R,Sl_q1@sC uBM?NYپp;"c9N!9~ڝH!L!vDƐsB s 9+ ^7;BdBdJ툌!:rnW "naw"2.TC t 1'01ܮP(DxT e }(L#2bN`cȹ].PWݩ"";q|1@sC uBWM?NYً툌!:rnW "jMw*2nnGd 9'0)Ĝ<ǐsB]UOS)D)Dt;"c9N!9~ѝJ!L!GC t 1'01ܮP(DxT e y}1@sC uBW=~^3Q,Sɫ툌!:rnW "J!2I!^nGd 9'0)Ĝ<ǐsB]UW I t;"c9N!9zRLRpC t 1'01ܮP(Dx͓BdB8ގrN`S9y!v@!k""vDƐsB s 9+ ^3;BdBdw^oGd 9'0)Ĝ<ǐsB]5/)D)DvDƐsB s 9+ ^3;BdBd#2bN`cȹ].Pw""{tp1@sC uBL?NY٥C툌!:rnW "fw22>oGd 9'0)Ĝ<ǐsB]0]Ͼ~翫G߾}/aۦq'S,SvDƐsB s 9+~W[D޽U\d e z6ގrN`S9y!v`>_+uL?NY٭툌!:rnW & '}۷MO*Dew:2n=oGd 9'0)Ĝ<ǐsB]0X7OR\t e z2ߎrN`S9y!v`G~s߈w(Dbv:2Ȏ=oGd 9'0)Ĝ<ǐsB]0W{=!MDZ>B*oS,S٣vDƐsB s 9+sȷO~Fy[DF!rӯ)D)Dv|;"c9N!9ꂱBwGE/|B*oS,SۃvDƐsB s 9+cwjw/~Z_{5o)D)Dv|;"c9N!9ꂱBwI|% ~N!L!wC t 1'01ܮPL"wƓk:^ͻO~'MNYٻ툌!:rnW Cǡ*DK͝7\BdBd>ͷ#2bN`cȹ].*DdԻy9uB䊞=""\wDƐsB s 9+ 9"_B?=u?mg($2}-*D;}(D3J!2I!L-p"+J!2I!L.0o91"WႦOwSyF!r5O3(D)DBL/0M!˦_lgP,S>^` gϖMΠYIsB s 9+7U\3Da˦lgP,SpDƐsB s 9+-3Le3(D)Dr8"S9'rnW "lv2HGd9'sC uT!rB>Wt?U\c e $ 
rN`I!9BI"7ev2HGdWa;O 1'01ܮPL"wwM?U\S e ,vbN`cȹ].*DJ"2R;BdB$#20'rnW Cw[w"W3R;BdB$9 yR9y!v`ه}_29""i yR9y!v`5}LΡY3}UΓB s 9+cnh*L`I!9BO6"?QU!2nv2HNvbN`cȹ].,D~@!2͝?NY 5}UΓB s 9+ȟ~wɤCWa;O 1'01ܮP"_Cw q^L I zM9q&sC ut!o::`RLRl̡0'rnW "{I!2I!BC&vbN`cȹ].P%$Oz yR9y!v@!KfgQ,SD>vh*L`I!9/~E!L!mء0'rnW "dev2HsvbN`cȹ].Pwy""O yR9y!v@! eQ,S>z Wa;O 1'01ܮP(D8l!v&2LD\0'rnW "vqΕ_d'Q,SI% yR9y!v@!aF I &LE\0'rnW "viaY:Wa;O 1'01ܮP(D8H!2I!ŒÈevbN`cȹ].PpBdB!ӧ\ <)Ĝ<ǐsB]"$S# yR9y!v@!As)D)Dxn@b0'rnW "4:BdBO$ yR9y!v@!Aϯs)D)D8dHb0'rnW "4:BdB$ yR9y!v@!Aӏs)D)D8lP|vbN`cȹ].Pp\ e ^0}*q6Wa;O 1'01ܮP(D8hu.2/>80'rnW "4:BdBMLUΓB s 9+ ~wK!L!+O&*L`I!9M?ΥY5Gp&sC uBC]gS,SÉ3 yR9y!v@!!o)D)Dx\ <)Ĝ<ǐsB]'""1}8UΓB s 9+ ~T-P,S` Wa;O 1'01ܮP(DxnM@!L!ŠӋC\ <)Ĝ<ǐsB]'2K/p&sC uBgT+"">xUΓB s 9+ y^VS,ShWa;O 1'01ܮP(Dx凎BdB>xUΓB s 9+ ya+0p&sC uBg^~(D&)D،C\ <)Ĝ<ǐsB]ᙗ_9 I cWa;O 1'01ܮP(DxGBdB >xUΓB s 9+ ~IP,Sp_ yR9y!v@!3 e .d$3Wa;O 1'01ܮP(DxfB!L!¥Le|*L`I!9LV(D)D鳌\ <)Ĝ<ǐsB]723}qUΓB s 9+ ~B-Q,SpAw\ <)Ĝ<ǐsB]25}{V;O 1'01ܮP(DxjD!L!eMhط2yR9y!v@!Sϧ% e .lHþΓB s 9+ ~=-Q,SpigvbN`cȹ].PiBdB> yR9y!v@!So% e .oTK*L`I!9OM?(D)Dh0}s&sC uB_NK"">آ yR9y!v@!Ϧ5 e zLm\ <)Ĝ<ǐsB]W2M϶`vbN`cȹ].PiBdB.Ӈ[.Wa;O 1'01ܮP(DxbʹF!L!B-0'rnW "Lq\ <)Ĝ<ǐsB]2 I vb*L`I!9M?)D)Dh7}q&sC uBǦ_J""\AUΓB s 9+ ~'-R,Sp ']Wa;O 1'01ܮP(DxlH!L!ULuQ\ <)Ĝ<ǐsB]W"21}%q&sC uBǦI""\iUΓB s 9+ ~"-R,Sp5] Wa;O 1'01ܮP(DxlH!L!Lw)\ <)Ĝ<ǐsB]*2W4}p&sC uBGG""\UΓB s 9+ ~R,Sp]Ӈ^Wa;O 1'01ܮP(DxdmJ!L!•Mz\ <)Ĝ<ǐsB]*26}ퟫ0'rnW "<22ZYs&sC uBGE"" >vUΓB s 9+ ~-S,S0b5Wa;O 1'01ܮP(DxhML!L!Œo\ <)Ĝ<ǐsB]'22Cs&sC uBYL*L`I!9_L?}.@!L!M>wUΓB s 9+ ~\BdB3}00'rnW "|12fd>Wa;O 1'01ܮP(Dbs e nٸuvbN`cȹ].P""ܠq\ <)Ĝ<ǐsB]'%(D)DI㦹 yR9y!v@!gKP,Sp-s&sC uBϦ;YVM*L`I!99BdB5}Fn0'rnW "|v9Sr\ <)Ĝ<ǐsB]$hD*L`I!9" I 79\ <)Ĝ<ǐsB]KD!2I!L*L`I!9" I 7}Xn0'rnW "|2ƹ2oWa;O 1'01ܮP(Dds e `Wa;O 1'01ܮ*yg IDATP(Dds e 6aWa;O 1'01ܮP(Dd}s e 6bWa;O 1'01ܮP(Ddus e bWa;O 1'01ܮP(Dhms e c Wa;O 1'01ܮP(Dhes e 6dWa;O 1'01ܮP(Dhas e 6eWa;O 1'01ܮP(DhYs e eWa;O 1'01ܮP(DhUs e f}vbN`cȹ].P2""ly\ <)Ĝ<ǐsB]'e(D)D؞ֹ yR9y!v@!½ͅ(D)Dآ yR9y!v@!½̅(D)Dؤ# yR9y!v@!½̅(D)DبC yR9y!v@!½̅(D)DتSv 
yR9y!v@!½̅(D)Dجcf yR9y!v@!½̅(D)DذF yR9y!v@!½c͝2[6}&Wa;O 1'01ܮP(DWzk(D&)D`,EvbN`cȹ].PpPLR@iz\ <)Ĝ<ǐsB]N yR9y!v@!3C!2I!Eq&sC uB;WBdBʦ*L`I!9w_/Yaԛ*L`I!9w/Ya'O*L`I!9w.Ya/*L`I!9L?\.F!L!~LUΓB s 9+ >~\BdB=>[o0'rnW "|0h22}Wa;O 1'01ܮP(D`r1 e vfxvbN`cȹ].Pb"";UΓB s 9+ >~\BdB>a yR9y!v@!ϕQ,SGg,Wa;O 1'01ܮP(D`r1 e i*L`I!9QYaAvbN`cȹ].P{6(D`I;UΓB s 9+ ~>k yR9y!v@!"۠eӇ Wa;O 1'01ܮP(DBd"pWa;O 1'01ܮP(DBd"p Wa;O 1'01ܮP(DBd"p'չ yR9y!v@! }ț;e e o̽6Wa;O 1'01ܮP(DPlB.fԽ.Wa;O 1'01ܮP(DPlB.gؽ*Wa;O 1'01ܮP(DPlB.i"Wa;O 1'01ܮP(DPlB.kWa;O 1'01ܮPA!2I!6}0I! S,Se`BF(D "?ϑKR,Sg n*L`I!9 P@S0'rnW "(D6B!m^vbN`cȹ].P4>; yR9y!v@!Bd#"j(*L`I!9 P@ø0'rnW "(D6B!vbN`cȹ].P0(2٦OvbN`cȹ].P0(2s&sC uBE)D)D`T4Wa;O 1'01ܮP(D~z\BdBֈ yR9y!v@!""o;D\ <)Ĝ<ǐsB]aqQ e 3}4_0'rnW "L:.J!L!M*L`I!9H'e)D)DR\ <)Ĝ<ǐsB]7,2|1}@_0'rnW "Ny2<4}D_0'rnW "Ny(D&)D`!UΓB s 9+ x ""0c^*L`I!9HS I 2}N/r&sC uB$I$̙> yR9y!v@!'BdB&M \ <)Ĝ<ǐsB]I7Ƹ02`>0'rnW "_YMgr&sC uB$""#,vbN`cȹ].P~\\BdB^5}jUΓB s 9+ pOKS,S릏ӹ yR9y!v@!neqi e 8j>0'rnW "ᦟY$vbN`cȹ].P~T\BdBJO yR9y!v@!mEqq e (>\ <)Ĝ<ǐsB]682MEvbN`cȹ].Pd~M\BdBN0} yR9y!v@!m-qq e 81^*L`I!9H)D)DDQvbN`cȹ].Pd~G\BdBN6}*L`I!9HG)D)D ӧ\ <)Ĝ<ǐsB]6<2e<0'rnW "ѦY3M/r&sC uB$""p3vbN`cȹ].PD;usq) e X0}*L`I!9HW}$l0'rnW "N""gSvbN`cȹ].P$;yWLRfL yR9y!v@!_!2I!2}?*L`I!9H7|$l 0'rnW "_ ""p)Ӈ'vbN`cȹ].P~+tP,SL\ <)Ĝ<ǐsB] 6RY>o`I!9HgB2\Yoߊ`I!9HGB2\a*L`I!9H'B2t<] <)Ĝ<ǐsB]5] <)Ĝ<ǐsB]Iu΅_Y 罫0'rnW """k}罫0'rnW """wM罫0'rnW "Z""-罫0'rnW "[""w&sC uB$yBdBB\w&sC uB$E!L!.{ yR9y!v@!jzY[pUΓB s 9+ Pӻ~2܈vbN`cȹ].Pd(D)Df\w&sC uB$F!L!'0'rnW "W6 e -'0'rnW "6 e 9'0'rnW "6 e AK'0'rnW "W> e MvbN`cȹ].PD(D)Dfy yR9y!v@!izY[vΑ*L`I!9H鍾BdBnG0'rnW "> e }vbN`cȹ].PD(D)D`N8] <)Ĝ<ǐsB]I47R,SVT|Wa;O 1'01ܮP(DM/""!3UΓB s 9+ D|#2l3UΓB s 9+ D{#2lͱ3UΓB s 9+ Dgoo\p82lЫg0'rnW "_""+^>] <)Ĝ<ǐsB] t^¡*L`I!9Hm$pԡCUΓB s 9+ @o I ١*L`I!9Hl$Pw&sC uB$J!L!w&sC uB$J!L!{w&sC uB$J!L!;rw yR9y!v@!gz?oY}oesC uB$nK!L!* `I!9HżBdBhꡛ'rnW "q^ e ةۇVvbN`cȹ].Pę^{)D)D`/ yR9y!v@!fzoY=bI!9HeBdBn'rnW "if e H0}qivbN`cȹ].P^›)D)D }%yR9y!v@!fzY wcI!9H廛BdBLK\'rnW "an e 4}7ΓB s 9+ 0Kw72XbI!9H}BdBrMQΓB s 9+ ,v;2d8'rnW 
"Y7w.r7Q,S@髊3yR9y!v@!euIVLR3}_q";O 1'01ܮP(D. I દ,NaI!9HX!2I!\E'rnW "QV7c$0bΓB s 9+ $[BdB3}q'rnW "I+P,SM_aΓB s 9+ $(D)DM_cΓB s 9+ $ӛ(D)DWM_edI!9H%""QyR9y!v@!dz2P1}'rnW "AkP,SU_yR9y!v@!dzs2p雍<)Ĝ<ǐsB]15_BdB8=r9y!v@!czc 2p;O 1'01ܮP(DrLWY4}EsC uB$| e `]ΓB s 9+ ;u(D)De7^ ;O 1'01ܮP(DbLסY.b cI!9H:""L_|I<)Ĝ<ǐsB]I1_BdB/'rnW ")W+Q,S6}FsC uB$>|% e -wvbN`cȹ].P^D!L! wΓB s 9+ Yeܹ[=""@ q<)Ĝ<ǐsB] q-V!2I!;vbN`cȹ].Pvfj;O 1'01ܮP(DB\fwULR{4}?'rnW ".*D&)Dݚ%ΓB s 9+ X""M_fI!9H-j""r<)Ĝ<ǐsB]0^BdB6}gn'rnW "wQ,S7a;O 1'01ܮP(D"LףYnvbN`cȹ].PDhG!L!ܘ;t<)Ĝ<ǐsB]I0^BdBE7魳sC uB${E e fM_7ΓB s 9+ )D)D[7}";O 1'01ܮP(DLoפY6bv)vbN`cȹ].P^YI!L!l{+<)Ĝ<ǐsB]ٿe""L_7ΓB s 9+ SJ!L!lm;ΓB s 9+ ݛPK!L!l;ΓB s 9+ ݛLK!L!{evbN`cȹ].PFze e `?sC uBd聯+S,S;3}_'rnW ";7^BdBء۸'rnW ";7^BdBر['rnW "vE͝ ?zQ e `o˳sC uBd.A*D&)D.l;O 1'01ܮP(DvқBdBm}vbN`cȹ].PڥWF$@K{'rnW "v]Q!2I!p wY<)Ĝ<ǐsB]ٳ/ I kObI!9ȞM""M5vbN`cȹ].P9A!L!p'rnW ";6;NP,SbnAC s 9+ G(D)DJyr9y!v@!_2@UYΓC s 9+ ݚ^g(D)DN6}{@sC uBd e s];O1'01ܮP(Djz)Yk;O1'01ܮP(Djz Yk;O1'01ܮP(DvjzYhzGPLR3ﶣrt{ ?o=wT '0ϰ#<T!rBNjH''0ϰ#<)D8~g avW aaG 4y+B!B'3y1аhweNg ;ba?3\B8a?3숁0pES1_W aaG 4y+-D{"實<Îh W4Uo߾?{Yj8a?3숁0pESȡ:PqW aaG 4y+*D&W~i_M3y1аhǂ}( 6洯<Îh W4VˣD>䴯<Îh W4ṼFxg`Nj8a?3숁0pEsݷx?}=i|0ϰ#<"O~7|4q>~g a ȃߢɷuȟ=8~FB,D~OY!Wԍ"z͗㫿z0 ׾nӟg?@!WM"W(D8  B(D8  B(D8  Bo߾_w`/Տ믧( c74loч Ƞ?쫿p_?k7nrCÆ+PPg'4l?~ {nhش)Dx҇}L<?pf톆O]pu  |v@F1vC&3tSPpn}uN?pw>[g*&=~-o?ʂe:)D8`!`>o/DO%pEW??n/e؞/W?՟?gMA&??ӧ=pC3(D`k'بυȗo9 6?W fn "gTO9 J [I#߿sCC7-3kMX]"O_hئ|C>,w)D8Dž||'lη?m"?ۻɶpC;Ǽ; v[ ݡ !_[ۏA'8㓿e3B`!nZRp̡ ??fzlӻC?2C`7E)D8#\ئC:>C3Rp6o[f`o=qWh؜}_~6\B~>r|' umzeg?1"퓅 ؄wp [to74lճo7p(D8B~r>ݏWpߐ/ؤ__hب'u7@3};?՟߼ɰY?ݟ_ #~>'lԁ (6՛M!BŻ I|lԻg5 J,\_~\ؚ?u+lӉ+}lՓ}p jxm^O&?}M!B_On6<q}~+Rq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"@G!Qq"˛?e {m"8܆Af4EF l1bFpZYY~J̢\};oyS=-QRe6"[I̽(`J}ȗBJeDF)AD-#D-#D-#D-#D-#D-#D-#D|8+bɧ 2W_ fH D젨<DoU~l}8$K6>nԊAS5&_߮"d "@̮bp$Ta?%뤌Q/z~e7pH ī\ Ҵs "W}Ycf/%Vp<9֚EDܱt7Ad ,A2XTԎHcpR.׆eH Dk؍a;jZ\ʧ>WO:A$tH Ī\Q+2"2ZA Ee+8}4G:+:fdU R$vN=^!ro;o"@ R厙rHy˰V7)DHGtA؀ DۀbqAq dH D`V ;H"5Zz+|HXL^D )DH pf8;h ~H DAKgG` R$ "3w"a@ RAdYxǧ)DHȰY4 
8H"dL󖙫y<{{Q{km R$G/Fb1s||D "@f;c;_4p*|n6)DHesȏbQn|wA E=3͒1ZΣ|")DX-_Q.YDuw"A ErLe >iAD "@+>ui{=HzY?H5wgx,ZGOj]㪵E&xH j'ޢXAdM狳âX5z4_n<H"6*oAdMZ^)ӛkB "@̆TDM;cj~;tL "Ai;X6b痳![djmevH Aۗfm%K=vN@jKD8$H"7>]$}3 Ҏ&q;??4GwvxRCqf6]2ΧwR#D"@v ;AȎ dG#D"@v ;AȎ dG#D"@v ;AȎ dG#D"@v ;AȎ dG#D"@v ;AȎ dG#D"@v ;AȎ dG?mNãIENDB`bayestestR/man/figures/unnamed-chunk-10-1.png0000644000176200001440000022343413607554753020464 0ustar liggesusersPNG  IHDR .PLTE:f:::f:f!!!!!!!333::f:::::::f:ff:f:f:::MMMMMnMMMnnMnMnMMMMMMMMMff:f::f:ff:ff:ffffffffffnMMnMnnMnnnnnnnnnuuuuuuuuuuMMnMnnnȎ:f:ffffېnMnȫff::f۶ې۶ȎMȎnȫnȫȫې:ېf۶f۶۶۶ې۶n䫎ȎȫcKKKscKcKKssscKcKsssKcssscssfȎې۶޵ef_ pHYsE4E4Ƶ IDATxA$u.Xz.E=Bb 0,!= F[a/(``l4z,YUFGϢw***3Y=UE՗;yp02ok `t"(DQFG!B0: `t"(DQFG!B0: `t"(DQFG!B0: `t"(DQFG!B0: `t"(DQFG!B0: `t"(DQFG!~ 6vڷ_!rc҇wк צM!Uܞ!llBy!rM#S|y!b*"7cUأj*#{"_\*ص=߁L O ž~cUرY2(λ@V,Wj HlBdzbfvDf wۭY:2b*Nld~PcUv*D72;3c*.||a*" ~9cUv)DBJTgsEUۡzjZC!2=pCcUءZ}6f6j՝wܶ/D*G1VmBd{,_w6[ĝwԶ.Df'cGL u!r{u1'wRۺY1R@/l[TMNUu] m Y籖@b["S1.ז"ƪymY\܅q] HՍUڮ]1VkBHSƪYmUzXU k+22b*6ȬXcvcU)Df#U=h6H"WTWUrڢT]?/u:hĝw(Dn1MUr_l1λ@J ٴM1Vȭ~!rIyyHv!296J1VȨv!rXy^ySg~p+?_[o-Df#Uk =cU<[X|q O+$Rqvλ 9Ӧ'k?/)D mN|mǫbaH5 YQVwޭ~H倐RrTYn*=:qu_)ﮙ#2m;^x_o!IBd6&PU?5 ~Ǜg @l8wt^!2z/߱~wyiqGJ~|Wg3U7f:RuލUTo{DVn^1dVy+T*Df#UX)"q ū~yi ЩU),)w(f@. ~3Qے[/c*$))y!?,)"f@2 RPTd"%gf7]V:7[@(HIuu!r4Xz};)>Vl9;?]yl` HIm d6G,Nn=.(DJ ]"+ FD!]P4Q}IOx翞$/XQܳPI.iwޚӪ.%">Ź_~k:4.%{"UuR@'d= !KMG7߸栰/[KˬC;A.9ڧ_~ՏHA.9KoMƩZp6Q@+d u q5N֜R@ Yz|jd8|R@Y5G{\X^ypōtW0CeUz ͑@Yj\Ѧ >J8zy# h]V)0LUFі<^l"Y{-"G+O&TWS6B k/8ZZK]s[Eſ^_USe P`y 7XSnLOԼ5ŝ-z7oQe Ph:!?W7lqtrx[ߜ> 2,j:\RzehSs7XgB,Ke/BzB! 
R,j*,Xn P{V!%?ʝtBϋMNJM wޜVܢwB kWWN^ǥBdS1w.J hB k6ک)D vj hB k6ک)D vj hB k6ک)D vj hB k6ک)D vj hB,K"ddY'Ԥ!#?&5Y@ 5)DȲOIM!BF%BMj hB k6ک)D vj hB k6ک)D vj hB k6ک)D vj hB k6ک)D vj hB k!#?&5Y@ 5)DȲOIM!BF%BMj 2,jRSe PB k6ک)D vj hB k6ک)D vj hB k6ک)D vj hB k6ک)D vj hB k6ک)DȲOIM!BF%BMj 2,jRSe PB,K"ddY'Ԥ6ک)D vj hB k6ک)D vj hB k6ک)D vj hB k6ک)D vj hB k6ک)D vj 2,jRSe PB,K"ddY'Ԥ!#?&5Y@ 5)D vj hB k6ک)D vj hB k6ک)D vj hB k6ک)D vj hB k6ک)D vj hB,K"ddY'Ԥ!#?&5Y@ 5)DȲOIM!BF%BMj hB k6ک)D vj hB k6ک)D vj hB k6ک)D vj hB k6ک)D vj hB k!#?&5Y@ 5)DȲOIM!BF%BMj 2,jRSe PB k6ک)D vj hB ka ANM!UՄECvj M^ kAٶ Q@kd"0;!:hBa6D)͓SS@5RDavj &4GNM!|fک)DZjC"Y;5UmN%k> щ~d"C!*ؕB,˵CT"#&5Ykt_D`'BMj 2,W CT"=&5Y!*ؒPB,jușzDIM!BFe.d"cjRS@?D s? Y;5Bt >SS@D?ONM!Ew+D,B=ֈh 7Y;5]z@fvj H-(d"XtQGgiک)D 讣 SS@V=?*HINM!IEۉ !Y;5pl+|dj_󇇇֟j>g\{S/]Df]ˇ^_ys W~_B6v@.v[%Wi O% H'CGʋ{~ /=^rB!D{ Y{ryaCG_rJ!DW Y{џ/W~_~GㅷN/~owg4 #$dwU+&eƏ7=;~)"d4e]f4#Sj({<~}K =*PK%)Dh2hJ2Ԭs}(6tqb"ȭ_Bƺ,k&EofyinGq/Wy|Ǘ!.Yџ&_YbGf7ѝ/<~9 2粌n0@х n[<;jls"d4e]_ #Xc 5`{\83Y:W"CtyъB%;7nUXo%"Btuі%gAսKս^R!D-h ]RSl]ܚ,K*D Ң]џ.K{MDo2S%bBE7m| ]R=cBltlV/tDA.n/$wEfK*D 貢2K(D>dC!}]Ut%s% "E2̶/\{D5YdBd҇K*D 薢Kџ5tL.ٻyC%"ItGѱ:%kYդ{^/͋.(:@dn{2BD?s莬]rO{qyBDw1?u茬]r^|~h2N^R!a(џ;tE.޸Q;_D!a{@=tC.uZ=d:N;6"#@'d풢o/Iwٝ>%(D Dt%-eUE[VS% 2.$EоaU8 2g҇\y_wz  YF DкA[7;=oQd |YFyDЪᆚM{|wc2dnڍ_B,[L h`CΦR_>}cva̖/9G! r6Ego~ns+Ҽ[Gˏ:?$%WR@lhO@!g>n\\Slzɕ"Э"oRs񍛝6t,l("[n*D6J TtQw-*Dy8љ;/ϕ/Bd2dc!%WR@hE`!rppᑴGgӪq("kF /B:]<@B {6љx NtXW-.Dr DE;и H3"Е!?~rqtB:7@W o_rtf#t$m|}NF Ft4)8kţ38:3G!n"{gq"P@{98Gg"Ѕ蚡G*hLx9D e@Sdlj\ttF!z&ڋ3 h]t;_4#[DFMD!BFZBEgЄd\x6MRѰetK_4 SY"2%2ރ3 2Բz*k 5hC<(DhH2X/#|tRg>IB,k`_ BWo/!_7D!BFZѭBEwPsCc~wߧD#)DMѝBE}ج=*frTٛ6-" hQt{_ #0kb2=*Sg`o0'NǨ̹qܬaS@k˄A`w!YxiQs_\&Kov\xLqfF!4)Iv}֞9GeOpdhPt0 _%&Ƣx@[!.`'2S G[c)D°DBdQ~T'ãVD7C} otH!.' 
"(D EBu7{K!-.)[-ک)DyPEY;54/8/}>;f˗Fz h\tm0`_-l#.3][/.>R^ h\tk0d-l!c!R<wQ@;avȺGBz,.Hj 2 /2"" 2n ;z"LU)xYFw-@-.yS_^Ҥ0TD!BN=^UHDPG'B,шnB :yg)DA5DQGY}3"Р`Tlؤ]LR݂g"М`dnؠ}c>{"М`loX]{˗]DZg#1(D)EB$54%oVSD7cJ "Ќ^`yX%{VԡfD# vj hDt)0j_>TSS@+B^ vj h@tFd"Ѐ6 ک)D`]W, kSO,E7, kYZM7-`"z!@B$5kYFD/(Q!^-9ы)DRSQet= IDATs")DȨO2,z=9Hj 2Ѳ,^0ӏP3Z 2Ѳ,^0ՏP3Z GtB Y;5#:S)zYY;5!:BSvj Ctg' "jk")D`gљuW(DrS΢#?kE/H;fHE!lB4Y&o%*:Q`²_zT: QtاEe/_/r7O;Sn>D/F-&kt,?7FBv'z0f!Y$?:}PNs>uEF,"kqi,~. ))D`'1ڢ { G>']D|X=; s3E!2 Ra9":㳍XdIqk( n;H|(D`+qE/!ƣ}{~fHՉKSVK!ۈ4 z0g}v1{m:^ Ft ѫړ <9≙/__HFL![N4#z0Y{:G~2CJCEFL![4$z!0YI19Lc9iNZ` "|1~L'. Y3iPb`BFi1:rDՂBj4*z90|1YGfAn@!E'x gSB +:Ӵew~_3?k{9i\`Re_ )Q@MD/*MNM!5Egw4Y;5iG`d"dqYFwZd2%\ѱD/-+WaB.N{+԰@!BFetfMѫ JjX!|2:Ӫ@~}joƽ<"dnYFvZPw-蝥͢:^b RLX(D ,:Ӿ5d/.mC"g"QtV ѫ ulD2`xk5lHB`p:}BB6t&z00g "~ӕ>e C:^l KY{:AQ.E6}Ñ"^tB[ !>k_-?G X/:ӱt'#DB֊t.z0go8uwI!kEsBĉz"Nt6'B`( #UQɜ!G!kDsD/<A!BV儉^z BmwG!ErD=},?%ɉv1U>RQeɉ轀Ps,L=!Xyh+ 5n[Br,8N%@EbC35(D(Ų$蹐PS^XY !jFv! 2:C*߂BG&QOUzZWrU8ID/Dz,궻zJ!c8iD/EK!B*Ep^B$5Td+Hj I%z9S "P!:L")DBt& @/ک)D`Yt&E @ک)D`Yt&U @ک)D`It&u @ک)D`It& @ک)D`Qt& @ک)D`Qt& @ک)D`At& @ک)D`At& @DgOS߻|kg?fIF!eс(=?~r+ /.|wBʢ6EP#.kI!rKdF!%i(߾t\8M"S 2 \aYPP>~ࠢZt1*)DH-di?1'")-M?DM'$|Ae!rosD(DH!Brމ5Džo'O""d,c6:蝈Psc/q2-2)DN|f"d! @遙BKSMDV ړ "?-"'M9s^^}֞LKV3n4sJ!3^d}8Bdrƙ̉N BdEYB P B!"S Q γv1B|BHE!RQ(D`&:YO:ڋHE!RRDt.y)DRS@!:V[K")D @V bBD!љ>^$q(DLtע/9en;g )DTt0)uo,"o+1Skbfgf 19xwBN!/z PY{dEd)FzF!' H( kOvLTu&|c/x?Uv_M!H' kOLB'GjByyxqJW"wE'i!zMD֞l9xL9~{RnYQccBxYFh% @?Dd&9·ޛ/D!DϷ3^.PN(D ٚBࡢO}`N-f*}#R!>C"U2:D3 ]-Zz!&k7"U yWn+?{?|J]|餐BVcB"UW>gcԙI1k@&ȏ7?s6u)^y 2tYF'he @De/_[Y`8;gQMOlNR!r6SuѬ"d:Z@\h&{ĹT{D6l暉#gRa3CH#2kG> |C'ۏ`=W"Gǐ,R0v^d?R~/ݜ:Qi+yBы P YS;CvFDܨ;T"TU0vљA^ k⼐vx*Bd .:93Ld 69}x_3U"\tnf6 %3@6x֤YL?{ߪ=_U!¸Ef+zmO.>6"_ B:l?f -:43`ыpvIuQoJ#FyW^|_,S0^ёA^DgwUO ٯ),YzF!s3_t0sgލx ݝ*D|e!Rو(D\t^fW8ڟ~Pg2i9 ?~iAR 8%@ۗ*3Th9{5- 1 ˌ@" RY{mi9&Ib3s'?[jn,DS8WޚN'iU0Fѩ^t]y7mҀ\y~ú9Ο8+trx[:9W!EbF,zС [>nNUq))Ul,D^V#@w:ړ.DnNUNW:ܲ}!R5Za1LgH)_\ .DwtTIݪ9 }ȋc+t)"]k=mp5.ɝ7>\~D%!" 
mߊ?xx\ʱS!rrr?Ka|0hDA!BщqPat0B$5c}%>Hj &:T@")D 3-Sad#0̉hB$5#a^@")D eWmR!etm,sP!֖et%-sP!##B Hj 2jkYF'_J Hj 2R0*/uRٖBB"Kk ")D D_B!BR@")D D_4O!BV@")D D_"4M!BшNI5@ú_}:xc)D E_%4KNM!XDG]!2Qvj F":B=W SS01jThBqP[@cd"Ctƅ"ka.l#z!vj F!:V/!ka-l'vj  :¶ ka -l-vj F :/'k!et]4vENM!BF.\ i B"ޓSSB4uBNM!BF.H {h* B"0e@Y;5Cga?WSS0tq} 3Y;5faoSS0pY}Y;5d;SS0l9}% Y;5c!ї;SS0h!}1=Y;5a9W[SS0d }A7x[ ,:B(P^|T;DD!DR}( k/LQ̌B :BgLsZqTGf`3)ĉ(.D.(`* @t"P B Ge"2R00ybE_ ,D6qpNu2< 2}YFQ otH!BF Yӗ#; )D.<3 ;P2:B_({RB nd"d4~M Y;5,S(dU ǿ{߬"տR^ $:B"ї#9 BANI@Bd#FC!pDO%@!BሎM5 0v  =!`"LU)=!`:]P¥I `BR 0fd綻' #:vBRї&uoP<;KN!PDNH+խ'fN)D E_cQ.&nH3 ":qBf'Huolׇ\|BZ 0N]esU/\nG*=7!K`GbjQ0 iFH!BAΚї)(DRS0I :Hj !:hB/D_cP|=+DPѶ2:fBOwPANM!BF hG{W-d"d岌Θ-^,SSBZ @B[kyvj 2R@ڼx#k%M5 0d?u雥7>|"^tjFgk7>|"^tlA!BK.((DRSwѹ)Hj z.:UB_E_#IM!BEJ`")DL } B~Ng/ک)D@ } B^Nw0ک)D0 } B>N01pک)D }! B02@ک)D }1 Bΐ0W3 ک)D C}= QYO"dToYFH..m k;O!BFet|Hj 2R@N.oQ!#Hj 2,# Q7W8{‚K+hքi![x=e{& cYf4!z7CX"Fh/cXk` nX[Rs<~2*3d~>7SUY]9}NB 59@a~&B 5.adZ?aFD5.aFC5e>`Og IDATa̢B5e.aܢA5EaFA5E.a@5%.a ʧΚ@E0 ѿSkgM BDtҩ&De"LD:@_|[Ng~l2 D(QtSPj퟾'~l2 D(QtP4H"(F u(@$kJ]!”DK 5%.aZJ%ɚ@E01ѿdM BCy2&!GM_gPkgM B"PkgM B"ZPkgM B"PkgM B"qPkgM B"QPkgM BB_~&D5!LWo?@Ik|kK"j,(JdGo9|6x2$$@h+PZt." rU 9m Uk?CnR"D m TkzbU4IdN BA"Ц1qR"9 D(U#5(CH_-IP$9(BD27q?[TU33qU Y mJPk򩿼{߆/\w;ۥ@ D8M@2kQ"^煥D~|GM r."hkyU  i-r6|=zE._XS3#ռ] Dȑ@J\dlZE5;Cӯw5iǡU!U@4sWlm'tīޥ@DM@njUDfOM9]4x᡻g,qvL*8| bgޔND~DTcWTaۛ\,K*6Y gŜU1k?kgaq?մQw5P%IL]?֍ǟȹ;\*f:s\p @]@aB | PklysU{?٫{c7xKwlGxjzȯޱ@qB l PkWi}yt-1#ؚ2ADU'ȅ@d]jlǻ7 vY;)ȃ@d0{EouyvSϟvY(ShȚ̍ +~>_"9l@BD֤.yVqU]V"dN  m;2 Y)ҋ,fz$ҏvY(WhO&S'yDjv"dL % m>©פ8*krU e mAפӋ7#٥@DtM@,D@@$'1mFD=Bh;GF ͪAo!(ThE&:ttT}ыfD r."J ڮ +~E$wM^s."J ڰ *[!7rmwk{=&3@] DT26-C /̧n_r"@] DȔ@+q頻?>x>WϞyeXREk9fsl-t"qm^3|3xiߟ=jaUL޸7ݭ Dإ@< D`B _kWW3gTG6an s W?Em'#v)!O`[\$sj2?+ns7~xNI"GR BjGh0kj ȫ^#Kj/|aZqyt ҸZ$ me,wm+ >^{] gy2- @f3Fv@\6?] 
}zZBR#.6@݊)" kO<7.yI &S<Y\&HH~S'!/ PH}izT,%W̜ "־z\CL9#!+ y@!LP[B Vp&%HT{O HD!9iC"0m@7j?\YGlZ"D ,F]ȧ~U$EJ>"d1GȪ~?؃B$㩵&!y@&&M8Z;k!JQkgM B>"@Bhp v"dE") mΚ@lDmk|kK"M" mYk?[έ < Dȅ@!}8L\o:Z hD`B[(DϦnwA#6Q?y̪W$2'!k)քR/CNDQeH B"@f `o!F3ϟD$E B DB*}E?Z^/s}3LE\53#! Ж `O~y3xDuW"`35B*g|<|՛>8qH B"@k>Dj_O+O2H>jTDc@ k*xEݺMO@ D6Ym _kWy/{ mZ־62559y@ mZ֮]?.@x@?}cMdfXDD BC"Ц YP 3hN (h$ɚ@h!`Mh$h H; D#_Eefע5D,z"!@8Jh 2;!!^y@Hm _k7^3f}h&C B0pV V@4skBC`%DvED" M""#7x= f !UFxD Z"DP";ӎ/ :.=m1dM °"@BE`"Y0#ГЦEU[!bQ mqv䒻 Dұy@OhMqGjcʖ@! D6 _k?sᚙ361eK €C"@BH`tgKO&\ed2" H d/F&֞]3S{IL"a8!w$0.3U^WR+fN D@(BhK I@]]s+?ܳc@tDA6xDժ7'} +D@(Ghs EH=,4_8{oY>fz" %49-40!jjIBE\3@DQm Qk_ >qL]4 m Qk[s;jб^0M50DJ%cvBk`ڳ 7}pg@ B=f_K0r5п@dڪ@@n Db}֞u7u3Y 63"t}v /}-"!pbי&vo8/ίI ""'/'dM ѺC"{)6ٕjͳo- 2` rr~";~<8O "J D{s3kZNfɮOjWM6ڳΚ@#D 1aκǾ2v PVSDFr>~vȄjRk侻pmw.|! D8N/yH]kd,Vپdj }~pg$#' ]_2ȤtQk\JnqWHd/}H BHD.ZL(us cN$yr@ 2u@ 9ncwD0pDA IDAT"i-@dW?KV$WMe#5k ""sDG 8+[}?C BHD@*E+ibBI:u?' ,_Bgj~7U9n}᡻gv#Ҽd@_~ԓ_n|+gޥx$DC'jh DfouKkC"eTҷ 7T.*x1[^tM@# DZ;$yVu_~rWy}K?X2<p"#M'/wl DfDXh~Pko]rr~Q_#8[SҬ"A Gn忯W/>8/ۭ!2?+fRWn?Ad@$}{"kl7\܊?f3>꧈4oqH@ DZ;R"⨫=}yu6 ˠn2͘?R[S&n+7>v6f7]O?.\E!#p<>9y>5#'{% ?87ٴ=$^e4GM0YDjG]KT&^[@! Dj3DW;/[<QY@dM5e5wyN^ DRV=^ 2R&j*p|yߕ4_]"J?«M2_gVvPke\1ã*`y@RT61^fty_5DW,*X6>GgK35~[פ'\ޱH-_SU ¡BHk{"s/<Ӓ\]4u_?/mU[|.E"*xbxGu9F?YA Bd F,ؚqmǍw[lXS>+7_$_Y|LU+! kw}|A֍~Wf~O6$] "C"ޮ_3oj5cW bٚ/wv3ޙ_D8LL"im@d_k2mߋ_4s~=ʦa5O,uGy>MiuRDDmo(tzE~Vɽ6dn@duVzxˣ.vzd 5|<+@:. 2h xkQQkI+ibȖuXDDP"im@d{"=uc2Gq7%1DB?M,y'D(þ_Sj5xc}w@$DDϹJ. 
D}[mmW7y:Gv"<&D(ž_Tj5}"תw}LJ< IP BzC"3Ijo,ӏNRƛnCNmE"Ky&7䬬LR{, Dhe/*>W\{OgHbV;o@$YI.2Z.5 5'trGv|˛7~+vvP}@kDN>~W_qo@$y:EkD7]vz}l˄<7&lcs3[Dx%;;D(ľ_Tj5"u[mo&!$_un7ij鬌+wVhQ<_Ʒe(o69 ekP{ҔSk*yCo\bVxGb/q DōwW翲Z>K77~^>\X"1ג0|>n[َĤ"ao!C+ W N$V5Wi\<~3#yxk쓓}v@bTߓZ{͵X y5\G +X|3Ere\F_gro̓ D{X"R}OM*kɻ=t3kjno:BVGuEb,#-gˣm|mt'o̯iy![ D8@=I6@Ok3;voqF B?b{t]Qt!|mP1KX^:DRwy[﷞k항"{J]#؟Z{Mz1:!n@ 躢"M%u;7WhT嬋[N0YEY@Vֿkfw\b-XC^ $uED3 Hd, q; Z؜ Rթ  Dk|3@ֿR\KFmHGwsA ^ ?vأlWٚ RjR4~vjl[v_rq M[.٘B2{p"#31\{]W@h~݇V /'hfnf"Ң?kj7\ܚ1QGEF֧Kbzt]Q>y}| 2_9i7Ic;<'T:"m,"Ҳ!?sꨵ7Œp ݦ-Y'J!"V#躢}"1F["Új+Ky[,Y+"Ҿ!?oyvqˮ4m1­>۠{=hVIt]Q>=+͆'v#i5f@dE B+6*ϸ᡻/4Ohb6…;K}q]y"sJu(uED[Z[o~??<ÚСy ""Em7'DheXr–\ޚ2n +DCIGKաD#i_7i5BQCb "[@Vm#Onkl"M[$oDhǘX, {jo5@v_BJS& RJ 8.ҖO 3:f{$-NNj|gwy'ʓwV^xKzbՙpgH I B+ֹ8Κ@6@O rf,˛s-bMjhG!sJ=9n"y= D[ >#弊~!ѣ;_tS)6O8?_D DrzZ'4m0yj DhE-iJDhO؝^uHuEz D~K%-t\F~^5\;wbiJDZ!";-UߓdZ;ku9z>VuHuEz D&|2s~;z$ Gk[W2;7-yov'Z'=5@Z;kt=x>JuLuE D~WEy{_8Hq4﬿qHSOJvz D^x~ϛ?Vצ~v"4a|ꠢ N]/,yҐ/~>vtB{*YHuTu@qhգ*ȖZ;kvu|갢 ӬGV)v"yoaEY Kj Dإ㊮+3di@vY T]Wg=&aU]W'a j D7䰹Ȣ ĴQdB55RZt]P=&fաE lڣ1Κ@:FձE mۣz2Κ@1c&E oܣ3Κ@qcf_Eɡq Z;kBeѺ_01j DH / D:G~&k᩵&!%z,X&\TRkgM BJ@Y бLQ] H5 уd@riDus0v"l Pat]PL[Κ@m#+m Y%zlKuu@qn:<Z;k6Ew1(Nm|TRkgM ¦1N1F)PkgM †nAF)06j DX=nPet]P2Z3Q= tO5kGMÌ+SH3_cj DX=nRft]PBQ=!tH5G㌮+SJ;&7&aEȷYuu@qi7Euv"64(N1 }BTGSkgM R􈷅H ҧEupv",Dvۨ5(NAM}Κ@nաF)%è&a.zJuu@qJjw&j DDp۩6(NQ}Κ@mKF)o!}&a&z`nLt,#Q(Κ@ w'YX бl[.EPG5EC+S`RY=tKuu@qJlաv"=lOuu@qlիv"=dOuu@qlձZ;k:8eG]`F5ETUvt]PBDp"ɜ@dڢG{;(N~gzZM5ȴEOVwt]PRNELZ;kI:86q(v"S=*=@uu@qm0Aj D&,z@zУ !`jYLW` ձG)IT &Ac+Srߟ.PkgM 2YуT]WƿoQ]1Κ@d>(N٭cM5DE;U~t]P›ADZ;kt:87)v"=c^U]Wg }r쨵&X+3NJ-j D&z ֳ, L`)j v"#=[uu@q&M*j0Z;kqz:8S'5(D 9ȨDWht]PtԉLZ;k1j :8)-j0Qj DF$z5T L Ԩ&0+3΂֢F-Κ@d,GT6(δz 5z?v"#=Juu@q&]pQ &Tf_FiAF3cΚ@dGNtlZ;k11uO б-jt0j DF z@e0Q@Y/z@Fg0@Y/zh @i0A@yY.zD4괣 L۠WQ&)[HhpyGřjFA%PkgM RP̣ L`(Q!&)X'Buu@q&u0a@Y+z:8S;5<ȌZ;kbEtbT']WgҝqIPkgM RN L Zp Z;kBElT]Wgy6QkgM RL+3D&)Q8&P Dу Y(z B\԰ _j D=^U}u@qt" IDAT!j|v">8z5Z;kDOUCt]PwO5HQ#"(EΚ@ ÐTEt]P] 58Z;kbD>P}u@qt&MԈ `/j D =EqDѝ0nQ#4j D=kx< 
L?ajFjuY!zt18@Ǧڡ0MQ#6-j D = t&/jpJ5H"pgrĩ&]!@cU`)jDLZ;kED:6njD PkgM QB~&(E PkgM A&(ڊΚ@$[уF@v"KQ}\u@qt7'0v"9 R}bu@q8P(v"ݢTYt]P}+j t@5H0է]WGc5hΚ@$TtZꃋ+QMY9KT}tu@qt=0YQC]Z;k0}c/(5 PkgM #kx E8&3QSΚ@'-XUot]PZ Qk侻pmw^D7cV}u@qtQMFMrj_4n.O Dݎ\)GKUG0Njo.DxO@IGS$`,6‹}rF ҙz;( Q5Sko/&t4n.+ND7Q}u@qtX݉:(Z{ӏ?/@SUqf9EzG-n;.I/+ib] D8Țtz&[K] Y#f@ߴ3@ߴ30n4qkDD&7 7 [@ F |7igig`%5с@D0bo@ !Poo@ E2gM;M;D6TG[n.!s@ߴ3@ߴ3@dCzƮqw 3"ki@}} /}4xƣO-%dvvlJo۸@}}M=#LfM;M;D6ͦn_rqz[Kț7 7 rqk>lE@b]B igigȖYV2y;۸@}}-\x?i6GfM;M;DUqŭw}}[Kș7 7 I|a`.ohڢ">^{@i/$Iؘб1#4lp")߹^'S-Z@$'}mwVv Dvn@"D#&G L@09`r"D#&G L@O .v=>`._H=g p`"@{߹Ret D@J ۚ_!rF/80Y>$`\N|+zŚK ߟPN]K!<$1v1 T]|㇇ 2[C@?Kme80ej TÇ0.\useS74l"?\?[ <&`dT5U.(lb p`"@+F? ЙfEt1Im>8"[S״*@^wl"80mk?85Uͯ7D p`"@?bC#TWU?I"80mYy9E:dMU{g7I*1-Vr5@ tt_"1-&V*^,TʍQǺ\!ьL@h!=20^4[SuqC ?'d5~ D#Usvª[3hdD/]˿esC$-80qxmd ":1-/V s?>V@$Z0^zw\{g .DMO+ 5"@ @٬``@hx҇. I t@˖U:ⶻ&5 ^ D%o~;G0uL@h!o+ьL@h!X[D$8FW_T=ЕD b'H ܔ v Б180mմג AE БT b&ژd}JEWݙ]s{A ЁT b&Z_s]tcv)cIR&M 2;ܓ""'8P21IT}ϳy~tf6s­>kwL: D p`"@;ֻ>w㇇@fp{>HyՏ} t" D._bvKfʆ@WҁL@h[؁Nm73+5L@hmO(|t(K D p`"@{ߩLzuëeɝ7oNm b%>Mv-x~WOk~M;t>1#&G L@09`r"D#&G L@09`r"D#&G L@09`r"D#&G L@09`r"D#&G L@09`r"D#IL}5ntq26hAk313 qހbp])qE_ugUwɪʪtwUˎxKDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD$-蹿&@} ",rQҫ7<ǍWZDF{/5fPv YAdxco`w/Κ "{&f'޸;OLl#UO)}E3@D?; z6#v9^v ]AE3;5~#Wv~o;^Dj"  +Aq ڃRw=U"+f4 ⦻@~:AH r[Dn!#W{u&sF2k ?^D H/YW&Dj(9Gtri&tŗoG+ԎW?z<}-2i"mxӓ2#X~z?_;xȚnD.vϞr=ʞ0;fuYDڎ y8Gd)"WK65+,O*@j "νp>7Xqqu."0sr;!ZI;%{}Dq Ş 2l|%{ג[/cOUHFQB:oAf=U AdF 6d&SDf9٭YOT{?}_WPȌq5HO_-Ӻ19AdFy(?odkl.Y̿Y`AdFn!rIGzL6"3ʃ.yWRi)-"AQG)z %w_/yHJ^$Pʬ= rk?|GmZhY{AdC^_?t5,hF6gDPg; 0k1<,qͨ_6`֞mwセx̪h}cD fwv "Ok)@3۩Y*YՈ 0k(?Jy0N•5Gd֞Q}j"3&۩M!ABg]c~kkŽHw {@ SUz!w8fYeۅY7ȸ\(N<}\߈ BF%}Y gz\]s1Ley%ϖ}s6Y@j\]8Eҫa Tm:} BF%}9w7G-K]q[EY3/oDȲP3g|Ə&{Y~HqcrEoߢw ")Y@jMvH}5'sT8dz/>cY@j_0_ꭙSF,>z些W 0k/B9c6\DJ^ZDfsgtOw&7YD|igV!4¬]+Lnt,9 {?ye-z"v?y(u<ʿ,1DVl!2wŨώ/R4 M0k&@ک "vj4 M0k&@ک "vj4 M0k&@ک "vj4 M0k&e t&5A,K 5 "ddYgIM!#>CMjY@jRDȲP M0k&@ک "vj4 M0k&@ک "vj4 M0k&@ک "vj4 M0k&@ک "vj4 M0k&@ک 
"ddYgIM!#>CMjY@jRDȲP BF%}2,3Ԥ&@ک "vj4 M0k&@ک "vj4 M0k&@ک "vj4 M0k&@ک "vj4 M0k&@ک "vjY@jRDȲP BF%}2,3Ԥ&e t&5A,K 5 "vj4 M0k&@ک "vj4 M0k&@ک "vj4 M0k&@ک "vj4 M0k&@ک "vj4 BF%}2,3Ԥ&e t&5A,K 5 "ddYgIM!#>CMj4 M0k&@ک "vj4 M0k&@ک "vj4 M0k&@ک "vj4 M0k&@ک "vj4 M0k&e t&5A,K 5 "ddYgIM!#>CMjY@jRDȲP M0k&@ک "vj4 M0k&@ک "oD{a0k&@O]ZdNMY_BthY;5Azc@L IDATک "]gNM+"P'vjt\5DSDÚ!¬ ]l D`WQfg "EmQvD]6s& "yfr "5MDzҥ3k "ddY.SC$B`vjYI6Dz%ά eY.6Aĥ3+ "ddYN!'C璈KgDȲ\]AfE6 bϟk".)%e9/:,@~aCg]: EǏrѿ.r־\{ EwU7Z}\tArnkDz Y{q;'ߍ~Oy"Yt Wieq̄ yEMTYx/7.?_U|_ώqg96ڟo'r&ҙ{zB/r)1Am@Bf&ṷu5O% ]86|qv?DCZ?,aw.|k<%@2AcwѿAH"5d/rpz7WZEqB9rkf_ DnjzD Y'每y򣩟ONyJ;?^[LWWmQ\7Wyv˗"@&%.ѿG v֞*f|K7grV)9ߣ{'=o;<6~)1xkR;S7SXFuRSߜmo37nT%2Ⲍ@fq㐽%ʜ(i'p̝wɝGuDh2:]4% j{zjɥ2Oh璙Nq5N/)ethRD3 TS\qMf,I -D7Ez HhY}Dؑ=6 ";L %``A$:X4. 1ȺKe&|񟪤ݕ׋7ɝ~W%`XA$:W!w șw[#=A,U$ "{[!u 5o1K "KthOoD~qfeqyuD HѪ_6̬=c 򇢇 W-j>Ӈ~."sg[n{ev}IADljѿuhM]fߜ=zSwvOuD Jt{d "#q+}wx;7{IADw@ѿzhG )?qJx0Nuۿ AD_?AQi6 3&۩^ DO1kbWkE)I۾1AD׈ ?h\ RDJC[oKh_tH"c2kvnŌ%Ύ#@Ȧ{{)dʸ\x[ iDЬVf탷F ԍ3=xҾKN"ж IgMjg־Eyww_ slvۿ AZ r4A-ڗ7!1WLZ~+{!KD]"Ҭ]줺-UO| (nlhUtH(#5k_۬hC}ŗoY;Ƶc c6|)):>@3ښ+﫺wG-R%03fK5s~ǝGO^$%DȨ2<@jg. b rJYK "dehH:N "[ "k^r)Az,CnџN)sxW? "A/ BF\!&7R3B "+ "+_r)Az,{CDDP s=<$!Qetl jճohEtj dNM6D>fhAte jcNME7n.fh\ta jbNME-pś3_3y "дEџԡYsC#Wf\gaDam?5 ͊. hTtVv%&@BEx#A$5A." "М!  )ک "И#lˬ M Ivک "А'џ%lŬ ͈n=qک "dԇe]&u|;Az,ADn5'Qet=6h9Ӈ~o#A:,AOE4CgO3FD2F†]9ѩ3Gt2&o?~3i?:Aj] , ڟ=_|}y4RD"Pfoџ.T2k__{ Aj] z/"foΎNjg6 "P^џ0T0kNJ{oyLD^ѵ`?d$`>@ыE Rl)r)"Ut*cM "rB:Egq8 3F^}K?((: Ig k?kߞ#e.܂fQt$i־6EdܶȄ NCyjڗ.cf;[ J !SW\[n]UOtVi}^-DNDN"P80Lџ:,Do[D&e`?wX*9C"K&fϟ 7D`?%~\Eά ۉ"z0lfNO%A Ӈ Kx3!V}*^( YԬ}j\ zg"9W 3k!HAmDOl z0X!7gA [Hr`"f*=D9&'|6`Y =D9&Qe=޳%  5A BFɗep CPszދXXF%_ѳ=[^6 OCdw[G!2zgK i撘2J,zt!i"AR/詞Efm$fgzvzg/v I`P+fD`#=^A HPj5l"zgwkCDFMD!z0Hjl z=DD`у<5^H C|n' "P]O}C].D:E& `־v|;~3$#J9}9k|x=RoΎ8#*ѣ9!=7k}{tjDn9! 
у9A5k|pvDqI"=%z A[QI\|w EOĉ^{A̬}Ѫ L"\PN@F1Aɉ輈Y{=Oן:C;X|JHeqE@.b־6ϼ{89[dDmD`qxkn 'ם "u)A ziNjmD\$N.k 2  Ad|79"@A,W"MqM/ "DNkj־6{Eb"x,!Ae N&+.k<{GIm AeN*Kk})9!$"9/d"P3@JHɷJ!e=Mb2^7 -G:KIM!e={Qz2bAPѓ7IE/L&㦪2sByc7iE/M:&mwG<;KHyS7yEMYAi22ADܤ<gₘkfHQLF/D`NMni29Ed![S m^tG=>d|IL]gl!RD`FM~kǧ\haD`FMD/R"b"2z誙 r88A QN^tDĬ}x{ IȘ Ӣ'm:"z !7'c4zH# ?Mq Sl:#z1t)s>]%$)^t@ЬDTMDV򋚵ᄉ4)ANEtJr Y%'{ 'l:&z]g'SQɿ8$$DpME/ZRK0k8̐2LDtR 1vj,jy!f2Xc5BCک "d$%Tì BF2zZ]tY;5Aյ @gDwV7.?_m[컋]j,j: @gsgG'=y񣈷ܽקū$.D`4]ȨYۣ2^s%g|,s\> "i^$}r̻eoUg>ZPD`Kѳ4HY{eIDJxHCJ}"PM$M?Dcis9cJŗoGUDnU CKR gksC{/ʌI_N AxͪLD@6kTZRy|[oTqu˳g;\?DT`kSDIj f3?F=gʂȝېD^Ҭ}$vhC>;9Od7wJN(ތ24l EOO vfU2lm(;8kdŞ =E7ʕuAQD=UODr>z=5_kxm>*6ˑ̔Fʔ>x ?›E=3khmڷ7 fG׬|݂H/ɂ[ "0%zbߢ7ژmt|̔=QDTDJ e.zY|CUA8<˿}?OI "tA[[D_ "OjA57"l*-Y!h|W &3]U"׼A.h'DO CZ "wIlςȲgDAiz!V vKA+Me "~_遣6k "I+Z "ǦS]o, "JKw "Žx;Cad$zBq/-S9WAdF;D.2h 2W+ŋ#D MmȬQeAV v =3k/n')AD# 2 ۉtfBA =*ɬ}wN.9&W}zpPQYDθtFVA=&լ=D\:#S}zPEY?'2DiUDC ߞ_u4{)MDhe)05l4(Ӭ,^8#reD:7f־;"d$@5uo4%Ǭ}wJG/yED!]"'r4"5d/rpdAc&D<1}̓M|r#Wb$Av\kz BsPY{2/ݜ}[C>ED&x`Y5dř`"N2Q6LvnGaB9ʅ ۨ.^*s꛳DDwA> =(0k/nwR53"O%ڟ{zjɥ2Op `y,"g]:'TzBDcAd2"c "  } ".6{NG_A =Dμ?E"0xG BAA^C-`J@ǭ%+("iSDj=DBC&ک " g?}W"^8wvٝ##{ #W?⛳Gx B"p*`H R<1wD蓪=DaBG3U AD`V BF[-=Da0>4H r{$ "d$@M>8@ R*DheY" I'2kvn0%Ύ#MUrD6Tʬ}hsn{(r"00Tά}{ r+,9AS(U4k_޸b B?lC'fb' R B?"R 0Tm6!g>n}%' A! =BY{}һJOX+gT"A =JHIM$8A$5Aذ" W 08Hj= @U*{IA A۴" Z 0,fOMbNM6!Cz Y;5AD`S,pSD{ -`SD:Az Y;5Aۢ"0k‘o|&ql)f\rÇM۶! 
=v@IM^ 0HjU^[A&<DRDH4y$ BFUv=Di&& -jh2vjU\[A4{@ Y;5AhWG4PSDm{ Bf2k&](h'vjuDL Kf:K=dNM!,zLY;5ADG5@ߘSDz ˅=cNMhF /-Po6D]z +}¬};OhN #HjSD`7:If}!&EA =zBIMh^A HjcDژ/Zxc "t - =:Ϭ BC,X:vj#@Bvn3k&esD`mcNM!#A2ivjZA6ڱ/f2D ~^1k&ъeYCD`saNM!#Ai@oSD:z [ =Yxsu&6AnD NA-;2: BCR=Hj"@?]#&%Av7[DB t BCMDRDAKf"BtY;5AD п`NM3j!!@'SD Ar f"@MB&gNM+H(@vf:"@B0fNM#H*/@j'=?݈}/ "tC=DzqH,rYg޽~}y6{ ZK "t u+nQ3${zu$ݥ%AdDhaYC] f9䡽Nڟ䌏2~[ BFtM_ f/XI"s_U|惫KjǶ/)"ЀV䐣DxQR] DҨy@rLB]* 7W6"GR B" %\~G "q"Z,[~c?t RK9R65#SkW}9(E\WK@@D`B7>~υ"g"df)Cb3!jLG D qjBH1A3kr1]" mT侚G B."@ @H13 Qh pRCfVY.6Q DB*ݭL"Y.y2LRM_kW&T+.mZT[}b D¤y@+iZw^(DJH"7"dB tT3}!0E$]DJyr Dy@f-#^d6Y320q" m SqJ/\IHR>R|r DȀ@'*NSD.\?Wy.@xS!F Z{ln$$/T~k"mSN6!r&C"p"B.v1Rq rN B8p MP{vBVY.XY DV~Q;YXJD!@h(->q_^=jlY"M,"@@|jHOFJ>"G meUkxo?TAdE BB ڐ4PkgM B,0Ц Z;kByD`yB[3j DDDgUj D$ڢ&!@Xh+־ӗo?{,9((I m"k®>tdH B 0v CR$R !NT"ڶE!\Cʑ@8`4JP2E$&!LX"EmW!gK|:2$!@Wh^t:!XV ~޿y/6q̊@(`tO}S9x}D+G!XЖXZzvȧj\zW"S m%^Oi+f}?V"ĈC"8\)xIӺm/@`R-T3"4"6z2M_k_i{L=37" m%NqǾW=ywM Bbg)3y7>DfEDn D!b6}"{ӎY Z 0XH"B?`Qt#az퉅@Sh,HSf-ZCĢ7"DB@`9WϐO,@ɍsDBA`![oYT94!arq@8PkMv3k76ګ)"6A@E!gҡDb@BDEګES/'"?| "[&I]YmRkn5wŬ’ $aZYȹt,UV8e1v=^}!EK]m5A}7T_|S=GkO>5̀@IEG!+`+0 #M#p&"=y.DTt&8I 5SNBt48I DRt.섶dM „t8ѕF85 DPt'LE 5ӉA6DW]@B[J&a:9F: Vh[ v"L&:J]q m.SΚ@D 鈢- g &p"Y0 Qtd.NZ;k&+XYDBM`Y0^@Dh ̝Z;k/m<YSkgM 4% ''0csxҥҥ{淟= DDB %f+~}^ɐ@)M,"@vB[Q`jg!gn@eH F6*f&(־8|Aǔ!@aEV6 CV.~02$aHY: Z7C$"[T㊮Y mOy \tf'yɧ#+CFT mR9 lP}Q_zo9?ҁET\DW7˧~AW"-:,f,]"^0s5o~7D[tQ#Yt=Zh C@}uʩ߹-#?C m\9־5l DYtQ'Zt-_h doZ;MW=zwC ¸Zآ )6@榯iNLD ¨z(4@֦WܽO,@QEGEQmh|M^k%Dz""k|4HG]A'$r5y!f !UFËSdM xsFK'6@v"Y0ܣQ: 8E.EU;!bQ՛FtDb,)K]6'*r|䪧̬ D@#E&wLxL0c 0s/kkZY1s )[rLb!/ 0}Z2yH w IDAT0 p B[a ꞙ[bZ^8.!!R_;fDha1FJ6@Z;sퟪ=Ϙ9#aё~ $`B[c TDVMOoXAdE (#AFIR@Z{}[Yc'l_fy"!:h2D$MW.gED 60#`QBe FPy+- DAt&ftu,Lh DC/!axyGtѵ<30Zه۪ϞY.Ë;Z.% m_Dn@eH v@"`B[h`Rsq' MOo(vt4(+^k|JUvt4",Κ@E]CE miҕ@aEgc m WnU(N" +:$kt)`=5 @  nFB"8mWKH FA+ 70k=/{I F@m70k" (:*ntP~ D^驿w" hbg9F?B[p` Dp@XF\"nF pK,"b2s;K3`b!,)4}{3ә@ XD~/֟Ӌ_ϽOtT\XK +^My~" eB  _Տ`t/?ko;|~Ͼ壿Z8*:W^KN\<~qd(^%"} F 
@FK/F9ܙ"?ڇo|{̱6~ "+Rk >'r9ؙY"/p}O$RC4F"wW.W>y!D"Kd&~T_xz?㖚q9ԙ9"o840W+S?$_>"  ddZot/8I]ƌK_V3=IşT? /_F B%@ a@dM #~AG81@9'i.3{sv|gz:,D&a GO騣 ٘m /;'/Dg޼&8f@޿P  DBtS:f6wy6)wO9AD Z_)N 58J]ۦޮ]2# Dvr~^z;Qxt,Qw^skQ{%Y NٞȁJAڝFa"Yp|tѕͼܽ->SuSw_k:NEizB4PtTk L-~.2$xFo鸣y?L(-Zxn J7ؙbJQy am60v"-:/xtU3o"A,Zʱ R}f/W_9US7 "j tSPk{cbg.xuN"+:8@:f޲D6G7 C6JxqKkNPoZ.o⅏Dn yt93o"juXRH.6SPrg}&Q~9v@Fx5J9cw Dn"zt-3o"u):b( %홤}'iK @ 90vUw":8H:Jff_+ ƭF(]t "as`j튚>Z戴nq.0l أy;,Wg/k}[W?P<%wT-ZWۇ@6C4(ei6O~cUi♎G@DGI]!gUwkP7Cd}ɝ$vB B4&7xҥ{<#|OwSG 1.` +M/?5}5D֧pL?uح"OQȔxa⛞[D>b5U;,Z%c%?˱.B@5GSYٳgQ]z4cJcN%tzD"tֱ3]CvoqH@#XD7y/h^p}vF厘ꤒB*Ѥ "m60jnT{WeHD8h@dz"GM{^uQ Q6Y@dMRL)n#E B?{6U<…;^Pzmq0Z`!~*U<ꏰGlB=o%d$W "ә֮e"R7d5iM-_SU F+"s+A!IyeVO uvmZ܁I%RNQ;K3K !ZG1M{ 絎*(Ğ8|MU-o9o?_+ESw-f'_k^^\{=ԝo٥9vǃ㹻t҆S־mr÷\|YdgaTpҊ SV@o/l &vB񃝧nA\ʃ~vv>'xח˻;AD LbZfO_3/GyvQq}σw;lYS?_XU"(:8F:Ze"6 |\o~]둼O%Z}ze.@D"eKlZJZ0ͅnY}d#>tb ;/}D8LtqtuʼDʟۼW7|ΊwzY朂Ta>NBFP?Ovwy. _^O LMQk W7gt|A_zvoJ BёqID)7n܄RCqk1&Nj9%h3Ad'#њbO9~Z@u5ET-VꞙB"[V(zN #㤓Pg RSYW۲-怼EQPN;gRMD6W hZjCq-{f}m⻏"@!D'GJg][@Sxc~wڔ">!9SDTֹ!7[r +soI:@At;3E}q2iHNϙ#~[g?~? D@tqtɼ Dj u]ym"!h㲪?ڰjpB7o]h,nW:o+oT]o}>[cg b@f";&8D?q'?y Dji0@X4+y㲾el*}>KoRzvP$jFr_|2Ad7߯ܲ@Aus7Arweo_O\oiF 󌣥.K_ R 07ޤ""*DPߦB ܗ{d1wV&>gGS9 :,4i~'mlObD+:@:bdDf WjfܵY.T7WLNYxt7"r";7o@wwD):B:Rdbl#&"wۣst:Y߳Y9: kR[[SOt1t.хȼ DJ [,asx!B__d?sg\&W"atz쨟ٱ/92%ʗ}m-:)(OΓIgq@uG ~1uBݢ@aEGH'][ށHk@_m %l?=wV|p<"?;Ȏ_K{<'9k0$c ly;3뒿Ϗfu').N[ a2:K2?Cmgj{Sf^t1t6ѥǼTv}'vST&*F/iovv@u@ n뵑F-꣏'sC B%?vN'옷1 eKZ3_lOZjED"Lm))[ؽ剽 m94dEFV"t7Z@Q!Ⱥ ?_/t0w`ͅ{@ e7"OT&|x4/Ѳj)"uhD@$c"1FW"Úf~6"D"e00ww~^ܲy0m[|/<ڠOo@dB5]sQ^?~B& "flK R[?|9ڶXM!|-͛/|}O]hDHD[CҲF;o;vl&xr`LEI7"t6Z: Rt1oc"ء3WЬm$j&D#X0H_U‹[H5wC"ta *St1o"i75T"! 
DPO B'ưIE6J Ѱm:u_H8JMӖӔtuح5D BFA`"&u<߯-ν g> #c`鬢 y'vM!Hd'/fGߤb_?}ōSm9dM BiE6N 1]y-(͛WŽ_`݋;{:l*ӏ@E`"Y.:\:bF D 9ͯ^}ӝ&Y 7|!P "z:n @§[op YDd}:O=V" $ɚ@V҉E6V RiI*8٤ӎ jrw։Gy #ɚ@6ҙE6Z ZwWqw$"ݽmM^rl`[#p@ e~<5O Bb Ԣky-imu?~5xu)}suqD)qG*!BRXv"/ƐN-~uL+jbYu\L+]B LCPkgM ~(ҹE6b r睯o 2ʏmqׇo_m=DnyY%震D^j D+:G:aF D&|2k~;zYM5}p&YI5+:v"\$]t0o"wy;Qk|+?uБqeuC&)N>D^pYGtp1tz-7U*9, Yy;;zێn}yrٿ )g>+f`& Kj D#:K:rഅ`v"4-F/T8ySkgM Bbg9 F ڕ@,v"4-@ pJB3Κ@&DNMhAY`4  Z;kFt"8Q}LM5F" &Κ@:EyHg],Wh'QkgM Bbd,E `j DX-ft-xO5UyyFD8mj D+ƗN4`%&":_:ЎƢΚ@bLGv08v"DSH=Nh/RkgM ®b\4 `8j DUL#lx=BCZ;kvDGH'=`Κ@b"l Hj D(*N7z@']#C5[9dF#&a#:N:=}ЛZ;k֢S 3pоQkgM BRL)rvНZ;kV3IspN:RkgM JtFkg99G8Zh mYpn0: @`v"-z8@>B;QhΚ@! HZ;k2C" 3j D0pNY0Z쐯t#vFڳZ;kŋ'"S0Ω&Yl"D::eX:v" MH'=F`"]-KΚ@d٢ L'`Y,Zt0%~Ʌv,Z;k%%¤"`YY,Xt,']9QB_D5rE%`AY,Vt()]8Q }aT? "&YL"TqvQ}1O5BE78aj D)&F q4vQ3'J5"MA%]8QQ3H5M?']8Q:tQ4'E5M;+]8Q:Q=5'C5L8d.]8QwQ5A5⌞6@qzmfN5Ҍ2 ig9ӵN1`Y,"^1 `Y,h"n1`vY,ʐ¨"i~1;`>Y, a$"iЎ1[`Y,0þID=cT@Y,pKW$z8@1ƨ&YA}3.Ip(#Q<YRkgM cf+]8Q{Κ@d!FP(qL;FdB52?⛕tUD{ȀZ;kEf7D2u5 Z;k%l7DAF Κ@d/]8Qb{ȨSkgM rF{9KW&z8@,ȨRkgM rb{yJ&z8@ȨPkgM ryYJ&z8@:ɨqRkgM rڢwJW'z8@<ɨQkgM rҢGvJ'z8@lɨ#QkgM rʢGuJ'z8@_ IDAT;ʨCSkgM r¢GsJ(z8@Q#Κ@tE2Pp(*FO5Ɋ,]8QfWF(8Z;k=h[Hq̹Zpv")zv՟hOW)z8@.Κ@$EҎ&`L_ErΚ@E `Lc&QcZ&9=c!D u;4QkgM rjb04!T&91CDiM#l&9-CHW+z8@EuQn D2'9)CH+z8@%QeSkgM rJZWp( 8) Κ@D#]8QQQkgM r2GVs.Yp($jj DNEjV5E+jpYԼ= ޳V(&9 #IW-z8@>QSΚ@$D&]8QE nNZ;k=RtᢇDу59v"=Bt墇Dх(j0[j D.zd4KE=NԨ`~Y[hՋE7:&ql= tQ!&| = tD1ìD aj DݏP~'qaFЏZ;kIDJ8QpFH]{{˗/?}{-R 2nE 5v}y]-@d\ѽo'z8@4,PTA©kb "kE &`%ÊWoo<~ߢ.7"ljn9HEU0'j'*YjBG|-rC rC_`p(zr`xQE ySkwU'.ѺE]n DrKEo-L!7j]kSZ-rK Wt2z8@:0څxj]uS7VS0QQk8Kem{ۺE]D趎Ƥ+`0j9žu,Xp ݒ@1 '&"kf9E$[忮t-_c_ ҺE] D8ȎbCr[K[ !v6igiguK";"&06 06 n @d@ƦƦ-@;ش3ش3pں%сA@Dp´3ش3@$0ش3ش3@zu@cc'ӫKȜ06 06 )/hݢ.!sش3ش3@~e@:n9`l`l@ RG΂_.ً>p@ccb#r[Kț06 06 )['´n7`l`l@ R{{hݢ.!oش3ش3@|E@zl5`l`l@ R* 7Wr[KȚ06 06 X=>~m9ZKȚ06 06 Jqo֭gsn3`l`l@ R;σy2m@cc:UI/S|m"5J:J9j-|@.ou-:}@>J+Em w&"G ,@X8`q"D#G ,@X8`q"@c^||чg.xuQ'VsԾc${݅F ^@wmD p`"@gm7C"X#׺ы,@:=чxቆK tTskBu.X!5cX0MnVW3`%*Y,@$#64|ЃN˪1Wfr b&:YMdL?_':sa@Q1E£@M 
tqU^85U7[ D.d*?bMU`x驺?u,@`Uo}?*0gVͺ@N tPo%ϸ5UᝍV~gU D&{l/@6kG=Sz&!X8AxjM!.oY`$5,@~dP*9m}߈>*TtD p`1"@7/Z_Q^05X8Axc}@D" s b!:0^Fw= ~X$Z`L t`ۼ^"6 "@ _~wn#"@ @լJП@(/!^e{!"@JxƲ@Of_c_;& D p`"@7ip[uo>vٿ[Coطo)hѳ/A"8hg.WXRM3ceC`(,@*؁AUy0@L tV'| -%oi D p`"@wϿ[˒|} nX,>h⁷0~&y3X*8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8`q"D#G ,@X8wP{\IWmV|fwz|p "@@$y'pR鑿`}k^~@-pOEd}]]ն/Z58)@=_ԍǻzӭ0~kM@"@]e$"_či,vHҴ{&Zu DܸrX_ZVu ;fz.*3U D'} k]9pIU37pLĚ.YO؛Y\9tf@"@{tntzy}?RqDZCQNž0Y/gǵD@ to迚żLiTЪ-M\k[L ڻ `@d=^=6%D'#\Y""S"ұcD:"O<|ҏ}gM-_KgvƳxiu>/z?ɺ[͋GSE7%Tmb"Or$Z"=v]|S_l|OŪ??S/>TAq;A• eټéolgJA| HW*QNO2'Zv'?I-6t|6Xg95JSAvW@@"@@d]#oku|?\ۙ۔ųv/\0s oU@Ofs c]R7,jQk(5HjIoi3` j@"@@:U= G"5H}JoXb7}niϛ8>,b;X@ DV-mho||E%PE7i5N5{l\l~Ъ+iOmW j@"@@ҥɽ>w( Dv?LW RsL^z^1&Z-7֢%%貋.nvQ Dl~X ܃X@ DV"Ͼh"tOc߃^]1&Z;ڹ[ q'lvlbM ܸRW+g?ݚ.N<)}鞽bM TJ꫕H+!n)Ec izݟvb"O2$ZDn=)Ż"w1D m@懦 ;ۥCW@纰%Ъo y:KTJ):"=v1cw=+񱻝6?xzEqՐ%U_ɺЪo J\J8*IaAS"}6?8Y.y"ՙbu oU@$uwV="}vqT >U&GӉ"cs"M zlNvk g"ibE&+^|mqȮX@J)s oU@2#aJ@. D۵ ~?&v+ AWlKXvj@"@HeFByGH]k.w"=6?<I{o}5Kr*HZ7-_@"@fT?;鴋] 5On?Z@$5bwT>HæOZ7@RHntD:@dڗ|; Do~D ҼXa.G定sTtZ7EU˟u}]xgvqd )K~ᓵOH: 5_p $vn}j@"@z n|M}ű:JpǷ-;o~L ™7 1fsxM q kU@ڦO'[uqIDATxh;)%豋cb?>{߳克o>;zn~L rN4 1vt羘}W &Z D6*.VoL @%TWsc77z_cU>j@"@ށ]?]NS8>87] G"Ѹ}+s\5HYz" ~JzќtH Y/QܔnʧkW r&ZDO7Y[yevޓt5yWTV-("UZbď?'^-ș@hu@ r~N|ozI*ױޔ. Dn|@*qztb՘x D^-Ș@s>xg8n=<߮ =A[ٓul%v,O²<9Ln<~2g:k/ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ "@ <31uIENDB`bayestestR/man/figures/unnamed-chunk-6-1.png0000644000176200001440000022134013603762237020375 0ustar liggesusersPNG  IHDR .PLTE:f:::f:fff!c!!!!!!!"c"d" c"c"""""""""""$ c$ $$$$$$$::::::::f:ff:f:f:::LPLQLQLRLMMMMMnMMMnnMnMnMMPMPMQMRMMNPNQNRNff:f::f:ff:ff:fffffffffffnMMnMnnnMnnnnnnnnnnnMMnMnnnȎ:ff:ffffېnMnȫȫff::f۶۶ȎMȎnȫnȫȫȫې:ېf۶f۶۶۶ې۶n䫎Ȏȫccd d e c c d e!d!e! c d !d!e! 
c!d!ePQRfȎې۶QRdeM pHYsE4E4Ƶ IDATxug}bPU ہ[C@N+QCG1N]=p!1L|x{ssNUPBd_=7s"KWKK;7bC v¹9:HJ5yq~ W"s"鼰w3q]aa9@PaHJW`:.DRr3ץ"لss0G E(B$HdB<ϼ\t$.8):@"%'ڈuS×]| ΄ssG E(ՈBd~#<FCi>m!ҙp;SS>:,DK{G "pRtR n/yO*QlJB$?j?U\꟭^8 ):@ +Sm)D.n`!R(D.%IJ5ULgh!r>; ):@"4;KC jg(D.%IJ5,˘6'op!fs ; ):@"au6#\l9OG!r/10HPz~_<{>>x"ȕ DX;wϳ=w=0m"w>įĝ}w^rT}zor ycw5,qw]Pa]3qp=w7jz;?F+<VCІBu^Gp'.w&C)gvQ з$9,\DXE<F~mӃ}GJJ5 ښֳ/yj*nƹH=lFeW =V#g)Dl-DUd!2C߻iQqwl^|t5;PPHn8Ygeq1";8u ݑnogw!2N>HvFrT}B${Jo\ju ;j䦪6?}wv+f:1l~c6<]4: 1vV>1*m f|lǙ{Tq}BdsKe#';]Ca#XAl:Yn[3]11.ڗQt"Zt6ʍv7Uf`U:Ȱ +D{"/~?ڗ避#V՞UC4mLnLVi;4w`/jSFL{vw"*Uܛ}Vhszp@):@"1=+gɭ65|!26AYs? ~kD GU )DzO]t8q][#T<9%&>%_چ_6G.S[\z]"ѧ7 xeI}s_"2YO׵ڡ>#gi~}У *DF|{"7^_:W =Fum=j@!r秺s/<+[3ܳ4_p4ZQ㽹&֬N_u?\ R|ijP/kev;?fޫw'+DnۜWT]}RtRm/D6CU7R<ԫW{տ[W_령3䦨Go=~K 0q㶶ul'6 w{iۯ.DF">>wI4;w?uTS"? Bki#ʭX#jdXՇY1oQHρmi<V\ywifGȨګecH^^!ɿ4RtR" \&G=W[3Zc͍v3lQӽ2`X!B ?Bd>Bd&6|GJJ5yq@אVy}kk<)f&bf[1k K)miؖ.m|62k-z QG5}!2C߱ɞ} _L(E(ՠB:veg>~s̗3{=nx߰;V쌚7h̃ϢBd'E(BrB{&۷Mbw5n&`]v6VvL!PgP\n!2tjh!ľ9=uk+VYv =]h#dv| !w9Bdԇ^fp@):@"ې\Fրz5~;_04Nǰ;"!Ș=ksمȨNPڨs zsnɮk{5$p]#O!R撙Ezp!  
8T [ ;+(i5\5dW4VC1U;찻Á)D6 zl!צ~GJJ5 qG7fw#t" 3bfa'smnfXtݺzh!@ȹ 8T;"[,躁K{ĭɯYeV#|cw7Jɐvw:"z*oӎ9G";~m ~GJJW!ts.DC{/Ԙ7ׯ Q*vw:"}+I":?BdǯMmj%3Kg0PW .g4ׯTvw;"C.< =ki#ڧl1uW[M tYfT l`"z`!&6|GJJ]flsף_f]Եz\a,2lFS~HL"~mo3w8T 7cCZO;Wly<+jWA[v`3*D]Hx z`!צm&jp!6cjJE%53vEXȝ_ Wy<Wv`3*DOɛCQ";|聅Ȯ_;PPܵmt(9rskͻv5+RlN=Cq!xI#Z :Qm5w܀ym v"lNH9Ch  _;PP_+CT_k*]~C 6/6szJ\?#@ہͩ|`4;VO"rcBdޝp=DF~mO(E(ՠBds'֓]nDeˏ~CμoCtڈJBdԁͪ,ٜu?Bd>Bdfk~GJJ5^*柦|4U_g?_yOwB'Zrn6BzMP= 1; ]6'jK!6$ v=+5k}:@^&q=v`*D*W7gȶ垳C!2CNxBdׯM9=w8TRbWĮ끋z~^ikV1WnxcG>Vij[5UzÇ"ox̰ӁͫmF8Gjg9^^!y~kҠ; ]6RtR+D{ ~r*#pnޤxmfV (W!:aBdޝȎ_szp@):@F"^-oWcY˽vOTs.6BdA?+DZe0{"c?/DvCGJJ5dcKWwm2/?^]!r8/i/DF~ '(DvGJJ5&}޿ӛ_wlXF?H?8wcf9Nw々" H.i޳w'k{N(E(BFFl5+vl" ;s'6BtEwBy9À[[лNQ=jP!r[|"/7,?CoY3QltLU<}R{Os{`s,D>{W\n!ҘlIw!2CN8I!צ;PPBիүn뭹jӟ ]33|+'ߑOxod9+umKRtf?wC([d6C(]0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):Rt¤ )P`y&E(<@MPy 0s0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ;5):@a 1wjRtc8Ԥ1pI c0P`y&E(f E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(B$@MPy 0;5):@a 1wjRtc8Ԥ1pVl IDATI c0H H H H H H H H H H H H H H H H H H P`y&E(<@MPy 0;5):@a 1wjRt`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(Db8Ԥ1pI c0P`y&E(<@MPy 0s0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ;5):@a 1wjRtc8Ԥ1pI c0P`y&E(f E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(B$@M/z?9ݻc8Ԥ1e?޿;5):@fïyP!r|wjRtk<~_|wqI׼ȿqI6o [5t`rNGy?k?3~l{TUqH&u3?yBV0Պ?3|rOaW)D`RtOW?=U߿y `b__ , Pd)D`Rt=q)ȃCwYT'CۢH\,kCwEF"0)::{"[=TULʘ-sOUA0k3Su^#"0):w,-瞪 `R=GOWR瞪 `R=b6?SOH&շQȠ]U?9ο\\l7~h@0 ,D k~jrGn7-oyzwn/[}^MVys[V掟o|}H$gOٜ]ůzG!hK~'gfrȢyUך_<|ך_V!G)Eޅg Y]:u_9l}QZ~ξ.E/VYn>O*{;^Uy<[`Rt)]}~dȜsb,JXruBd;;ʍBdR'z̢X1sW />_3sb/)D(SڿZ4 ]rv"/_㵳ros,D_3sW"O\s 8N):(-ƿnBd[(DhZ;k?Qܵ4(>z 嚙MDxMG!hnzu_x/aW^,~,D~[84(DѺɬY"OU>v7si ̚劙V!;nvvجy~Wi(DVknzg?\j!RyΪ/{.D\ggjʊ 
}?oo\ȟٌU0FjYhlKL!XB%w-+l!RH[mU ۋLh]<(5ΖmU,ۉ֏] >+j/],SgUڃV!l3[ c]f^h\VR-D^h݃wF!R] 'rK[t c%W.YL!'E׺jA[<}Ȳ,_=RiS|ffb&W4V̬OW=wϪ 533BhV,~"cS Z(VQ+DuGmεk͵/O zQݥ=:%HzMBffb&W.YG}N GBl 暙z!ҺBᨼ}HwZ{+ėe =xOV|;I&`Ǫ\85SVӴӠR4Ey}=ȝ֤_/sVk/kfnm'ueG’ UO<-DT#&S4.3Q%K&q%ǰ oϟϚ`~|ޫHuLeLY{k󵏥|v!$K G)D`J):>sG^sLY_`YsOٜ{6ӵf5w4HḙyJ5S\{tf|ԝ o7}ȬS6^+D6kf+fZH"mU٬"-S+">y_Gw!Xkjɵkyɡɥrȏ6Dw~9ۇ>dփ)y"533Bdy?^FrdfLuLfSvV&=ȭ*hL!'VEҽjmĪ Q R/Zyd{7a)sOٜ{yzUu[OyKkmc+Dk+fȭty!_tvw`EqZײHZ.Yݮ,<L!E_?t"gl:`Nz!X3F!RۯtќO!(3^Y_1+DnɽBd٠ܜ)D]>XXO!R/;|4l<a~x?Ȱ.5W}w,?yBz!XF!tsԍ;:BdU[V ~ɅE-Dn BdYJ,*"_u5D>Ws \PMBdoi"wuY=pģ+Dڊz!|z G^<ȗ; W,^Y<-.V|1[,Vot5"00 b[j+fȳ{T|"桶b^,/h\"|eeQD摅BdT\l!Qf,w&_,)Q(D*\R+D^1zd"g;n*z!5ru#A6?+D꯷||Ynqb,_|i}l<7dY \PMBu!Y{zL;9TK,DVMDgBdY`[k A%J"d}#f!l..|!,oh8vO\SRt(Dhj"Ϯ5 B5 -mUYKHY^W5THe*Q,{L >z /BdST~Oyrnt"gbo<϶oUJ UTT W̬/0y浏Y]QK<+o.Dr-xiY^qGg!\__9˝E"00 ZٝRlY1䭛}THfV!qFg򊳾BUfQ|oYlvI4vUջ*D` ):@a"4 Zb6ȳ4Mߪlzȗ[Gn|Z66*[~_!,Xnf!X3BQE1jb*D`):@a"pIֻͽxkw+;/jkݾQ Q Q Q Q QpI c0P`y&E(<@MPy 0;5):@a 0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"1wjRtc8Ԥ1pI c0P`y&E(<@MP9@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@PMkINPMuqKPH;ojpLRtœ`~Ng):@aNn0?'v޳uJ`0'6; }9hyC4"C9xyC4"9 Q>D#0LPR\0 ш F!ž"!E(B=)D.CP{R\0  ):@a"ik!gaf+E(B=*Dt"Rt(DBD%F!ž":0 S!hH a_;!*0 k!H a_{"*0 GXJ ao"*s):@a"1NׇhD"S;xy߯Q(Dvb8w!qbl:D%qZطшK sZ8,4CT"@Rtœ`NuoJF!ž6uJ(WPQhDRQjq)J(TP5ThD"QzqYF(QPkVDQfqF(NPjYhDҤQvqF(LPT[hDQ\qɅF(JP]hDQ|q酈F(HP;шH aJ(FPMYhDBQ0ZWuqBD#!E(B$1@F(BPc̏1 P0<#>ݽ P0G<#>F8}):@ax0?j{{Z"9y+-Yh IDATDShsb"ĥQ0FoeqBD#0 ƈ,D4"IK aЅF8e):@a" 8x!NXPÅ"tQ0ضbBD#0 SNPCm-+(D4"J a]$F8Q):@a" 4BD#0 TLThDF!03*D4")J a!=dF8A):@a" 1P.E(B!fVhDӓF!Z prRt(D`XI1e!NMPH:;I pbRt`~:(-D4"iI sT 9>ш'%E(Q 'B!04#: pJRt`~R'&/D4" I sD91"tQoD;]PFF8):@a"QND"TQkL7RhDF!B1ՄB`w):@a"SM"4QgT3ThDF!BQDT!NAP= ݥQc\/VhDF!BqD\!_PF ݥQmd+XhDF!BDd!]PvF8r):@a"t[I(DvF!BDl![PHc8BD#00c8 BD#00#8Rt`~༏# pRt`~༏# pRt`~wh#"K 3;3(D4"J !o2bF8V):@a"dE̢шG*E(B]Rt(DڥG!SP9;53)D4"QJ !g"B!0 2v+"RhDcF!Bn=l pRt(DحP.E(Bk"Qжc 1BD#0 v,!"K 
ebNF86):@a"A̪шG&E(B]+Rt(DhڹW!KPM;73+D4"QI 1{]P'm}bnF8&):@af<"`~{"ٻ^٪,| _m)[:EΑBn+8 &>&r:GA H4{F͚5|y`^U"{άw1oz_3"#zu?>0("@5"{άw1'9A/ ć5EEdAj "P3ם"{"\wVzXgQD:DD U:#pyA/ 5煇EAdAk "P3ל"{"\93;7("EDrfuDƋ:#p tFʙaAD. ¥s 0^dAK6UEX3Mx=@gN"n=@gk|.+"j=@gָ`A/u?;8=("EYbރEX3+\̻ tfyVD`"{άp1'A/ … jCADV+ … bC AD* … Z 0^dA *"TdAH x=@g"5D`"{"<4Ei$("*EDx` tFIBC-AD( tA`3L "B=@gOC* ">=@g ""{"0("DD/:#0U*("DD3("DDru(1"EYbޏu%"KdЙu-X>UaDƋ:ż#zݧ* EX3Z;}P[QD5:ż#kz' x=@gִcP]QD:#0Y^DƋ:#0Y^/("zDD7]]0("jDD7]\DƋ:#toPcQD:#to tF݄m +tF݄i tF݄eA/ һ BADV! ҹ) 0^dAsSZ"AdAsSvj"@dAofA`3H& EtFۤUA/ ҷIBADEDru6*"EYbޝF"d:ż;+yݧm UEHtf%ywVO"{άd1:^BADrEYbޟuA`3X1qQ<("@3HǦ EtF=A/ ұ{BADED:6uNDƋ:#tlPQD<=@gM]"& үc 0^dA_DŽ"d:#k tF- I"{"<%"ED5}Jh#("@3H/ EHtF!A/ ҭCB+AD2DD5}GDƋ:#jLQD=@g  ˋ:)u""E_P "3y_" 0^dЙżO K:w*u!4D`a=@gsȓ"{"#!D`Y=@gNQ"{"} 4D`Q=@g>"{"}% D`I=@g> "3Hfx=@g.͓ "3Hx=@g.S "3Hx=@g.Z "3Hfjx=@g "3HfJx=@g͔ "3Hf*EXDdA$G>W8DƋ:#Dj>}:#Dj>}:#DF]:#DF]:#|gE_dA?UA`3Hf E]dA?EA`3Hf-E[dA;5", ҝ 0^dA;%"+ ҝ 0^dA;ƃ"* қ 0^dA73փ") қ{ 0^dA73惈"( қs 0^dA3s"' ҙ9k 0^dA3sւ"& ҙ9cAADtFɑ "{"9^Y[AADftF!Tk7 CdA$ R5 =@gH.DDr"[mzw.[ "{a x< "]7D`=@_-) "0o'DƋ2;|m !vn" H O77O;-teL ,ý=?s['A'3g~"L/XܯS}Ou[Dp]~Da̕@/X3WQ~̳}s]A=fE\d3zg渋f7DkH ,쓿/鶈iO]A#AOAD,p]D[ ŽAWAD&,y\@ٯ "7w"DƋB>w%͐z-AN}EVdcson"A.x=?xei]cL>^္ AgAD&,j9}Ӿgfs>"="Ecߗ\7@ !ނ"L)XTzZo]qDa 0^du1'UkDA>{.(""{EnrB|N=w_"4a7h[d_2sJظv= HxENdR(Œ}ϲF ҆EAm=& "|AH#} E*ŋEq#ϹK!4bѷhZd7 "7o r_@AD,i Dg6 ,i NDg6eQDDK:?ܾ}AHx="|Y D`=";7/e tnȳ}㇂,"{E "Gs ҋ@AD,/~}79N,"{ED@sD:D6(""{EDZ` "X" "EPaKd~"/XTƩoy@KTA``Q:pԝC,Q:"p`QQh%7 0"Q@/Xցo_ pD"+Xᯙ9-"{em>?l.9|OՃ>,"p`YRr R_E 0^d\ki= nH{{7&E |GDxAH{{7&E .o0-QDjL<("Y"{m> rn!"UDZ (X#"w|7? 2c#H -~-w~> "j}QD3D{~/Dk d@"{NDk ("x=@}E;ECaj "E)~=_0#0B5@QD"{W?"P D` okOFB1@"tFB-@ tFB-@y jtFR)@y jtFR)@y(*tFR%@y(*tFR%@~u:#t` \~:ED:TD6pJ=@g,7RdA}uAd+-jtFib@:#Xu_"ygDDr"[ZtF!4nhIdA$ Һe[!=@g 5AE[%=@gHysDD\D~DD\Dn~DD\Dn~DD\Dn~DDZ`Dn~DDZ`Dn~DDZ/ܒU:#n_-jtFi݂ r[$ ҺAdGT$ ҺAdGT$ Ҹ%AdW# Ҹ%AdW# Ҹ%Ad?=@g- "{dՈ:#4n_'/EdAqK>@-"{"m[tD*tFɱ/5 4! 
Co3Ђ3HA#@ "{"9> :#Dz2 4  Co4P3HӖ]CDD/T  Ҵe~A?=@g- "e%tFiڲK rX_~=@g- " EDZ/d1ktFi+ R]dAe HQ_r=@g- "E EDZ/e9tFi R[dAaK/ED/ZdAaK1ED/YdAaKQED/'XdA$"۾ ro9P3H%^ŗ}Ayrf=@gH}EDr"=*tF!tk7WdA$ ҭyt^=@gv- "d]+tFi r\V( Ҭ}Awn& Ҭ7~AwvetFi; r؞7DD/7H" =@gf- ""q=@gV%{"\:#*aD+EtFiU/'CDDZ "{{"lEDZ "{ " =@gV%l>Gz"lDDZ ""0=@gFe>0LdAQ  tFiT/#DD ""0=@g  tF!tC:#uOY}`3HA0LdA$ =$ C  tFiS/%DDڔ "!=@g&,~0DdAI9 r@"OX3Hr~A=D"{"MYC AD6"{"MYA؊:#4)gD E؈:#4)gDA؈:#4)gDEtFiQ/ "@IdAEI Rv(("=@g%-H DDZ "e"D&(iDED&(gDD3Hr}ABQDAdaHR}AA(:#uO!S "tF!tHJ"{"9"݋:#D:$%=@gHADEDr"D3H_/"eG":#4hMC)D3H{_cADEDڳI)D3H{/ "eG"]:#8k IDATg5 R&%=@g,_DʎEztFiϲ[5H DDڳ R6 ("б3H{]DʆEtFiΒ+MH DD R6(("Э3HsoD$ Ҝ7[aAD^ED܂ R&%=@g,& "t* ҜH DDZzK)DSdA$|RH DDr"DRdA$ !A(:#D:4<("УqctC!tHJbę;/7$}Dr":!("Сq,:,=DZj R&%1E.9 ҘE6S"g.HtTHcXD$FD\:s"A1K,"e'E# ".9 Ҙ2A(gĭ"ҙ,$DM;+;MW5̿& "@I>H&""-iJ)Ds7»#kfA!3-C "eト"=g7~"HCD$FD}A6gCDi<{`HADĈ37ȝglm!)HKfYD$FD:viįk ҐY"eE#lȣ/ F" c? R&%1 r7HCXO!Dh^8sثS A!3'D$F]1 " a? Rv^QDu=@gLŸF)D3HC_O#Dh\~?ٽMi2A(g>~Q1_ȱ%cTHٹADň3Ȁ _Ii2A(gsS'D"4-F9!#A$ !A(gĎ_>!#D:t~QDe1̐ A""9 "@I83 sM!th @bУ'v>yHl ""9 "@I z3!{|#4c}Al @b>D^u "͘ruG)DFW&4c}Al @b6_{;9UD1> R&%1o!.!4c}$Al @bOo8wfL% "@I ~G?oß{^H3[D& "4*F|XM=LfL& "@I8#'b} R&%=@g&L]G)%("Ќ~Oi%H&ۮ" "@IL\^bߏG_i"4a,H"4a H ĸcsz\O'A.D "!{m=rz6L\A)Du~!kyˋg?4DZ0r}Al @b̡k㑋,$ 9@1>r= R&%1e»?^BI ;mHACEZcmz?~n-E6 C Ĉ3qC.?2ry!hA$ ҡ" g>عC;f>>AH$Fy B.ozDd/A$ǤĻhHٜADň3;C2]]UCHޭDf "T/N?<Ȟ+fhswUEiHH Gv[[gHOiHHټADGvZΗ{ " ~G)D8N븼 !A"e3E*i{n!$-HfحGD$N?D2ef?A~3"esE-3n!Ԉ r R9VQ2A(g6yi󿗷y!hrp_i(HADň3oߌ[\_6|ntL,H Ĉ3 4kYA~c"eEj#lorMqC#n!r RyV2A(1_+kwbl+fnD7j= R@QDb1eyKNx狱HD7j}:ALJbԩbv!|@d RV "eKENm"?.o"{ "՛k> R&%1E3;#.C\E"Պnl`&{s "TodH ؃^C~my2"9&|g۬O&-DU?&yk?!;XH$:g> Z]-s CBADJOx=~ "9 "@IdA$ ҡ"u:#us> R&%=@g͹XH)[,("P3Hf]O# "@IdAv.֧Dʖ "( Ry"ePtFݼI"N?xϿtߛfv"y> R&%1O_cd6B̋)%""Dj7b}ALJb乷Ad RS"eE}PDv"}> R&%1z C{ e"1>~NI{pH ĘC7n O]_C{pHADĈ3!w~s'Tnz0Al @]bę_>!!D*^= R&%1!dHثD"T%Fyms_N=L-V% "@I~d{ 'HYDʖ"$N?ݗ&H^e2A(ӏl+fDr"J"T$N? 
"HACPD D:DG~dsSUAd A$ !A(g^sHR"Ոg-3 "u[lB)DqO<z"U[lD) ""rhHՖ۪D$ |Dd$AjmC"eIADJĨSHHܪD$F9͌#TmzA,+("Pwls_;/N:L-U'E#mHy)i RE2A(?bl=ԗ{AjnG "eyADĈ3?>=筝 Re2A(g6TDFDjR} RD@8#'lALJbAd߽MD*S"eAD53O#?z_>qwϞUHrvEV,9|rD>xϼ7p DꕴR& "@Iq's wr#+i>L)[CQD`b{߉D޾;/N4a[ze "ePc~D^KSA^Y+AH*"#=dD>8⪙=zԇ"eP}Nk"J[DDXw;Om +Es A^y+H ĨSlï׽Dg{9DꕷR $("R1e1W㣛A^+~HZ"c}w[~)fl R̍z?ALJbęlsn&_4s R̍z?Al5ADUgxi{"T+uK)Dq[ "ۯy "T+uK)[OQD`bęn}c_MDDQ# "@I~d{ 3{AAZ>Hي"+ "~ RzALJ#;CN1랼Q!)(">qA H_zAl-A/M L"N?"A!thAo8_~dsS=DTu?A$ ҡ55џ4p8Ƞo "9 !E8v "<~Dr"J"{s$k#μ}{抙hAV H Ĉ3ڱffO|@>#\DjP!e=DUg7~Dd7l[&*a>F)K"ĘCkf^WDjB} R&%1Эq;|?߸*"Z~>N) " ĨSۏ>&Vqp R2A(Qwx[AC|_Nv9AR"eAHQD _;vy̧Ë<"Tj}zALJbɏX!w^nƩ-wo`""Tj}zALJb{?C{s '10i) "ZzB)D8G!N4~o_p|yP@C~ RA2A({;97&l.oc3m^ "ZzD)D8'/_wϟiVۏx\^|sU/43'C>a"iuzALJ"{}Wo?;rz[cSDܖU H D pgA>v[z}AVKӃ"eAX "[nցۧn>"2ANC"eP,ksB>M1ӻ|Hs_!2A(u{l-RN?2CHACA\D&F+cr_Oo˻?DVN Dڄ}E=j/M!%RtXq=$,as}7?\{ CPv9\D~! s??|2<ԗXA)TMcAd}H!thAD|1̽bs7obb_r/}@R6} RD$!&̻8ҍBUu[t RmAlAd_K4'>Cz9/9#|ww@DS6} R D!'=9gV{7R(ŒFR6]$#@D [<1D<3M{'ݯlrDL "e+"$a9Sw^kH"lf?vJQ*} R&%1톏'/f o95+z< RUAlAD\17{c/'V_Df "ɿ <%(i>F)DאW>W߿D^jLD>{a)AFI1Hꃈ"b~y:{~ˇ=)ԤA|c"(g>J)D{߹ [R{s'oZ?"yoDjH%?(")=msu/?KrK4\=m?XQ} RVAQD Q zDD[GD "ч "5XD$=ꃓ>>"l{"ͻgNQ= RVCQD O z+w~ۧ=|9 rqgzo?# "JXD$ Ҭ&"fDDq = R&%1A7N?ͅ-//}5D:TIQD I xo!ry9_u IDATZV2 jH$~nlY~x([l.)Su)}WuD:TMQD E x&R7FYqyоA>ˮ'D "di(孃\l/D| ALJbcZ "Orȁ߲\LsABnϧD* "$i)(zƦ|ï bFВI2A(i)\67r7 R%"e5ETl}攻<}G "h4H Ā4D.;ݻ_O?ꁞ:Bۃ˟`Fтi"i+\k7ݾ`fo?Nr2A(i,(";Knn"gTHY]ADŀǴD\rg>=nzAb2A(i.no;> R "eE^z} "_݈C -7N)-("ȧ~o?,_ OÏ|S "YhmA)D6HA: #"eE#LG2[H ĀlmHmYGD ",) R&%=@g̻+G)7("3Hef]$ "@IdA.nD*",  R9 "ePtF˜":#e=|HYADED2ߚ<ALJ"{"uoM RVwQD`n=@g̶%OA)D3H]fے UDYdA.s-ɓD$ RI"eEtF1uiE RV}QD`V=@gH$ CPAD9EDr"D3HAC EftF!tHJ"{"Uc? 
RBQD`>=@g̰OH)D3HUfX'$5DMdA*oSD"% Rwi "eP#|_ޜzn"5|7 RHQD`&1ŝޛz>"5z5 R&%1=X{uYz dxbHY+ADyĈ3ASOtTHM&ތ& "@I8sD¥3Dj2f<5A ,bękAĥ3'Dj2b<9ALJbęAĥ3Dj2b<9A bę{?qtf(A&D " 蕝&ثqHM\g "@I>H"rA2A(N{;/?w񯏟 ufHYADSC2׼}WKz"TcufHYADs{n\*sOH5X]% "@I9tyґ53 R)Vy "eE#lorR7DZLJ)DqvywٜqADʚ "L"FDH Ĉ3?: DrxŶߊ,fq3JLII3&Mˁ@heZ&42&[X~^}̱vKD ʌk A3"Ad+\W" "@Mq 2 Kn+DD8Oq A "[ANjʌkĎ_,#lŢ:ERf\3%<.1F,CD2 AGo Xvq] R 9ʤW?\|ꕻK F,B ("p2UN=F,AD2eg7W~Xzq] R7BQD`2uo!>1s /+DL|ktOkIF,@"("0[G3z"l1"s/<_so +,DL~G^y߽CD6buqH ADʌkoy齥 k쭋D̸FOن5 "uE)3DDauiH Ԕ" "۰޺4An ,e5OS繩D2N.L'("0GI0A$C ԔD2 D ! HjJz"ȀF " ! HjJz"ں(An ʄOyoD6auIHXADS ϶{'AdV\[$ "@MAd9&.H,("p25rMXsm] R'5ek"ںAn iʄ"D6aյu1H Ԕ D#lªkbႈ"')^kw#lºkR"(#l[R:A)#l[R"'(#l[B:A)#l[B"ӕD`uH ԔD`uHݐADJz"[ֺAn T%=`-h.AD`04Z D qֺ߽An 4ekӻ3kϿ5HI&K:A)^WL/[#d"6("0IAd9H 2 A)^#,GD4nQD`25r Ad@E&(^#,GـVK:A)^#,GـVK"+^ r ͖3 "uCEU FـvKy:A)#l@AND ҿ;|H @M'yYgDAjʄ|g[K\] ҽ+|H sE*ʄ"D|eMDJOЯ25r_YDLx A{Wֹ:A *^#,G8XYgD Z]ek"Ȁ Z]ek|r Ad@lVI0A$C rU FD$\Z*#d"D{wFDzXD+ T F]hcCDKVУ`0HR H \DzXgDkt һz:AN.}c@ٿɏy wvM]nc= R'\? `;%.LD^;國̃/~t#.JD^;e!ϒ,6" nDo-M};sSr3 һz"AN!}o@g랼]!/93D 7Փ"u]ҷ̸ 2o^%-DD e5wTcǔh2"AoE$H r}](3 ҷzAN[F9D4wD^SO# "wKiЃ2zGEZzM= R'To5@uՓEC|`6Ak-4H R ̻cᡇ|vJZzI= R'Ԥ5+3{o&]|^NHKi:A*}@\'yWB7 "]K﨧Diekߡ[O|Ǟ"r ҵzANKmV&fB r ҵzANGv25r7"u}d D#t-FD Lx AgDH rQek"=K'D{8*^skw֞k "uҷ$D2:Ad7 ! H*#d"Dhp@Jz"ȀI~Ч`0H 2 Ad ! H]*#t,JD&JxR FXz/= R'L U "K不Dwd~}j~S "uT;2YW޽~7xg r ұZz*AN,}@D}w>W#$t,JDK{Pf^䝗!mHK:Ad eeO5!" "K/Do>(.{{RDnD:IO% "H}^uգi=DEXz%= R'"}@{eEuRyv~7ғ "uI4W\t "z>m|}IAWz!= R'$}@se5WOT}rxț{e&+LDN2_ǃ^ #_:G]g|`6A_}dH r<./C ]``H:AD;+3ygbX}.AsH:AT[*39+8DFO& "'K߃T9۟yt ~ѓ "u 4UNbn!XOIɘwO/'D5KnOUz/ID$v>/ ˟\|f(A$c=NDfhs?@"\h|fA$C 2GzPN r O Ad@mnHA9#9|rS4VID$/9~߻+'t+NDI߉Jq~b ҭz:AN'}'@+e5r!qHk:Adr-ȵq'<%t+NDJߋFq!v\{ ~r& HK:Admz-"oz ҭz:AN-}3@eE\nÛH7Dz^AgDҷ#P\tL)_'H.Cr<%* "oGh̺I઀/O#]{"Jo3"u#4Pf]uO\~<G"J/3"u#4P]fH񉙛^:AWf^wǦ>>C|-H H r +s/M3o! 
r ҫ9 R'%}G+ڳzꕯ/1"J3"uyҷ$q^{] OC#^:AL{VVκמ/|߼WH H r= ++ D:=DsJXWI0ASsAN9WuD2W9:AlO(#d"D`0H 2 A|nPH(#d"D`0H 2 Ad nQh7^ "Ȁ%Er|sWZdD:^;DEoMXM9v<,4"Jos"u2&̿9YAOsANYFގR IDATW>3Y:AdA;VPf\{j ҧ9 R',)}ʌkY/ ҥ9 R',*}ʌk޾Kd.?4337"]J"uȢ7),~#D!7%t)k# "Jߥr%wԿB r ҥ9 R',+}HǪz Hҫ "Ȁ5`a國9;EB ! Him */9ٻD؄"d"DZhz+|w)חqO7:A C҂S ҟF9 R'4`2R\rνD^(gD670U]럗ykg_:໋NHw|H Fʬ|*{=g|-Hw|H Hʬ] ëwx-Hw|H Jyʜ>C~Y ҝ29 R'4`2G`⹪7 "I "uH3f)3|OM|檧$t'K' "b̸2wT't'J' " oc̸OЌ t'I' " oc̸틭fDHwқ|H R>ӕ/|H=D(A;Er>ANi*}#H "7"IgDҷ2~ GL|рޤ3"uH[[NUND9 қyANi,}/"gDz^" "of8M9C AA7 H ZfӔ/-3gD2w3"uHsok8OqjytΛj Ad@H{k8KqͣqXM=HD$4e5>3sLc5#D2 "ol8Gq?~b ! HIh|gʜ !O޾D2s"uHD{f+s.LLj\7ILz{< R'Doi̺x|?~[Τs"uHFʬ.Rʃxȓw^7oELzy< R'oj̻[d!<߽v|mHgһ9:A$%}W4euo^yO|`A3H `2'ȭr􁫣D:^! "1&)s/|zr ҙxANI0E_;7oWowӊȿzs՛L\.%2EDҷ7ԕ<>|w3S3w?d{D7Ƴ"uH\2㚷/.=K|ȗ|WH W?> "}I/gD 5K|bsaLz/{/zwA$㎿{z]< R'䵿`r%Gl3I/6C3_"Ȁ`r% {e ?,"}岦|o-"H 2 Aov~#3 "/ȇӞ"]/EE D$S׼gGxSDo9>Ň ! HB&*3yo9B>4_Ps}$)$H 2 Aow̸->UqxG=hTɤ+t&*GD.eExyEw~2 OE |HWҫy:APf]w0d<_{ЈwlRzS< R'"}qeeO}??,:<3 IEH"~kW3 "uHWw;Rf\s-3SW~ rxȤDz^$ "}IpSI:A䲇Lz ғx&ANL~JzV "=dG3 "uHo7<hi rC&^(t$KDzE%=@K.D:^% "IŃ>|͉W"IoDyhi ??!HW:AC)zu0_T_$dws "uHw?ܡhD~zw<2A$D ңǕ?Oe} SN@D$t)qQ崗rS_`VTXrǩ"] Ad@HSNxw^./ךk.DOO!H 2 AO)_m_ԻD*N~ͭsCyH 2 AS(_Hħ&;yϞN5?![s ҏJx6ANXN "WOT}[v?E;o9_/s #MDz`byC?SD><<;~|ˡLyqH?:AkO9._AlH ҷeʋ߹໷ѝҫˇ~~g rDGz< R'-}@Gw~0-"yex;?anmft "HDΥWOy)"GW??] "lVz< R'.}025@~H~zG̸[AyJDv ODz>]C9[{o*"Zƭ rWID6) OD>\Z9ZOoYwFFz< R'lA`dekD2#"x>ANل̉DZD2nk:AdBg" "Ȁm 4&d"D6"t(@Ad9H 2 Ad+B+^#,GD$lFX`tek"H DH Lx ApH 1 D#".@D6$}025r^"uȆT&FY ҋANْʄ"D:^ ">PAd9H':Ad[g)^#,GDz[ R'lKL`KAd9H'ҋ:AdC)^#,GCz[ R'lPd`(ek"}H}D-J Lxǯ2ӫ H[":Adg)#!-BD)}80`0HͿ{z[ R'lT`4%=` Ad@FŎ FS FD$lU`0%=` Ad@f R FD$lW`(%=` Ad@vN R FBz[ R'lX`%=`.e"uȖFP FBz[ R'lZ`%=`.we"uȦ P FBz[ R'l[`Jz"]HzDK^I0A MoH us+#t!-CD.}Nw%=`.e"u v ҃ANپQDz" ";>+ص`0HKB:AdgV FAz[ R'A`Jz"=Hx D]HXI0Ao!H *# -DD!}\_%=`7"uN v ҃ANً^D2^׻"un$v ! 
Hُ~D2 "<<د`0H 2 AdGU FD$I`Jz"Ȁ]ITI0An)H /s)#t -ED%}n?%=`"uΤv ҁ^ANٛޔD[b:Adwg;S FK/uDIKI0A$/-FD'}x/%=`NAN١D+b:Ad{R FKotD]JHI0A$/-FD)}%=`>AN٧~Db:Ad'Q FKosDJ!EI0A$.-GDv+}%=`.AN٭!NDһr:AdҧP FKrDK#BI0A$=-GD,{%=` Ad@ȮeOv ! HٷD2 "=R؃`0H{z[ R'\P`Jz"Ȁ *l^I0A$--HDv/}q%=` ANٿqD܂:AdM+# "u V FIK/p D $ "CH(lXI0A$--HDƐ>Rخ`0HZz{[ R' "}Y%=`$ANET`Jz"amIH 2VDҫے:Adc*#7% "u8 T F K/nKD6 ۖ$ "#I,lQI0A$,-ID>Zؠ`0HXzk[ R' %}A%=`ֶ$ANKl`{Jz"YmQH 2D2.mQH 21֔D2 "I3lLI0A$C 2AD2 "J4lJI0A$C 2QD2 "#J5lII0A$+-JD>cؐ`0HVzY[ R')}%=`(ANT`3Jz"QMmYH 29VDҋڲ:AdX(#De "uȸ' P FJiDqO ^Ӗ% "K5lBI0A$*-KDF>k؂`0HTzI[ R' -}%=`,AN[%=`,AN\{%=`0AN]w%=`~0AN]w%=`~0AN^s%=`z0AN!}з`0HRz;[ R'PS FIJ/g DjJz"n0AN%=` Ad@?D(#d"Dx*}Я`0H 2 Agҧ*#d"D>~UI0A$C AS%=`V4ANRO%=`R4ANROu IDAT%=`R4ANJK%=`N4AN@Jz"AliH 5D:A)# "u/H@t ^' " }Н`0HNz[ R'@oJz"9mlqH 3ΔDr:A҇})#w "uO!R FIbDnIBt ^' "ܖ>II0A$'-ND8"}Б`0HNz[ R'pL %=`@I0A$&-ODIHDR :A@\I0A$%|@DKJDRһ :A{O%Jz")kH }Y%=`AN^s `0HJzZ R'p@RI0A$%w@D }2T FII]+DHMDR[ :A)G9%=`ֵANa@LI0A$$sAD&}:R F I\kD&JODB:A%=`µANa@DI0A$$oAD.}BP F I[kDN>(#5"uHQW Fɸ7 "u'IR4W FD$p1@k%=` Ad@ISV FD$p9@c%=` Ad@JTU FD$pI@S%=` ANt Jz"-kH Jz"%kH  vJz"kH , fJz"kH < VJz"kH L FJz"jH L FJz"jH \ 6Jz"jH l# &Jz"jH |3 Jz"jH ҇ D"һ::AsO-W FHVDΒ>X]I0A$"YCD8O`m%=`^ANL DkJ:As.U FIHoU+DΖ>XUI0A$!TDD8_`M%=`ޭ& ", }z`0H 2$A%/S FID$jJz" Ȑ>XKI0A$Ȑ>XII0A$Ȑ>XGI0A$ LEDXL`%=`.ANa9S 5DҫZ:AO1P F HRkD>XAI0A$ IEDXR`y%=`"ANaQ ŕD{Z:AeO2V Fi/EFDXX,`a%=`Kj:A3U Fi/CFDX\4`Q%=`+j:A奏3T Fi/AFDXA<`A%=` j:A54S Fi/?FDXED`1%=`j:Au4R Fi.8_I0Ag 2 AU6V Fi" Ha] sDZ\ "-}p`0HkiEH g)#^V$ "/}p`0Hk}iEH B3DZKK+DZHqW Fi--HDh"}0[I0A"AN!\%=`һҊ:AFҧ3DZKJ+DZIsS Fi,(IDh&}0KI0A&ANI%=`kҚ:AG3DKoIkDZJu XzIZ R'Td%=`;Қ:Aҧ*#4^$ "4>8QI0A&ANyiJz"mU "uͥO<NR Fi+JDh/}p`0H[[='@DHy " H!!}0]I0AE BD``0H[+ 2 AT%=`.7Ad@!sJz"m].(ȀRӔDJEDbGDJoEDrgSDJ/ED҇DJDDҧ+#4^% "D?U Fi*KDJܧ Tz!Z R'>GI0A>.AN!-}PW Fi*KDKTDZJ/C+D҇ 5%=`һ:AOA*Jz"-W "u=Hܭ RzZ R'ЇA]Jz"- "uHܡ Rz Z R'ЋQq%=`[:AnB*#^V& "#}pLI0A 6AN##Jz" ][F.]hH BO!D"DJ@`0HCVAd@}IT Fi&" H3#Jz" ][D "&}&`0HCAd@I PzY R'Пu%=`ҫ:AE)#^|V' 
"(}0\I0A޳:ANK+%=`vk:A>F.DIo=D:>8(#^zV' "t+}:LI0Aʳ:AN_Jz"7 "uKViMi'NDY@iMi&ODZ@iLi&OD[ LzY R'й! 0`0H3]g}H Bҧ$Jz"ͤW "uKc+#4^t' "l@ZI0A>ANa '%Jz"ͤל "u>*V FiŕYwV# "lCWI0A7Ad@>-U FiŅC VKQDZyqD$`P%=`V^\7 "lGSI0A Ad@[>2FT Fi%4 "lJPI0ArӀ R'-C`<%=`VһMH ƤOMDIo6-"u[>7FS Fi$ش "lOKI0AZӂ R'A`(%=`F[M H NDI/5-"u><R Fi$Ӵ "lSGI0AJӂ R'U`%=`F M H fPADH3M"uە>BP Fi#4! "lX BI0A2ӄ R'eC`%=`6һLH ¦OQDH2M"u>Gv FziBDغA w%=`6{LH OR+#qs' "uۗ>J ĭB OS=+#4qkD$ `Jz"MZ) "C<د`0H6 Ad@;>Pv ĭB nT*#4^D#}SI0A҆ R'#C`Jz"Mw6:A=I{T Fi"! "K\؟`0H 齥ANag'+DZH-"u{>Z BzkiDD؝ 3%=`KK#H OW])#^YD(}II0A҈ R'K`GJz"-F:A}JQ Fi!4" "T؍`0HmANa҇,NDH*"u>e @zUiEDر1 %=`қJ+H žY=(#4^TZDv-}@I?|o3~͇//DH)"u;>j6ϞŐK򃹿LΑ|_Y R'w`Jz_tD(t Ha҇-ƕ ?C>17 "]96"Dؿq m%=@ΊQD>|( Ha`Jz>?5 ":4"DAز_]>N_>?,"{o7 "=93"DCخo=FgeW-DND>s6h3yg?4PSz9iFDEت>Ku~?Z8̣n1]EPh5BY$6p#P0 fH c{"_Q-fMG3ܬz=[ufz0cDV%}lҌ R'Џ.6=>W9FYI;H BG/T4ĩ O)NDV&}\Ҏ R'Г+/m+'.|4>'GAdu҇%"u]IlPIq×Uy1/gꤏJD:~؜{f>xU֏;AY\AN7W_):Ԍ|\/:DY!I;H Bw/S4uЇɼq9OHD~ؖJ[^WyMYHCH B/[R4Uy_ث~uvEꤏFD~ ؐڅBF^UƟI4$ ")" %=@S~1cfF Adic:AN_6h=nn$,-}(Ґ R'Э0FM ">iHDW`JzjAv}I4$ ",R %=@KODv!}Ғ R'е1- "whdyH BүW$߉#ACK]IВ '  "t/ r%=@K8,D:$@`Jz;qT tHIhIٿHŧ3o=Dx% ^%=@S rvۘt#AdY#:AH0UIД w㎦:Aү+U4UX2 "Jv4% "p~iXΥ:D~qXʅS@s 2c:AnH:PI0 ӏ>s "+>hJD3MUGT">hKD+4ڔm==ʻi*uIn% "pG%`eJzNITDV&}і R'=iU):?Ls5UIk% "p_U`MJzNvNYF[H /+R4ĥB[6D%}і R' 5j{ȉ~ .㌶:A_֢h=g3ʤ2D8!Z %=@ksAn]-䃯LhND<qJz"IK4' "p8@[%=@g夏$D8_3r "uL~%h QD{H _):#,&} ў R'4WsVJz"IB' "0U3b "uL~Ah C{H H4PtFYL=ANK_WtFYJ!@D2uDR׿yC4# "p:Jz"Ko "p+;Jz"Ko "pK;Jz"K "0;rJz"K "0;bJz"Ko "0;BJz" I/$"u% 3B҇ H I, B R'/ (:#,$} "0<Jz" I)$"u+:03B H sK̫ aB R'/*:#,#}! "=Jz"H"D"u,!r032GH H̦ B R'Bү3):#,#}x! "k>,Jz"HD"u,'032H KJ\ A R'/+:#,"}\! 
"K?Jz"Hd"u,.p3"H K\ o- "u~ wHh"LVtFYăvAC4^&*:#, !AZI/ӔDn] )Jz"Kxx.tH Jz"KH"u4^ VtFYB0 DDZp3!H D>Dh/ @ R'@BzE ?ED "$VtFY@z" "^F*:#, ODHI D"u1Jz" HoS:A %=@g)H QA%=@g ?o IDAT71H a%=@g1H q 2?FD /@ԔD"uAz8 2n?FD`ҋ)%=@g1H k^&D"uFzTtFݨ}kH +^*Df7j+.tHUI/Df7j#.tHuIwDf7j.tHIDf7j.tHI7Df7j.tH5JJz"Kos:AV)h\):#-DXpPtF[zc$ "Z൒3 ANK/7AH k^<AH ^>Jz"sK:A.+:#,ODXt 2n>ID`ҋе3{$ANMH/#@Jz"3K:A6"*:#,OD،Rt 2&>JD`Cҫ Х3ȼ[(ANMI'@Jz"Jo:A6&):#+}D؜t 2=JD`ҫ Е3Ȭ;,ANMJ/,@GJz"~/ODبt 2kAClVzq:QtF͵ !A6,](:#jZ [^_D4~g-tHmK/1D4~_-tHK/2ޕDޫ "ul_z 2N=LD` k%=@g97aH ^i+:#(KOD؉bVIAdF=z R'^W`Jz"3Jo:Av$TtFQz& "+%ء3Ȍһ4ANI/:Dޚ "uNzv 2-y R'n`'Jz"I:Av,PtFMz7' "kE؁3l{ R'c5  ry5:A')%=@g˥wΫ! "h (:#\.m^ ANΐ^3қD< h ry=:A8Wz*:#\,]^ANΗ^Jz"KoCDILIAbz"u0Qz):#\,O^AN&K/u@%=@gK7+" "%3ȥ;D2XZIAR"up, rxE:AAzTtFTzk"H "):#\(/^AN^eD. R'I~Jz"JoDD9?`~%=@g MǾyg "u0̬ rXAC0"̪ r[XACB̧ rWACR̥ rWACḅ r{WACz̠ rxU:AXTzE.VtFHz.H ,,(*:#\$]AN^KD. R'MF`3EһuDLStFHz˻.H ^)Jz"HwWFDk$p3%қݕD2  rNwe:Ah/P(:#\"]AN"k%0ZIAM"u^.qJz"HopFD PtF@zw6H DMA%=@g k# "@ZzJz"HolFDH/@EIAdvu:AX TtF.]AN"Jz"ӥ# "P`@IAdK6o+! 
"Qஒ3dB "ꤗR3dmA "WSZIAdH` *pPtFͧ !AX VtF겭 !AX " "Sw+$ "ʥWV^IAdu:AX }+:#L޳ R'[^^g%=@g5D`#K,t 2QzFH lGz>D&JoUHD-IУ34}* "u5zStF&G]%AN6(@WJz"Ӥ7$ "6\GIAdt:Aج (:#Lޙ R'[^y%=@gIuD`ҋ/^IAdt:A؁ VtF$]'AN!~DHEWJDHS%=@g)ѕD`OK1QIAd.t:Aؙj StF"])AN' D&aqDD}J%=@g f= "DJ/%=@g f: "D=K%=@g f7 "DK/y%=@gͱiD:$^`Jz"c(tH^`Jz"Ko5WKDnWjب3һD' rs:ALz):#-\/AN^`[Jz"gKo.KD.mؐ3ȹDW rr:AYz-(:#+\1ANz^`Jz"J&WLDD):#)\3ANx#ZDΔD R'\KF%=@g37k& "tX3șқ5D:IIA<} "ukQtF9OzϸjH pZz5(:#'_\5ANJWtF9OzjH :Q%=@g& "{):#eޛ}[AN-@DIA,m "gI/\IAm "J/VIA "Sh rvwHh rvvHK73[DKw3D73{D73D73x "u;YIAdo:A`̨ 2^z˷zH RtF/[=ANXPz3(:#魟 R',-W˔DFKoODHo`3h "u@3MLRtF+ANh+usDJ6@D8CIAdn:A %qJz"#r[ "Q< 2RzH N@UIAdn :A`[ 8 2Nz H } *:#3F> "upOIAd7aHI/ඒ3( "+fAIAdٷ^HUKo6@iLcM !A`{VtFc !A`+zUtF!- ANؔDFHȶBD'/%=@gқDmJCGIAa}f"uw#t l3:A`[ k;:A`v k;:A`7қv k;:A`_; nkC:A`v FkC:A`қ kK:A`; jK:A VtFy@z_%H Џm*:#ԥS" "Iocؚ3H]z')H Щn(:#TwP" "]Koj؀3HUz-H ,@MIA*kAN(`Jz"5< R'p[zڔDj "D%=@g>H*V RG қJz" nw "RtF9mɍ !ABVtF9- AN`vJz"'?# "LBIAg{:AG, rRzdz=H Lқ$RtF9% AN`V+:#l R' DNHonHDXPz Jz"'w5[$ ",."%=@g-DJo 2,$AN !`3Ȱe:A Jz"{mD[,Jz"һmDV"3Ȑ6e:AI 2$9(AN`қ/^):# HoIJDX& s%=@g[D6"VIA&d:AMI:TtF[# "lTzsЍ3]m6H.Mػ3]mH#aا3vH(wؓ3mH-ؾ3mv H:lVIAa:AvStF)i8AN_M6DnJ6ND@):#ܐ l R'  D' "0(X3ȵf`:AқA3ȕ`:AΖ 4UtF9J/; "p^3QzANYw):#W=DYz 03|:AJ̣ FzANһK Jz"W}DHo4F+:#^wBD +xHIAH ޅRtFyED` ;Sg%=@g=d6H ےޤ):#!DبvFIA$y3{1H ;Tt ZB "o}-}%=@gz"R Iu):y-H@#jJz>#o7[wD+ !AQdxu8ҷ[ݼ \ "c x9y\pHzANJ ݞ񑷾*CH ,fV G>5\tnHz#ANe({>4=\vNHz'AN"[o`C}} Ǥ73D`-q`XIC#c72D`[teA2]/޼ ^lvKDKoaJz^ޛxo.|.Hz/ANv)){>|2zgH t,}P4ĹO]›wD "uMcX>f,_ND&HD@C%=@[' \›*_OD>؀Ym=;֖7ozwˎHH >(MNG|P̅7 ~ R'lC~M2i0ᫀ\xW"^"urأJĎnJA$BANU4Uyc˘ˢ^xWvD/v=D.>bq%=@S+},7eA$AN`=k ):)1ƅ7eA$7{z "u>ۏZՎynH` AC<(}<U4%,M S3)B+X؈D0kvQ0 oZ ؿ` |n D`a# A:0D#ٍu hI^)z:Z|;C6h\xs`%JzjE{X)Ax[UZJM=h\xs`%Jz*W>~<7VhϿO_yOYJMUE%vus`%Jz|cK0Xӟ3ꢨXÉO0\›Pu2\LsX'a9 %=@cS9q9 %=@c/~G>4*Nu2 oAIG;q >XcxKgoVI޳GĕAX `8i|;wԥRGX a(iu/hq7VxwƗ鎺9Z%=@Ƌo}ftus`Jz^WSN@PIZIZIZIZIKs\Ջo~N=~xR:/z:Ypo~?ҳ̮ExG$ɲy7#<}wc^MK|[;ΞR%=,>;{ CS !0g=VaCͪ=ӬIDAT-M <>$=D2/23=I53C4aїӣ+)p:ί_IH8^@DϢC{? 
|kwpnN.`ô'* Ow*`χyg0rrhLqĂ38Gė'ў:o&82=ԛ/k M=;z8_wb! \u'k֓Lu㩽LqX\:vTR'\!8 "0p0ɩ5d{i %=,ɳ#m7aSgM9dNoۿڏ&vUvv4Oz3D2 dxj-yy lؕ㋿a%=,A6o/uS>_g^|~<`s\ݾxA})wu< &Lt!ޞ热'Ɲ5ƒP$Afs~4-ݾKA&;>8xSD/uY;:= > "ı8~)]DuWz4L7UƝ @ 3w. RnrwL"W0o r̔A8&D^ "0ٍ rPꚐOBzڮZX?o k@ D7|LuM_?|p7ݒ Ү?Izؤ{y)dA:G;?8K߾ AՅSyٝ "0Ցߠ7JzXBmO)Ův=` D`ei`U:"gy~W_/mh%=,p%=` D`b>OXp OM*` ,ro}KA&;K?sr0pВt۞Pp˩d!NM`4xu6|KzXsAr}$Y3n߀aꬫPpjh{n,HK\?usr0靧_? 0k"/ "A.% f'fSr5v*/zf̉"sK츇"A[6p1.'Ohyÿ{\3*`I/g; Af$^|d?zKj%=,ŷnzB+4!>Duݝ$㩒(~ovo~/ӣS翸TIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIʇO|>Kz~|1;HWΟ6VD]s7Oxr @Jz`U"?KgGu8_2j"VeT)!Q)ȱr<|?۔T2.:iaS4 "c? vo"AXNc;Tm}''^~DjWo~x>_jןk?3{|>?-O̩`q7o{q?3*s ~;jDN?r޷j\p?7#Qz?Pz /Ǹ>%UG=H%NX3XXAu|A?_ _Ss?| g "XXAѝAdxx8޾&w? \hon11g>'qz g "cXX |;c5D;xCy޹%WG?ˁ/}5ژt^ȨxgvM}Ŀ~ "XX 92s;0Ͽ{܄xǠDǃ?) ՝}7^ g"XXڛ: r<Goc_z?ҟx#kOv 2>OA;mz /"g<%=[o(W>}fn "Ǔn\:>|w}'73n_:~S>w_}Cɛ?:f?} ?|}xOjAd}DXXɃͿ0 XRWDλ?{N=0? ș"EI,x~tKNry~_>~\W8I?M8ސyqg:VSIݝ@^D|`-Jz`iǃ8ԇsݿgs2d |s ]o䵻STƽ3W8dp8灼, ZֱAd{>N3O?sAħ޽ }1g?a23N9 Z90[2g${?=v?:?>^T'Mf">%=Aw4 "?;3is.Q{?׉O^t GCoݟy~[gg䆡 rkQKO "?#?m&gty+'o?>&#8@^D}`-Jz`il2/|~xBgr;u)1"?py( Z*gg~?T+?D.}rKG=}*M "_; a(p "ǯ{OD"LEA֢&T8~D_0O@ ""|G "sNp)ЧXZ rJ?{ޱueYgAm+RO%=Ag~;qAd93. "wzf}3k{"ZDv`i"3|+ 3ͩ-D&' 3"ŭu YUɤz`-"; ~wo|-Ls"݀("3f"ɜzjBsjE/DVе~>L.D~R!R9cV!r\ w 2q! mײ/Dj"K[9yȶDRdkB9BrƬB+d{ŵ\ȪBq!RX҅HW@n )25BaC-W/~Ǐ ^ ljA 94p@}!RXZBtn@Mq9"GKN>w3"d} 9^ K$W."EdCdy;D^kHw{_¿ 3 m{5eT.d鶑/^!RXZBdx8ל ;S R?cf!= y9.\ 5\DX,KU?wq[ᶗΝ3ٓݭ|~[5cf!m*.zz?r! 
Hɸ!ZX罿8z&SQvL+?(JՌ6SB҃Kn`-"; ƃ_x~|—V42|qse⭗#o1?cT.dW*qy[wSW-"Edx!rp>bJ!r95yׅͯ1韫Ji-d?vدBj`-"; ?y3(8tϘ]KoV-d7*"U+-_o|XOs9uxaw_cB=/Yb!r6Bf`%";Gx*1\?<+wz^cBdީU,d)6"o<ߌ"+(D7ďnq"g O og/Dvj Y.D6ѫ)D*!KkR n/Ot[vۧKs ؝'͙4 H4+UyFr\ݟpѥ6f"*Dvg/o/~/>7\w'׳Sg2!.]=p!1L|x{ssNUPBd_=7s"KWKK;7bC v¹9:HJ5yq~ W"s"鼰w3q]aa9@PaHJW`:.DRr3ץ"لss0G E(B$HdB<ϼ\t$.8):@"%'ڈuS×]| ΄ssG E(ՈBd~#<FCi>m!ҙp;SS>:,DK{G "pRtR n/yO*QlJB$?j?U\꟭^8 ):@ +Sm)D.n`!R(D.%IJ5ULgh!r>; ):@"4;KC jg(D.%IJ5,˘6'op!fs ; ):@"au6#\l9OG!r/10HPz~_<{>>x"ȕ DX;wϳ=w=0m"w>įĝ}w^rT}zor ycw5,qw]Pa]3qp=w7jz;?F+<VCІBu^Gp'.w&C)gvQ з$9,\DXE<F~mӃ}GJJ5 ښֳ/yj*nƹH=lFeW =V#g)Dl-DUd!2C߻iQqwl^|t5;PPHn8Ygeq1";8u ݑnogw!2N>HvFrT}B${Jo\ju ;j䦪6?}wv+f:1l~c6<]4: 1vV>1*m f|lǙ{Tq}BdsKe#';]Ca#XAl:Yn[3]11.ڗQt"Zt6ʍv7Uf`U:Ȱ +D{"/~?ڗ避#V՞UC4mLnLVi;4w`/jSFL{vw"*Uܛ}Vhszp@):@"1=+gɭ65|!26AYs? ~kD GU )DzO]t8q][#T<9%&>%_چ_6G.S[\z]"ѧ7 xeI}s_"2YO׵ڡ>#gi~}У *DF|{"7^_:W =Fum=j@!r秺s/<+[3ܳ4_p4ZQ㽹&֬N_u?\ R|ijP/kev;?fޫw'+DnۜWT]}RtRm/D6CU7R<ԫW{տ[W_령3䦨Go=~K 0q㶶ul'6 w{iۯ.DF">>wI4;w?uTS"? Bki#ʭX#jdXՇY1oQHρmi<V\ywifGȨګecH^^!ɿ4RtR" \&G=W[3Zc͍v3lQӽ2`X!B ?Bd>Bd&6|GJJ5yq@אVy}kk<)f&bf[1k K)miؖ.m|62k-z QG5}!2C߱ɞ} _L(E(ՠB:veg>~s̗3{=nx߰;V쌚7h̃ϢBd'E(BrB{&۷Mbw5n&`]v6VvL!PgP\n!2tjh!ľ9=uk+VYv =]h#dv| !w9Bdԇ^fp@):@"ې\Fրz5~;_04Nǰ;"!Ș=ksمȨNPڨs zsnɮk{5$p]#O!R撙Ezp!  
8T [ ;+(i5\5dW4VC1U;찻Á)D6 zl!צ~GJJ5 qG7fw#t" 3bfa'smnfXtݺzh!@ȹ 8T;"[,躁K{ĭɯYeV#|cw7Jɐvw:"z*oӎ9G";~m ~GJJW!ts.DC{/Ԙ7ׯ Q*vw:"}+I":?BdǯMmj%3Kg0PW .g4ׯTvw;"C.< =ki#ڧl1uW[M tYfT l`"z`!&6|GJJ]flsף_f]Եz\a,2lFS~HL"~mo3w8T 7cCZO;Wly<+jWA[v`3*D]Hx z`!צm&jp!6cjJE%53vEXȝ_ Wy<Wv`3*DOɛCQ";|聅Ȯ_;PPܵmt(9rskͻv5+RlN=Cq!xI#Z :Qm5w܀ym v"lNH9Ch  _;PP_+CT_k*]~C 6/6szJ\?#@ہͩ|`4;VO"rcBdޝp=DF~mO(E(ՠBds'֓]nDeˏ~CμoCtڈJBdԁͪ,ٜu?Bd>Bdfk~GJJ5^*柦|4U_g?_yOwB'Zrn6BzMP= 1; ]6'jK!6$ v=+5k}:@^&q=v`*D*W7gȶ垳C!2CNxBdׯM9=w8TRbWĮ끋z~^ikV1WnxcG>Vij[5UzÇ"ox̰ӁͫmF8Gjg9^^!y~kҠ; ]6RtR+D{ ~r*#pnޤxmfV (W!:aBdޝȎ_szp@):@F"^-oWcY˽vOTs.6BdA?+DZe0{"c?/DvCGJJ5dcKWwm2/?^]!r8/i/DF~ '(DvGJJ5&}޿ӛ_wlXF?H?8wcf9Nw々" H.i޳w'k{N(E(BFFl5+vl" ;s'6BtEwBy9À[[лNQ=jP!r[|"/7,?CoY3QltLU<}R{Os{`s,D>{W\n!ҘlIw!2CN8I!צ;PPBիүn뭹jӟ ]33|+'ߑOxod9+umKRtf?wC([d6C(]0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):RtSKL-EZ0`j):Rt¤ )P`y&E(<@MPy 0s0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ;5):@a 1wjRtc8Ԥ1pI c0P`y&E(f E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(B$@MPy 0;5):@a 1wjRtc8Ԥ1pVl IDATI c0H H H H H H H H H H H H H H H H H H P`y&E(<@MPy 0;5):@a 1wjRt`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(D`Rt(Db8Ԥ1pI c0P`y&E(<@MPy 0s0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ;5):@a 1wjRtc8Ԥ1pI c0P`y&E(f E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(Bf E(B$@M/z?9ݻc8Ԥ1e?޿;5):@fïyP!r|wjRtk<~_|wqI׼ȿqI6o [5t`rNGy?k?3~l{TUqH&u3?yBV0Պ?3|rOaW)D`RtOW?=U߿y `b__ , Pd)D`Rt=q)ȃCwYT'CۢH\,kCwEF"0)::{"[=TULʘ-sOUA0k3Su^#"0):w,-瞪 `R=GOWR瞪 `R=b6?SOH&շQȠ]U?9ο\\l7~h@0 ,D k~jrGn7-oyzwn/[}^MVys[V掟o|}H$gOٜ]ůzG!hK~'gfrȢyUך_<|ך_V!G)Eޅg Y]:u_9l}QZ~ξ.E/VYn>O*{;^Uy<[`Rt)]}~dȜsb,JXruBd;;ʍBdR'z̢X1sW />_3sb/)D(SڿZ4 ]rv"/_㵳ros,D_3sW"O\s 8N):(-ƿnBd[(DhZ;k?Qܵ4(>z 嚙MDxMG!hnzu_x/aW^,~,D~[84(DѺɬY"OU>v7si ̚劙V!;nvvجy~Wi(DVknzg?\j!RyΪ/{.D\ggjʊ 
}?oo\ȟٌU0FjYhlKL!XB%w-+l!RH[mU ۋLh]<(5ΖmU,ۉ֏] >+j/],SgUڃV!l3[ c]f^h\VR-D^h݃wF!R] 'rK[t c%W.YL!'E׺jA[<}Ȳ,_=RiS|ffb&W4V̬OW=wϪ 533BhV,~"cS Z(VQ+DuGmεk͵/O zQݥ=:%HzMBffb&W.YG}N GBl 暙z!ҺBᨼ}HwZ{+ėe =xOV|;I&`Ǫ\85SVӴӠR4Ey}=ȝ֤_/sVk/kfnm'ueG’ UO<-DT#&S4.3Q%K&q%ǰ oϟϚ`~|ޫHuLeLY{k󵏥|v!$K G)D`J):>sG^sLY_`YsOٜ{6ӵf5w4HḙyJ5S\{tf|ԝ o7}ȬS6^+D6kf+fZH"mU٬"-S+">y_Gw!Xkjɵkyɡɥrȏ6Dw~9ۇ>dփ)y"533Bdy?^FrdfLuLfSvV&=ȭ*hL!'VEҽjmĪ Q R/Zyd{7a)sOٜ{yzUu[OyKkmc+Dk+fȭty!_tvw`EqZײHZ.Yݮ,<L!E_?t"gl:`Nz!X3F!RۯtќO!(3^Y_1+DnɽBd٠ܜ)D]>XXO!R/;|4l<a~x?Ȱ.5W}w,?yBz!XF!tsԍ;:BdU[V ~ɅE-Dn BdYJ,*"_u5D>Ws \PMBdoi"wuY=pģ+Dڊz!|z G^<ȗ; W,^Y<-.V|1[,Vot5"00 b[j+fȳ{T|"桶b^,/h\"|eeQD摅BdT\l!Qf,w&_,)Q(D*\R+D^1zd"g;n*z!5ru#A6?+D꯷||Ynqb,_|i}l<7dY \PMBu!Y{zL;9TK,DVMDgBdY`[k A%J"d}#f!l..|!,oh8vO\SRt(Dhj"Ϯ5 B5 -mUYKHY^W5THe*Q,{L >z /BdST~Oyrnt"gbo<϶oUJ UTT W̬/0y浏Y]QK<+o.Dr-xiY^qGg!\__9˝E"00 ZٝRlY1䭛}THfV!qFg򊳾BUfQ|oYlvI4vUջ*D` ):@a"4 Zb6ȳ4Mߪlzȗ[Gn|Z66*[~_!,Xnf!X3BQE1jb*D`):@a"pIֻͽxkw+;/jkݾQ Q Q Q Q QpI c0P`y&E(<@MPy 0;5):@a 0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"0):@a"1wjRtc8Ԥ1pI c0P`y&E(<@MP9@P@P@P@P@P@P@P@P@P@P@P@P@P@P@P@PMkINPMuqKPH;ojpLRtœ`~Ng):@aNn0?'v޳uJ`0'6; }9hyC4"C9xyC4"9 Q>D#0LPR\0 ш F!ž"!E(B=)D.CP{R\0  ):@a"ik!gaf+E(B=*Dt"Rt(DBD%F!ž":0 S!hH a_;!*0 k!H a_{"*0 GXJ ao"*s):@a"1NׇhD"S;xy߯Q(Dvb8w!qbl:D%qZطшK sZ8,4CT"@Rtœ`NuoJF!ž6uJ(WPQhDRQjq)J(TP5ThD"QzqYF(QPkVDQfqF(NPjYhDҤQvqF(LPT[hDQ\qɅF(JP]hDQ|q酈F(HP;шH aJ(FPMYhDBQ0ZWuqBD#!E(B$1@F(BPc̏1 P0<#>ݽ P0G<#>F8}):@ax0?j{{Z"9y+-Yh IDATDShsb"ĥQ0FoeqBD#0 ƈ,D4"IK aЅF8e):@a" 8x!NXPÅ"tQ0ضbBD#0 SNPCm-+(D4"J a]$F8Q):@a" 4BD#0 TLThDF!03*D4")J a!=dF8A):@a" 1P.E(B!fVhDӓF!Z prRt(D`XI1e!NMPH:;I pbRt`~:(-D4"iI sT 9>ш'%E(Q 'B!04#: pJRt`~R'&/D4" I sD91"tQoD;]PFF8):@a"QND"TQkL7RhDF!B1ՄB`w):@a"SM"4QgT3ThDF!BQDT!NAP= ݥQc\/VhDF!BqD\!_PF ݥQmd+XhDF!BDd!]PvF8r):@a"t[I(DvF!BDl![PHc8BD#00c8 BD#00#8Rt`~༏# pRt`~༏# pRt`~wh#"K 3;3(D4"J !o2bF8V):@a"dE̢шG*E(B]Rt(DڥG!SP9;53)D4"QJ !g"B!0 2v+"RhDcF!Bn=l pRt(DحP.E(Bk"Qжc 1BD#0 v,!"K 
ebNF86):@a"A̪шG&E(B]+Rt(DhڹW!KPM;73+D4"QI 1{]P'm}bnF8&):@af<"`~{"ٻ^٪,| _m)[:EΑBn+8 &>&r:GA H4{F͚5|y`^U"{άw1oz_3"#zu?>0("@5"{άw1'9A/ ć5EEdAj "P3ם"{"\wVzXgQD:DD U:#pyA/ 5煇EAdAk "P3ל"{"\93;7("EDrfuDƋ:#p tFʙaAD. ¥s 0^dAK6UEX3Mx=@gN"n=@gk|.+"j=@gָ`A/u?;8=("EYbރEX3+\̻ tfyVD`"{άp1'A/ … jCADV+ … bC AD* … Z 0^dA *"TdAH x=@g"5D`"{"<4Ei$("*EDx` tFIBC-AD( tA`3L "B=@gOC* ">=@g ""{"0("DD/:#0U*("DD3("DDru(1"EYbޏu%"KdЙu-X>UaDƋ:ż#zݧ* EX3Z;}P[QD5:ż#kz' x=@gִcP]QD:#0Y^DƋ:#0Y^/("zDD7]]0("jDD7]\DƋ:#toPcQD:#to tF݄m +tF݄i tF݄eA/ һ BADV! ҹ) 0^dAsSZ"AdAsSvj"@dAofA`3H& EtFۤUA/ ҷIBADEDru6*"EYbޝF"d:ż;+yݧm UEHtf%ywVO"{άd1:^BADrEYbޟuA`3X1qQ<("@3HǦ EtF=A/ ұ{BADED:6uNDƋ:#tlPQD<=@gM]"& үc 0^dA_DŽ"d:#k tF- I"{"<%"ED5}Jh#("@3H/ EHtF!A/ ҭCB+AD2DD5}GDƋ:#jLQD=@g  ˋ:)u""E_P "3y_" 0^dЙżO K:w*u!4D`a=@gsȓ"{"#!D`Y=@gNQ"{"} 4D`Q=@g>"{"}% D`I=@g> "3Hfx=@g.͓ "3Hx=@g.S "3Hx=@g.Z "3Hfjx=@g "3HfJx=@g͔ "3Hf*EXDdA$G>W8DƋ:#Dj>}:#Dj>}:#DF]:#DF]:#|gE_dA?UA`3Hf E]dA?EA`3Hf-E[dA;5", ҝ 0^dA;%"+ ҝ 0^dA;ƃ"* қ 0^dA73փ") қ{ 0^dA73惈"( қs 0^dA3s"' ҙ9k 0^dA3sւ"& ҙ9cAADtFɑ "{"9^Y[AADftF!Tk7 CdA$ R5 =@gH.DDr"[mzw.[ "{a x< "]7D`=@_-) "0o'DƋ2;|m !vn" H O77O;-teL ,ý=?s['A'3g~"L/XܯS}Ou[Dp]~Da̕@/X3WQ~̳}s]A=fE\d3zg渋f7DkH ,쓿/鶈iO]A#AOAD,p]D[ ŽAWAD&,y\@ٯ "7w"DƋB>w%͐z-AN}EVdcson"A.x=?xei]cL>^္ AgAD&,j9}Ӿgfs>"="Ecߗ\7@ !ނ"L)XTzZo]qDa 0^du1'UkDA>{.(""{EnrB|N=w_"4a7h[d_2sJظv= HxENdR(Œ}ϲF ҆EAm=& "|AH#} E*ŋEq#ϹK!4bѷhZd7 "7o r_@AD,i Dg6 ,i NDg6eQDDK:?ܾ}AHx="|Y D`=";7/e tnȳ}㇂,"{E "Gs ҋ@AD,/~}79N,"{ED@sD:D6(""{EDZ` "X" "EPaKd~"/XTƩoy@KTA``Q:pԝC,Q:"p`QQh%7 0"Q@/Xցo_ pD"+Xᯙ9-"{em>?l.9|OՃ>,"p`YRr R_E 0^d\ki= nH{{7&E |GDxAH{{7&E .o0-QDjL<("Y"{m> rn!"UDZ (X#"w|7? 2c#H -~-w~> "j}QD3D{~/Dk d@"{NDk ("x=@}E;ECaj "E)~=_0#0B5@QD"{W?"P D` okOFB1@"tFB-@ tFB-@y jtFR)@y jtFR)@y(*tFR%@y(*tFR%@~u:#t` \~:ED:TD6pJ=@g,7RdA}uAd+-jtFib@:#Xu_"ygDDr"[ZtF!4nhIdA$ Һe[!=@g 5AE[%=@gHysDD\D~DD\Dn~DD\Dn~DD\Dn~DDZ`Dn~DDZ`Dn~DDZ/ܒU:#n_-jtFi݂ r[$ ҺAdGT$ ҺAdGT$ Ҹ%AdW# Ҹ%AdW# Ҹ%Ad?=@g- "{dՈ:#4n_'/EdAqK>@-"{"m[tD*tFɱ/5 4! 
Co3Ђ3HA#@ "{"9> :#Dz2 4  Co4P3HӖ]CDD/T  Ҵe~A?=@g- "e%tFiڲK rX_~=@g- " EDZ/d1ktFi+ R]dAe HQ_r=@g- "E EDZ/e9tFi R[dAaK/ED/ZdAaK1ED/YdAaKQED/'XdA$"۾ ro9P3H%^ŗ}Ayrf=@gH}EDr"=*tF!tk7WdA$ ҭyt^=@gv- "d]+tFi r\V( Ҭ}Awn& Ҭ7~AwvetFi; r؞7DD/7H" =@gf- ""q=@gV%{"\:#*aD+EtFiU/'CDDZ "{{"lEDZ "{ " =@gV%l>Gz"lDDZ ""0=@gFe>0LdAQ  tFiT/#DD ""0=@g  tF!tC:#uOY}`3HA0LdA$ =$ C  tFiS/%DDڔ "!=@g&,~0DdAI9 r@"OX3Hr~A=D"{"MYC AD6"{"MYA؊:#4)gD E؈:#4)gDA؈:#4)gDEtFiQ/ "@IdAEI Rv(("=@g%-H DDZ "e"D&(iDED&(gDD3Hr}ABQDAdaHR}AA(:#uO!S "tF!tHJ"{"9"݋:#D:$%=@gHADEDr"D3H_/"eG":#4hMC)D3H{_cADEDڳI)D3H{/ "eG"]:#8k IDATg5 R&%=@g,_DʎEztFiϲ[5H DDڳ R6 ("б3H{]DʆEtFiΒ+MH DD R6(("Э3HsoD$ Ҝ7[aAD^ED܂ R&%=@g,& "t* ҜH DDZzK)DSdA$|RH DDr"DRdA$ !A(:#D:4<("УqctC!tHJbę;/7$}Dr":!("Сq,:,=DZj R&%1E.9 ҘE6S"g.HtTHcXD$FD\:s"A1K,"e'E# ".9 Ҙ2A(gĭ"ҙ,$DM;+;MW5̿& "@I>H&""-iJ)Ds7»#kfA!3-C "eト"=g7~"HCD$FD}A6gCDi<{`HADĈ37ȝglm!)HKfYD$FD:viįk ҐY"eE#lȣ/ F" c? R&%1 r7HCXO!Dh^8sثS A!3'D$F]1 " a? Rv^QDu=@gLŸF)D3HC_O#Dh\~?ٽMi2A(g>~Q1_ȱ%cTHٹADň3Ȁ _Ii2A(gsS'D"4-F9!#A$ !A(gĎ_>!#D:t~QDe1̐ A""9 "@I83 sM!th @bУ'v>yHl ""9 "@I z3!{|#4c}Al @b>D^u "͘ruG)DFW&4c}Al @b6_{;9UD1> R&%1o!.!4c}$Al @bOo8wfL% "@I ~G?oß{^H3[D& "4*F|XM=LfL& "@I8#'b} R&%=@g&L]G)%("Ќ~Oi%H&ۮ" "@IL\^bߏG_i"4a,H"4a H ĸcsz\O'A.D "!{m=rz6L\A)Du~!kyˋg?4DZ0r}Al @b̡k㑋,$ 9@1>r= R&%1e»?^BI ;mHACEZcmz?~n-E6 C Ĉ3qC.?2ry!hA$ ҡ" g>عC;f>>AH$Fy B.ozDd/A$ǤĻhHٜADň3;C2]]UCHޭDf "T/N?<Ȟ+fhswUEiHH Gv[[gHOiHHټADGvZΗ{ " ~G)D8N븼 !A"e3E*i{n!$-HfحGD$N?D2ef?A~3"esE-3n!Ԉ r R9VQ2A(g6yi󿗷y!hrp_i(HADň3oߌ[\_6|ntL,H Ĉ3 4kYA~c"eEj#lorMqC#n!r RyV2A(1_+kwbl+fnD7j= R@QDb1eyKNx狱HD7j}:ALJbԩbv!|@d RV "eKENm"?.o"{ "՛k> R&%1E3;#.C\E"Պnl`&{s "TodH ؃^C~my2"9&|g۬O&-DU?&yk?!;XH$:g> Z]-s CBADJOx=~ "9 "@IdA$ ҡ"u:#us> R&%=@g͹XH)[,("P3Hf]O# "@IdAv.֧Dʖ "( Ry"ePtFݼI"N?xϿtߛfv"y> R&%1O_cd6B̋)%""Dj7b}ALJb乷Ad RS"eE}PDv"}> R&%1z C{ e"1>~NI{pH ĘC7n O]_C{pHADĈ3!w~s'Tnz0Al @]bę_>!!D*^= R&%1!dHثD"T%Fyms_N=L-V% "@I~d{ 'HYDʖ"$N?ݗ&H^e2A(ӏl+fDr"J"T$N? 
"HACPD D:DG~dsSUAd A$ !A(g^sHR"Ոg-3 "u[lB)DqO<z"U[lD) ""rhHՖ۪D$ |Dd$AjmC"eIADJĨSHHܪD$F9͌#TmzA,+("Pwls_;/N:L-U'E#mHy)i RE2A(?bl=ԗ{AjnG "eyADĈ3?>=筝 Re2A(g6TDFDjR} RD@8#'lALJbAd߽MD*S"eAD53O#?z_>qwϞUHrvEV,9|rD>xϼ7p DꕴR& "@Iq's wr#+i>L)[CQD`b{߉D޾;/N4a[ze "ePc~D^KSA^Y+AH*"#=dD>8⪙=zԇ"eP}Nk"J[DDXw;Om +Es A^y+H ĨSlï׽Dg{9DꕷR $("R1e1W㣛A^+~HZ"c}w[~)fl R̍z?ALJbęlsn&_4s R̍z?Al5ADUgxi{"T+uK)Dq[ "ۯy "T+uK)[OQD`bęn}c_MDDQ# "@I~d{ 3{AAZ>Hي"+ "~ RzALJ#;CN1랼Q!)(">qA H_zAl-A/M L"N?"A!thAo8_~dsS=DTu?A$ ҡ55џ4p8Ƞo "9 !E8v "<~Dr"J"{s$k#μ}{抙hAV H Ĉ3ڱffO|@>#\DjP!e=DUg7~Dd7l[&*a>F)K"ĘCkf^WDjB} R&%1Эq;|?߸*"Z~>N) " ĨSۏ>&Vqp R2A(Qwx[AC|_Nv9AR"eAHQD _;vy̧Ë<"Tj}zALJbɏX!w^nƩ-wo`""Tj}zALJb{?C{s '10i) "ZzB)D8G!N4~o_p|yP@C~ RA2A({;97&l.oc3m^ "ZzD)D8'/_wϟiVۏx\^|sU/43'C>a"iuzALJ"{}Wo?;rz[cSDܖU H D pgA>v[z}AVKӃ"eAX "[nցۧn>"2ANC"eP,ksB>M1ӻ|Hs_!2A(u{l-RN?2CHACA\D&F+cr_Oo˻?DVN Dڄ}E=j/M!%RtXq=$,as}7?\{ CPv9\D~! s??|2<ԗXA)TMcAd}H!thAD|1̽bs7obb_r/}@R6} RD$!&̻8ҍBUu[t RmAlAd_K4'>Cz9/9#|ww@DS6} R D!'=9gV{7R(ŒFR6]$#@D [<1D<3M{'ݯlrDL "e+"$a9Sw^kH"lf?vJQ*} R&%1톏'/f o95+z< RUAlAD\17{c/'V_Df "ɿ <%(i>F)DאW>W߿D^jLD>{a)AFI1Hꃈ"b~y:{~ˇ=)ԤA|c"(g>J)D{߹ [R{s'oZ?"yoDjH%?(")=msu/?KrK4\=m?XQ} RVAQD Q zDD[GD "ч "5XD$=ꃓ>>"l{"ͻgNQ= RVCQD O z+w~ۧ=|9 rqgzo?# "JXD$ Ҭ&"fDDq = R&%1A7N?ͅ-//}5D:TIQD I xo!ry9_u IDATZV2 jH$~nlY~x([l.)Su)}WuD:TMQD E x&R7FYqyоA>ˮ'D "di(孃\l/D| ALJbcZ "Orȁ߲\LsABnϧD* "$i)(zƦ|ï bFВI2A(i)\67r7 R%"e5ETl}攻<}G "h4H Ā4D.;ݻ_O?ꁞ:Bۃ˟`Fтi"i+\k7ݾ`fo?Nr2A(i,(";Knn"gTHY]ADŀǴD\rg>=nzAb2A(i.no;> R "eE^z} "_݈C -7N)-("ȧ~o?,_ OÏ|S "YhmA)D6HA: #"eE#LG2[H ĀlmHmYGD ",) R&%=@g̻+G)7("3Hef]$ "@IdA.nD*",  R9 "ePtF˜":#e=|HYADED2ߚ<ALJ"{"uoM RVwQD`n=@g̶%OA)D3H]fے UDYdA.s-ɓD$ RI"eEtF1uiE RV}QD`V=@gH$ CPAD9EDr"D3HAC EftF!tHJ"{"Uc? 
RBQD`>=@g̰OH)D3HUfX'$5DMdA*oSD"% Rwi "eP#|_ޜzn"5|7 RHQD`&1ŝޛz>"5z5 R&%1=X{uYz dxbHY+ADyĈ3ASOtTHM&ތ& "@I8sD¥3Dj2f<5A ,bękAĥ3'Dj2b<9ALJbęAĥ3Dj2b<9A bę{?qtf(A&D " 蕝&ثqHM\g "@I>H"rA2A(N{;/?w񯏟 ufHYADSC2׼}WKz"TcufHYADs{n\*sOH5X]% "@I9tyґ53 R)Vy "eE#lorR7DZLJ)DqvywٜqADʚ "L"FDH Ĉ3?: DrxŶߊ,fq3JLII3&Mˁ@heZ&42&[X~^}̱vKD ʌk A3"Ad+\W" "@Mq 2 Kn+DD8Oq A "[ANjʌkĎ_,#lŢ:ERf\3%<.1F,CD2 AGo Xvq] R 9ʤW?\|ꕻK F,B ("p2UN=F,AD2eg7W~Xzq] R7BQD`2uo!>1s /+DL|ktOkIF,@"("0[G3z"l1"s/<_so +,DL~G^y߽CD6buqH ADʌkoy齥 k쭋D̸FOن5 "uE)3DDauiH Ԕ" "۰޺4An ,e5OS繩D2N.L'("0GI0A$C ԔD2 D ! HjJz"ȀF " ! HjJz"ں(An ʄOyoD6auIHXADS ϶{'AdV\[$ "@MAd9&.H,("p25rMXsm] R'5ek"ںAn iʄ"D6aյu1H Ԕ D#lªkbႈ"')^kw#lºkR"(#l[R:A)#l[R"'(#l[B:A)#l[B"ӕD`uH ԔD`uHݐADJz"[ֺAn T%=`-h.AD`04Z D qֺ߽An 4ekӻ3kϿ5HI&K:A)^WL/[#d"6("0IAd9H 2 A)^#,GD4nQD`25r Ad@E&(^#,GـVK:A)^#,GـVK"+^ r ͖3 "uCEU FـvKy:A)#l@AND ҿ;|H @M'yYgDAjʄ|g[K\] ҽ+|H sE*ʄ"D|eMDJOЯ25r_YDLx A{Wֹ:A *^#,G8XYgD Z]ek"Ȁ Z]ek|r Ad@lVI0A$C rU FD$\Z*#d"D{wFDzXD+ T F]hcCDKVУ`0HR H \DzXgDkt һz:AN.}c@ٿɏy wvM]nc= R'\? `;%.LD^;國̃/~t#.JD^;e!ϒ,6" nDo-M};sSr3 һz"AN!}o@g랼]!/93D 7Փ"u]ҷ̸ 2o^%-DD e5wTcǔh2"AoE$H r}](3 ҷzAN[F9D4wD^SO# "wKiЃ2zGEZzM= R'To5@uՓEC|`6Ak-4H R ̻cᡇ|vJZzI= R'Ԥ5+3{o&]|^NHKi:A*}@\'yWB7 "]K﨧Diekߡ[O|Ǟ"r ҵzANKmV&fB r ҵzANGv25r7"u}d D#t-FD Lx AgDH rQek"=K'D{8*^skw֞k "uҷ$D2:Ad7 ! H*#d"Dhp@Jz"ȀI~Ч`0H 2 Ad ! H]*#t,JD&JxR FXz/= R'L U "K不Dwd~}j~S "uT;2YW޽~7xg r ұZz*AN,}@D}w>W#$t,JDK{Pf^䝗!mHK:Ad eeO5!" "K/Do>(.{{RDnD:IO% "H}^uգi=DEXz%= R'"}@{eEuRyv~7ғ "uI4W\t "z>m|}IAWz!= R'$}@se5WOT}rxț{e&+LDN2_ǃ^ #_:G]g|`6A_}dH r<./C ]``H:AD;+3ygbX}.AsH:AT[*39+8DFO& "'K߃T9۟yt ~ѓ "u 4UNbn!XOIɘwO/'D5KnOUz/ID$v>/ ˟\|f(A$c=NDfhs?@"\h|fA$C 2GzPN r O Ad@mnHA9#9|rS4VID$/9~߻+'t+NDI߉Jq~b ҭz:AN'}'@+e5r!qHk:Adr-ȵq'<%t+NDJߋFq!v\{ ~r& HK:Admz-"oz ҭz:AN-}3@eE\nÛH7Dz^AgDҷ#P\tL)_'H.Cr<%* "oGh̺I઀/O#]{"Jo3"u#4Pf]uO\~<G"J/3"u#4P]fH񉙛^:AWf^wǦ>>C|-H H r +s/M3o! 
r ҫ9 R'%}G+ڳzꕯ/1"J3"uyҷ$q^{] OC#^:AL{VVκמ/|߼WH H r= ++ D:=DsJXWI0ASsAN9WuD2W9:AlO(#d"D`0H 2 A|nPH(#d"D`0H 2 Ad nQh7^ "Ȁ%Er|sWZdD:^;DEoMXM9v<,4"Jos"u2&̿9YAOsANYFގR IDATW>3Y:AdA;VPf\{j ҧ9 R',)}ʌkY/ ҥ9 R',*}ʌk޾Kd.?4337"]J"uȢ7),~#D!7%t)k# "Jߥr%wԿB r ҥ9 R',+}HǪz Hҫ "Ȁ5`a國9;EB ! Him */9ٻD؄"d"DZhz+|w)חqO7:A C҂S ҟF9 R'4`2R\rνD^(gD670U]럗ykg_:໋NHw|H Fʬ|*{=g|-Hw|H Hʬ] ëwx-Hw|H Jyʜ>C~Y ҝ29 R'4`2G`⹪7 "I "uH3f)3|OM|檧$t'K' "b̸2wT't'J' " oc̸OЌ t'I' " oc̸틭fDHwқ|H R>ӕ/|H=D(A;Er>ANi*}#H "7"IgDҷ2~ GL|рޤ3"uH[[NUND9 қyANi,}/"gDz^" "of8M9C AA7 H ZfӔ/-3gD2w3"uHsok8OqjytΛj Ad@H{k8KqͣqXM=HD$4e5>3sLc5#D2 "ol8Gq?~b ! HIh|gʜ !O޾D2s"uHD{f+s.LLj\7ILz{< R'Doi̺x|?~[Τs"uHFʬ.Rʃxȓw^7oELzy< R'oj̻[d!<߽v|mHgһ9:A$%}W4euo^yO|`A3H `2'ȭr􁫣D:^! "1&)s/|zr ҙxANI0E_;7oWowӊȿzs՛L\.%2EDҷ7ԕ<>|w3S3w?d{D7Ƴ"uH\2㚷/.=K|ȗ|WH W?> "}I/gD 5K|bsaLz/{/zwA$㎿{z]< R'䵿`r%Gl3I/6C3_"Ȁ`r% {e ?,"}岦|o-"H 2 Aov~#3 "/ȇӞ"]/EE D$S׼gGxSDo9>Ň ! HB&*3yo9B>4_Ps}$)$H 2 Aow̸->UqxG=hTɤ+t&*GD.eExyEw~2 OE |HWҫy:APf]w0d<_{ЈwlRzS< R'"}qeeO}??,:<3 IEH"~kW3 "uHWw;Rf\s-3SW~ rxȤDz^$ "}IpSI:A䲇Lz ғx&ANL~JzV "=dG3 "uHo7<hi rC&^(t$KDzE%=@K.D:^% "IŃ>|͉W"IoDyhi ??!HW:AC)zu0_T_$dws "uHw?ܡhD~zw<2A$D ңǕ?Oe} SN@D$t)qQ崗rS_`VTXrǩ"] Ad@HSNxw^./ךk.DOO!H 2 AO)_m_ԻD*N~ͭsCyH 2 AS(_Hħ&;yϞN5?![s ҏJx6ANXN "WOT}[v?E;o9_/s #MDz`byC?SD><<;~|ˡLyqH?:AkO9._AlH ҷeʋ߹໷ѝҫˇ~~g rDGz< R'-}@Gw~0-"yex;?anmft "HDΥWOy)"GW??] "lVz< R'.}025@~H~zG̸[AyJDv ODz>]C9[{o*"Zƭ rWID6) OD>\Z9ZOoYwFFz< R'lA`dekD2#"x>ANل̉DZD2nk:AdBg" "Ȁm 4&d"D6"t(@Ad9H 2 Ad+B+^#,GD$lFX`tek"H DH Lx ApH 1 D#".@D6$}025r^"uȆT&FY ҋANْʄ"D:^ ">PAd9H':Ad[g)^#,GDz[ R'lKL`KAd9H'ҋ:AdC)^#,GCz[ R'lPd`(ek"}H}D-J Lxǯ2ӫ H[":Adg)#!-BD)}80`0HͿ{z[ R'lT`4%=` Ad@FŎ FS FD$lU`0%=` Ad@f R FD$lW`(%=` Ad@vN R FBz[ R'lX`%=`.e"uȖFP FBz[ R'lZ`%=`.we"uȦ P FBz[ R'l[`Jz"]HzDK^I0A MoH us+#t!-CD.}Nw%=`.e"u v ҃ANپQDz" ";>+ص`0HKB:AdgV FAz[ R'A`Jz"=Hx D]HXI0Ao!H *# -DD!}\_%=`7"uN v ҃ANً^D2^׻"un$v ! 
Hُ~D2 "<<د`0H 2 AdGU FD$I`Jz"Ȁ]ITI0An)H /s)#t -ED%}n?%=`"uΤv ҁ^ANٛޔD[b:Adwg;S FK/uDIKI0A$/-FD'}x/%=`NAN١D+b:Ad{R FKotD]JHI0A$/-FD)}%=`>AN٧~Db:Ad'Q FKosDJ!EI0A$.-GDv+}%=`.AN٭!NDһr:AdҧP FKrDK#BI0A$=-GD,{%=` Ad@ȮeOv ! HٷD2 "=R؃`0H{z[ R'\P`Jz"Ȁ *l^I0A$--HDv/}q%=` ANٿqD܂:AdM+# "u V FIK/p D $ "CH(lXI0A$--HDƐ>Rخ`0HZz{[ R' "}Y%=`$ANET`Jz"amIH 2VDҫے:Adc*#7% "u8 T F K/nKD6 ۖ$ "#I,lQI0A$,-ID>Zؠ`0HXzk[ R' %}A%=`ֶ$ANKl`{Jz"YmQH 2D2.mQH 21֔D2 "I3lLI0A$C 2AD2 "J4lJI0A$C 2QD2 "#J5lII0A$+-JD>cؐ`0HVzY[ R')}%=`(ANT`3Jz"QMmYH 29VDҋڲ:AdX(#De "uȸ' P FJiDqO ^Ӗ% "K5lBI0A$*-KDF>k؂`0HTzI[ R' -}%=`,AN[%=`,AN\{%=`0AN]w%=`~0AN]w%=`~0AN^s%=`z0AN!}з`0HRz;[ R'PS FIJ/g DjJz"n0AN%=` Ad@?D(#d"Dx*}Я`0H 2 Agҧ*#d"D>~UI0A$C AS%=`V4ANRO%=`R4ANROu IDAT%=`R4ANJK%=`N4AN@Jz"AliH 5D:A)# "u/H@t ^' " }Н`0HNz[ R'@oJz"9mlqH 3ΔDr:A҇})#w "uO!R FIbDnIBt ^' "ܖ>II0A$'-ND8"}Б`0HNz[ R'pL %=`@I0A$&-ODIHDR :A@\I0A$%|@DKJDRһ :A{O%Jz")kH }Y%=`AN^s `0HJzZ R'p@RI0A$%w@D }2T FII]+DHMDR[ :A)G9%=`ֵANa@LI0A$$sAD&}:R F I\kD&JODB:A%=`µANa@DI0A$$oAD.}BP F I[kDN>(#5"uHQW Fɸ7 "u'IR4W FD$p1@k%=` Ad@ISV FD$p9@c%=` Ad@JTU FD$pI@S%=` ANt Jz"-kH Jz"%kH  vJz"kH , fJz"kH < VJz"kH L FJz"jH L FJz"jH \ 6Jz"jH l# &Jz"jH |3 Jz"jH ҇ D"һ::AsO-W FHVDΒ>X]I0A$"YCD8O`m%=`^ANL DkJ:As.U FIHoU+DΖ>XUI0A$!TDD8_`M%=`ޭ& ", }z`0H 2$A%/S FID$jJz" Ȑ>XKI0A$Ȑ>XII0A$Ȑ>XGI0A$ LEDXL`%=`.ANa9S 5DҫZ:AO1P F HRkD>XAI0A$ IEDXR`y%=`"ANaQ ŕD{Z:AeO2V Fi/EFDXX,`a%=`Kj:A3U Fi/CFDX\4`Q%=`+j:A奏3T Fi/AFDXA<`A%=` j:A54S Fi/?FDXED`1%=`j:Au4R Fi.8_I0Ag 2 AU6V Fi" Ha] sDZ\ "-}p`0HkiEH g)#^V$ "/}p`0Hk}iEH B3DZKK+DZHqW Fi--HDh"}0[I0A"AN!\%=`һҊ:AFҧ3DZKJ+DZIsS Fi,(IDh&}0KI0A&ANI%=`kҚ:AG3DKoIkDZJu XzIZ R'Td%=`;Қ:Aҧ*#4^$ "4>8QI0A&ANyiJz"mU "uͥO<NR Fi+JDh/}p`0H[[='@DHy " H!!}0]I0AE BD``0H[+ 2 AT%=`.7Ad@!sJz"m].(ȀRӔDJEDbGDJoEDrgSDJ/ED҇DJDDҧ+#4^% "D?U Fi*KDJܧ Tz!Z R'>GI0A>.AN!-}PW Fi*KDKTDZJ/C+D҇ 5%=`һ:AOA*Jz"-W "u=Hܭ RzZ R'ЇA]Jz"- "uHܡ Rz Z R'ЋQq%=`[:AnB*#^V& "#}pLI0A 6AN##Jz" ][F.]hH BO!D"DJ@`0HCVAd@}IT Fi&" H3#Jz" ][D "&}&`0HCAd@I PzY R'Пu%=`ҫ:AE)#^|V' 
"(}0\I0A޳:ANK+%=`vk:A>F.DIo=D:>8(#^zV' "t+}:LI0Aʳ:AN_Jz"7 "uKViMi'NDY@iMi&ODZ@iLi&OD[ LzY R'й! 0`0H3]g}H Bҧ$Jz"ͤW "uKc+#4^t' "l@ZI0A>ANa '%Jz"ͤל "u>*V FiŕYwV# "lCWI0A7Ad@>-U FiŅC VKQDZyqD$`P%=`V^\7 "lGSI0A Ad@[>2FT Fi%4 "lJPI0ArӀ R'-C`<%=`VһMH ƤOMDIo6-"u[>7FS Fi$ش "lOKI0AZӂ R'A`(%=`F[M H NDI/5-"u><R Fi$Ӵ "lSGI0AJӂ R'U`%=`F M H fPADH3M"uە>BP Fi#4! "lX BI0A2ӄ R'eC`%=`6һLH ¦OQDH2M"u>Gv FziBDغA w%=`6{LH OR+#qs' "uۗ>J ĭB OS=+#4qkD$ `Jz"MZ) "C<د`0H6 Ad@;>Pv ĭB nT*#4^D#}SI0A҆ R'#C`Jz"Mw6:A=I{T Fi"! "K\؟`0H 齥ANag'+DZH-"u{>Z BzkiDD؝ 3%=`KK#H OW])#^YD(}II0A҈ R'K`GJz"-F:A}JQ Fi!4" "T؍`0HmANa҇,NDH*"u>e @zUiEDر1 %=`қJ+H žY=(#4^TZDv-}@I?|o3~͇//DH)"u;>j6ϞŐK򃹿LΑ|_Y R'w`Jz_tD(t Ha҇-ƕ ?C>17 "]96"Dؿq m%=@ΊQD>|( Ha`Jz>?5 ":4"DAز_]>N_>?,"{o7 "=93"DCخo=FgeW-DND>s6h3yg?4PSz9iFDEت>Ku~?Z8̣n1]EPh5BY$6p#P0 fH c{"_Q-fMG3ܬz=[ufz0cDV%}lҌ R'Џ.6=>W9FYI;H BG/T4ĩ O)NDV&}\Ҏ R'Г+/m+'.|4>'GAdu҇%"u]IlPIq×Uy1/gꤏJD:~؜{f>xU֏;AY\AN7W_):Ԍ|\/:DY!I;H Bw/S4uЇɼq9OHD~ؖJ[^WyMYHCH B/[R4Uy_ث~uvEꤏFD~ ؐڅBF^UƟI4$ ")" %=@S~1cfF Adic:AN_6h=nn$,-}(Ґ R'Э0FM ">iHDW`JzjAv}I4$ ",R %=@KODv!}Ғ R'е1- "whdyH BүW$߉#ACK]IВ '  "t/ r%=@K8,D:$@`Jz;qT tHIhIٿHŧ3o=Dx% ^%=@S rvۘt#AdY#:AH0UIД w㎦:Aү+U4UX2 "Jv4% "p~iXΥ:D~qXʅS@s 2c:AnH:PI0 ӏ>s "+>hJD3MUGT">hKD+4ڔm==ʻi*uIn% "pG%`eJzNITDV&}і R'=iU):?Ls5UIk% "p_U`MJzNvNYF[H /+R4ĥB[6D%}і R' 5j{ȉ~ .㌶:A_֢h=g3ʤ2D8!Z %=@ksAn]-䃯LhND<qJz"IK4' "p8@[%=@g夏$D8_3r "uL~%h QD{H _):#,&} ў R'4WsVJz"IB' "0U3b "uL~Ah C{H H4PtFYL=ANK_WtFYJ!@D2uDR׿yC4# "p:Jz"Ko "p+;Jz"Ko "pK;Jz"K "0;rJz"K "0;bJz"Ko "0;BJz" I/$"u% 3B҇ H I, B R'/ (:#,$} "0<Jz" I)$"u+:03B H sK̫ aB R'/*:#,#}! "=Jz"H"D"u,!r032GH H̦ B R'Bү3):#,#}x! "k>,Jz"HD"u,'032H KJ\ A R'/+:#,"}\! 
"K?Jz"Hd"u,.p3"H K\ o- "u~ wHh"LVtFYăvAC4^&*:#, !AZI/ӔDn] )Jz"Kxx.tH Jz"KH"u4^ VtFYB0 DDZp3!H D>Dh/ @ R'@BzE ?ED "$VtFY@z" "^F*:#, ODHI D"u1Jz" HoS:A %=@g)H QA%=@g ?o IDAT71H a%=@g1H q 2?FD /@ԔD"uAz8 2n?FD`ҋ)%=@g1H k^&D"uFzTtFݨ}kH +^*Df7j+.tHUI/Df7j#.tHuIwDf7j.tHIDf7j.tHI7Df7j.tH5JJz"Kos:AV)h\):#-DXpPtF[zc$ "Z൒3 ANK/7AH k^<AH ^>Jz"sK:A.+:#,ODXt 2n>ID`ҋе3{$ANMH/#@Jz"3K:A6"*:#,OD،Rt 2&>JD`Cҫ Х3ȼ[(ANMI'@Jz"Jo:A6&):#+}D؜t 2=JD`ҫ Е3Ȭ;,ANMJ/,@GJz"~/ODبt 2kAClVzq:QtF͵ !A6,](:#jZ [^_D4~g-tHmK/1D4~_-tHK/2ޕDޫ "ul_z 2N=LD` k%=@g97aH ^i+:#(KOD؉bVIAdF=z R'^W`Jz"3Jo:Av$TtFQz& "+%ء3Ȍһ4ANI/:Dޚ "uNzv 2-y R'n`'Jz"I:Av,PtFMz7' "kE؁3l{ R'c5  ry5:A')%=@g˥wΫ! "h (:#\.m^ ANΐ^3қD< h ry=:A8Wz*:#\,]^ANΗ^Jz"KoCDILIAbz"u0Qz):#\,O^AN&K/u@%=@gK7+" "%3ȥ;D2XZIAR"up, rxE:AAzTtFTzk"H "):#\(/^AN^eD. R'I~Jz"JoDD9?`~%=@g MǾyg "u0̬ rXAC0"̪ r[XACB̧ rWACR̥ rWACḅ r{WACz̠ rxU:AXTzE.VtFHz.H ,,(*:#\$]AN^KD. R'MF`3EһuDLStFHz˻.H ^)Jz"HwWFDk$p3%қݕD2  rNwe:Ah/P(:#\"]AN"k%0ZIAM"u^.qJz"HopFD PtF@zw6H DMA%=@g k# "@ZzJz"HolFDH/@EIAdvu:AX TtF.]AN"Jz"ӥ# "P`@IAdK6o+! 
"Qஒ3dB "ꤗR3dmA "WSZIAdH` *pPtFͧ !AX VtF겭 !AX " "Sw+$ "ʥWV^IAdu:AX }+:#L޳ R'[^^g%=@g5D`#K,t 2QzFH lGz>D&JoUHD-IУ34}* "u5zStF&G]%AN6(@WJz"Ӥ7$ "6\GIAdt:Aج (:#Lޙ R'[^y%=@gIuD`ҋ/^IAdt:A؁ VtF$]'AN!~DHEWJDHS%=@g)ѕD`OK1QIAd.t:Aؙj StF"])AN' D&aqDD}J%=@g f= "DJ/%=@g f: "D=K%=@g f7 "DK/y%=@gͱiD:$^`Jz"c(tH^`Jz"Ko5WKDnWjب3һD' rs:ALz):#-\/AN^`[Jz"gKo.KD.mؐ3ȹDW rr:AYz-(:#+\1ANz^`Jz"J&WLDD):#)\3ANx#ZDΔD R'\KF%=@g37k& "tX3șқ5D:IIA<} "ukQtF9OzϸjH pZz5(:#'_\5ANJWtF9OzjH :Q%=@g& "{):#eޛ}[AN-@DIA,m "gI/\IAm "J/VIA "Sh rvwHh rvvHK73[DKw3D73{D73D73x "u;YIAdo:A`̨ 2^z˷zH RtF/[=ANXPz3(:#魟 R',-W˔DFKoODHo`3h "u@3MLRtF+ANh+usDJ6@D8CIAdn:A %qJz"#r[ "Q< 2RzH N@UIAdn :A`[ 8 2Nz H } *:#3F> "upOIAd7aHI/ඒ3( "+fAIAdٷ^HUKo6@iLcM !A`{VtFc !A`+zUtF!- ANؔDFHȶBD'/%=@gқDmJCGIAa}f"uw#t l3:A`[ k;:A`v k;:A`7қv k;:A`_; nkC:A`v FkC:A`қ kK:A`; jK:A VtFy@z_%H Џm*:#ԥS" "Iocؚ3H]z')H Щn(:#TwP" "]Koj؀3HUz-H ,@MIA*kAN(`Jz"5< R'p[zڔDj "D%=@g>H*V RG қJz" nw "RtF9mɍ !ABVtF9- AN`vJz"'?# "LBIAg{:AG, rRzdz=H Lқ$RtF9% AN`V+:#l R' DNHonHDXPz Jz"'w5[$ ",."%=@g-DJo 2,$AN !`3Ȱe:A Jz"{mD[,Jz"һmDV"3Ȑ6e:AI 2$9(AN`қ/^):# HoIJDX& s%=@g[D6"VIA&d:AMI:TtF[# "lTzsЍ3]m6H.Mػ3]mH#aا3vH(wؓ3mH-ؾ3mv H:lVIAa:AvStF)i8AN_M6DnJ6ND@):#ܐ l R'  D' "0(X3ȵf`:AқA3ȕ`:AΖ 4UtF9J/; "p^3QzANYw):#W=DYz 03|:AJ̣ FzANһK Jz"W}DHo4F+:#^wBD +xHIAH ޅRtFyED` ;Sg%=@g=d6H ےޤ):#!DبvFIA$y3{1H ;Tt ZB "o}-}%=@gz"R Iu):y-H@#jJz>#o7[wD+ !AQdxu8ҷ[ݼ \ "c x9y\pHzANJ ݞ񑷾*CH ,fV G>5\tnHz#ANe({>4=\vNHz'AN"[o`C}} Ǥ73D`-q`XIC#c72D`[teA2]/޼ ^lvKDKoaJz^ޛxo.|.Hz/ANv)){>|2zgH t,}P4ĹO]›wD "uMcX>f,_ND&HD@C%=@[' \›*_OD>؀Ym=;֖7ozwˎHH >(MNG|P̅7 ~ R'lC~M2i0ᫀ\xW"^"urأJĎnJA$BANU4Uyc˘ˢ^xWvD/v=D.>bq%=@S+},7eA$AN`=k ):)1ƅ7eA$7{z "u>ۏZՎynH` AC<(}<U4%,M S3)B+X؈D0kvQ0 oZ ؿ` |n D`a# A:0D#ٍu hI^)z:Z|;C6h\xs`%JzjE{X)Ax[UZJM=h\xs`%Jz*W>~<7VhϿO_yOYJMUE%vus`%Jz|cK0Xӟ3ꢨXÉO0\›Pu2\LsX'a9 %=@cS9q9 %=@c/~G>4*Nu2 oAIG;q >XcxKgoVI޳GĕAX `8i|;wԥRGX a(iu/hq7VxwƗ鎺9Z%=@Ƌo}ftus`Jz^WSN@PIZIZIZIZIKs\Ջo~N=~xR:/z:Ypo~?ҳ̮ExG$ɲy7#<}wc^MK|[;ΞR%=,>;{ CS !0g=VaCͪ=ӬIDAT-M <>$=D2/23=I53C4aїӣ+)p:ί_IH8^@DϢC{? 
|kwpnN.`ô'* Ow*`χyg0rrhLqĂ38Gė'ў:o&82=ԛ/k M=;z8_wb! \u'k֓Lu㩽LqX\:vTR'\!8 "0p0ɩ5d{i %=,ɳ#m7aSgM9dNoۿڏ&vUvv4Oz3D2 dxj-yy lؕ㋿a%=,A6o/uS>_g^|~<`s\ݾxA})wu< &Lt!ޞ热'Ɲ5ƒP$Afs~4-ݾKA&;>8xSD/uY;:= > "ı8~)]DuWz4L7UƝ @ 3w. RnrwL"W0o r̔A8&D^ "0ٍ rPꚐOBzڮZX?o k@ D7|LuM_?|p7ݒ Ү?Izؤ{y)dA:G;?8K߾ AՅSyٝ "0Ցߠ7JzXBmO)Ův=` D`ei`U:"gy~W_/mh%=,p%=` D`b>OXp OM*` ,ro}KA&;K?sr0pВt۞Pp˩d!NM`4xu6|KzXsAr}$Y3n߀aꬫPpjh{n,HK\?usr0靧_? 0k"/ "A.% f'fSr5v*/zf̉"sK츇"A[6p1.'Ohyÿ{\3*`I/g; Af$^|d?zKj%=,ŷnzB+4!>Duݝ$㩒(~ovo~/ӣS翸TIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIZIʇO|>Kz~|1;HWΟ6VD]s7Oxr @Jz`U"?KgGu8_2j"VeT)!Q)ȱr<|?۔T2.:iaS4 "c? vo"AXNc;Tm}''^~DjWo~x>_jןk?3{|>?-O̩`q7o{q?3*s ~;jDN?r޷j\p?7#Qz?Pz /Ǹ>%UG=H%NX3XXAu|A?_ _Ss?| g "XXAѝAdxx8޾&w? \hon11g>'qz g "cXX |;c5D;xCy޹%WG?ˁ/}5ژt^ȨxgvM}Ŀ~ "XX 92s;0Ͽ{܄xǠDǃ?) ՝}7^ g"XXڛ: r<Goc_z?ҟx#kOv 2>OA;mz /"g<%=[o(W>}fn "Ǔn\:>|w}'73n_:~S>w_}Cɛ?:f?} ?|}xOjAd}DXXɃͿ0 XRWDλ?{N=0? ș"EI,x~tKNry~_>~\W8I?M8ސyqg:VSIݝ@^D|`-Jz`iǃ8ԇsݿgs2d |s ]o䵻STƽ3W8dp8灼, ZֱAd{>N3O?sAħ޽ }1g?a23N9 Z90[2g${?=v?:?>^T'Mf">%=Aw4 "?;3is.Q{?׉O^t GCoݟy~[gg䆡 rkQKO "?#?m&gty+'o?>&#8@^D}`-Jz`il2/|~xBgr;u)1"?py( Z*gg~?T+?D.}rKG=}*M "_; a(p "ǯ{OD"LEA֢&T8~D_0O@ ""|G "sNp)ЧXZ rJ?{ޱueYgAm+RO%=Ag~;qAd93. "wzf}3k{"ZDv`i"3|+ 3ͩ-D&' 3"ŭu YUɤz`-"; ~wo|-Ls"݀("3f"ɜzjBsjE/DVе~>L.D~R!R9cV!r\ w 2q! mײ/Dj"K[9yȶDRdkB9BrƬB+d{ŵ\ȪBq!RX҅HW@n )25BaC-W/~Ǐ ^ ljA 94p@}!RXZBtn@Mq9"GKN>w3"d} 9^ K$W."EdCdy;D^kHw{_¿ 3 m{5eT.d鶑/^!RXZBdx8ל ;S R?cf!= y9.\ 5\DX,KU?wq[ᶗΝ3ٓݭ|~[5cf!m*.zz?r! 
Hɸ!ZX罿8z&SQvL+?(JՌ6SB҃Kn`-"; ƃ_x~|—V42|qse⭗#o1?cT.dW*qy[wSW-"Edx!rp>bJ!r95yׅͯ1韫Ji-d?vدBj`-"; ?y3(8tϘ]KoV-d7*"U+-_o|XOs9uxaw_cB=/Yb!r6Bf`%";Gx*1\?<+wz^cBdީU,d)6"o<ߌ"+(D7ďnq"g O og/Dvj Y.D6ѫ)D*!KkR n/Ot[vۧKs ؝'͙4 H4+UyFr\ݟpѥ6f"*Dvg/o/~/>7\w'׳Sg2!.rL:xo4_{ 5wY/?\d M:n\y9} ɯXi7/_>V|Obp+O0?\gtt9?W} IozFqdM:m}/zPќ?zhA`7pWD~Țtx<#ndn᮷y 77󟮝`nஷy+*nmt{?`c&`6?yW7M:byHܹ(% w|޾tw/O>o7M:J%~Wᬟ>ZغIX?ӟQޙ^`&`%?C|7(nnVwˇ|ܷQ@- lݤ,rch6p 2M:W=ީ|{q7[7K}Z9_V`IXg<|sJq,2+|sҗ I]Hw1/\L:@= @y%nb)n^yen`)n\y֛tzn_.xjPOqXk)nWuEVtzhn\%}&J+J_J`IJҗXd)nQh֘tznYH_T`I6eutz(WJ_Z&˕I*jel)nCJ+˥/0p]POqL/QV%j)nIw*5M:@= 2p8FfIZ&]RZ27P±K_nJ&K_q*&/]D/:p POqPRܤS֥.67m$*}KM:@= lZ: K_~BPOq[.N7̤Sv[-H"POqL6"} L:@= lU0ٌ^o)n`uɆojPOq۔.K%}7WtzؤtS5ΤSk JU&']lS079dxII#5M:@= lKٴ^j)n}JW#= M:@= 2G^dwxIZv(݊4H#E&j^؟t'!}tzezawҍH}^`)n酽I!=w 8ߤS6ې&{m)n`]H57&M~gtz؀tR(}L:@= KJc)n .]J6 POqiVϛtzKҷx֤S@Vys&G17.>o It[ p`POqk >> p\POqK >>pXPOq+ >pTPOq+{ >pTPOq k H 8IK>pLPOqW(8)},&-]PpZ\!M:@= \[G47pezGФSu >p<POqW&xRxL:@= \UiG37pM^O̤S[ >#p,POqL/lPS27P Β>&p(POqL/lO)p POqL/lM|17PƤ^ }X8&"]E"17p&I8IkHPQL:@= \AGb)nrWH8I+^#}j&. xCtzT`)nBWK8Iˤ.><POqIw\"}z`&.nLM:@= \";p tz@ur367z΁+H"طIKw\CM:@= Zq:l)n}W>HgPOqL/ĥ%}`&j^HK \O,~M:@= 2atzez!+]5pU57PBVi j)n酨tդS+k.}`&^.]2p}3;57r钁҇ i)nKҤSK I,أIJ ,>XGPOq/X&}`&^&].PpL:@= L[`tzxtR37b f)nҵOͤS kK1ؙIKn }`_&Ζ1]tz8WPF d)nL:[I4ؓI3n&}`G&j^t ǤS@- qݘtzezE7>pPOqL/RF'b)n[J\NL:@= 2pC}tzx^C }`&H;؅I B`)n9=tzxF> &}`&.J>77tw@P@I'M:@= <%]>Po)n)‰vPOqOHĥ tzxB5 /}ۤSҝ>m)nQʀMHC67tc6!4tzxL/`+'M:@= <"&n ؎Y^POqL/, ؒiZPOqL/n ؔqZPOqL/n ؖyVPOqL/, ؚRPOqL/n ؜RPOqL/n ؞NPOq;6(}(ӤS銀MJK47@!`M:@= |)Q &جBPOq_HlVh@Iϥ6,}8ϤSg[>g)n3jMKO37t1ƥ(tzT`'L:@= |" y# e&~.(>e)nNS U&>J7tHSh27AD@I}-'L:@= KHU17P•+tzez]MҧjL:@= 2p]&. 
-&j^t@POqL/\UM@IZ)P'}dĤSކWHZ075ҧ*L:@= mx POqR &?L:@= ^>P`)n8OPOqѥ/lߤSpp՟j 77\zZM:@= ǖ^)>uPOq~ʥ0lݤSph鵟z# 77Yzg҇m)n81lۤSp`镟]HcشI?>iPOqq~v"}`&j^Tzg/'l)nK}v#}`&j^PzgG҇k)n w}v$}`&j^LzgW6k)n酋}v&}`&cJL@VM:@= ^ٝtz);# 57Qzg҇i)n8.5lҤSp@ ]JkؤIx >;>EPOq{*}`&IVhM:@= G^ٱtz&۳c 37Lzg6g)n8fϮ7lΤSp,ŞKpؚIPk=>1POqzv/}`c&#I/@L:@= G9!mtzez+=>)POqL/Xz6e)nK/DL:@= 2RuHuؐIZ^*s27P y$}`;&HIfL:@= 9q͘tz"s,[17Cz`b)n8ѤO<lĤSp-Iy؆IK<Ǔ> POqwx(}`&HoR&L:@= 9-tz/sT07_z}'6`)nؽq>M:@= {9I`һ;G>77\zs&Ko[@ڤSo齝KM:@= 2pG&j^8Gzk#aPOqL/!tzezy7ݤS@- Kn)ngvx/=47Wz_҃IPOqnutzv+YI`һ:|ștz*1POqN7uDz f)nةJ IDATL:@= sI`k:|.=27RzK/g2&=J@z( c)nأ"&JopBz, b)nء&IpRz0 a)n؟~'&Ioh@S7IZQۛtzezjOHܤS@- 7sxBz<&j^8)Ó67PIIZNIM:@= {^9tz$óC57Hz%3nj)nؑFgH ԤSΒI`?8%=(pKPOqnq8SzT&Hop M:@= {^laۙtz"37DzH ̤SM^ =.p3POq>qxL:@= ^e27Bz J ȤS%^,=4pPOqwpxmL:@= ;^c717P8Bzl&&j^(ëna)n7Nzr&j^ ~+Gn`)n{^-=<ޤS@- һ7Zzx`I\z tzʥ7o@z|`I[z񆋤VtznLz`IZz GtznTz`IYz醋֚tzWn\z`IXz+H,57J/p 9&^"=HҤSP+nu' Vtzjm(BPOq@ ג%Xh)nh^jL:@= ҫ6\Oz`ITzՆ+J,37tJ/pMye&j^.hU Vtzez9 W)Xd)n[6\Yz`IZCK/pu5&j^-cե ֘tzez9 tz 7lX =VĤS'`&>HO,07IoװHz`INzUҳ77IְLz&6IO\ݤSP&YBtzʤ7kX)=_pmPOq@^ K m)nޫaM:@= U[5,1IJz3574I԰\z&&KO\դSP$Q j)n(ިs47HpAktzez9> 4IZIop#Q+tzez96 5IZIp3atzez9* 6IZIpCqtz:iL:@= {4TzZ& 5n,=rp%POq@ 79IAzK\ǤS Cͥc)n(^! =vpPOq@ ktz/?CDz&Kϐ<I`3G`)nؼ )M:@= [ޝ!&=|pI`һ3䤧.67l\zs&Koΐ?ԤSmtz-7CVzBPOqL/ǐޚ!,=pIZcHo͐A̤S@-!wfK!\f)n;3䥧.27Pr6 =pIZ#Ho̰9KL:@= ەޗa҃tz+/6'.07lVz[H"\`)nت [ESUe6#=zPOqVwe،0M:@= ^aC67lTzS I#ڤSME6%=ZPOq6dؔ@kM:@= ^ac# 47lRzKI$ҤSE%6'=:POqwd؜PL:@= ^ac 27lPzC J%ʤS=6)=POqcؤ`kL:@= 2Wz=mJO&ƤS@-~cبh+L:@= 2Vz9J'ܤS@-nwcجpM:@= 2Uz5 K'ؤS@-^7cذxM:@= ے^a /57lKz/MK(ԤS)6.=BPOqbظ M:@= [^aC /37lIz'K)̤S! ^d͛_{'ϿNq7b(SxIX~zVq7K/P!=B?~]zItžyͻ(n^DzT&`S*mJG^`3oiPej7+}77w_Q]j7 <_ܼ69HP$=pIX&xſ`#қ0I+m?ng?=ylmH/P%=pIX_I}Wv^qg+n؈ U t5mo+n^lb 2Kz 23M:o,n^lb 2Kz 23M:w&V@/ˮ`Z8Ϥ}msnq?۳SZ]IP'=pIX?.7w&UTo>~`yezٓ c gt+n~_ͧGY=IoP(=pIXS`{ 04J-eV;,nN67'C9&`o忼E_?] 
ۓSzr Yg~9 6'B&`N//ǟ}Io*=IX愻O|漣`kһ/J/p^HﻰI'L:j}߼o3/{GqL/.Dz o|'U@1K {exܤvfqsL6ezua7 t-nOm~~Vo^z]#=IXw7P.Ž5_ܼ}?şߞiw77]ؑ8& hx̤SsaW tz"k.LzPOq@Dz˅I457NzCH:|j)n> ǐtĤSp3m"=If,EzPOqwY8&[IpaL:@= 7^e@M:@= 7d@M:@= ^dPL:@= cPL:@= 7^c`#&Hop0{POq-X8IK,NzΤSp '=pg)n {xo)nIzJ=77PR$!ޙtzez)_҃L:@= 2HpP)nGz{J>U\NqL/=+TzrVK/pX7])nX,7SXzsK?(n.` ~I` ~I` ǖ~I` ~ &K+]=ÛtzVJ/px7n)nX(wn)nX(&u + l)nX' k)nX& 8I` ~+&U*p'^M:@= U^#tzezٺ |~7&j^.8IZK/ؤS@-ƥU7k)nek*;57Pm5L-ÚtzHogo ֤SBzI~S&;*G57,^Qo ԤS@zCH-pPPOqT47\_z?NH1pLPOqS47\]z=NJ5pHPOqեS[47\[z9~s&kK#oѤSpexTHrfgb>M66;r9p%xaVaY)․ ^r -P"R?Ϊ׬;Nu}ꪊ'|yN܈x`*]@{&Lt nVz0KV'`ZEA>.= Jϥ- t nTz,nnN hOpmٽSz*nnN hOpmٽRz(nM hOpmٽRz&nM hOpmٽQz$nL hOpmٽQz"nL hOpmٽQz"K hOpt(0@Q..= IϣNT7L&=[R04 I hOpTҳ(0PY&.= SIϢPnT7L$=+R0$ G hOp4s(t`=*]@{C];Q0 $2XJОIP`7ZT7L!=;J7 ֢'` Uk.= HOmt n@zvnD hOp(0Bq.= Kϟ:T7Ж|O`t`*]@{he>'0Ju .= e2)<XJОڲ{ nA hOpmٽEzJw֠'^"=z+P n@ hOp~҃'t`*]@{;=W %BXJОN`/U7%=uI'`St.= HϜ]t nCzn#,\ hOp't`*]@{8V0^z$,Z hOpxqBh.= M`^U76i{ KV0Vz&n&,Y hOpXYJ`.= #'M`2vU7ЖKZz',W hOpmٽL`BrU7ЖKXz(,V hOpmٽdL`RbU7ЖKVz),U hOpmٽDgL`bRU71 KU0Bz&n+,T hOp \P.= K T| @L.= ;KA[ Tt DH.= Ja{ Tp HD.= Jϖ KTh L@.= ;JO T` P<.= Iϕ S\ P<.= ;IA[ ST T8.= H KS@[v/8t`i*]@{h%"=Sn2,M hOpmٽ$GJmt n-D \0.= e(#H7'^$pFT7 'cHw'`8 E(.= CI8ҽEt n*=LGn6,I hOp@Q8t`I*]@{JѤ R0Lz'oXJОa҃$pDrT7 #cJw'` U.= CH=Ũt n"=DGn:,E hOp8t`)*]@{Hѥ Qp _.= wK@@ .= wJ@B .= wJ@D.= er<H'^&=<!T7ЖѤgG $|XJОڲ{9Ĥ P@[v/ǒt`*]@{hXғ#n?,@ hOp҃#n@WpU7*=6Q@{.= JO@T^ hOpS#nAWpU7&=3a&@w.= H@\ \ hOp-#nC4W]z`}*]@{K U7lH7"zt n*=.nEVUzZf!݊h'`DY hOp6Yt3JОmҳ"0n@c.= ['E`6*]@{hғ"0~@_.= er`9tCJОڲ{9I#V hOpmٽXzL$ݑh'^+=%ItU@[v/yI$t nIzF&ݕh'&tW/$ IDATJО'D`vm *]@{nI%zt n.=3nLTp]z>(ݙh'tR5R hOp5toJОҳ!0S@G.= WgC` *]@{HOl U7\ J't n,=3nPSpYz.f,ݠ'XZEN hOp%tJОC!0s&@7.= gB`M n*]@{h#!0w.@7.= ertJОڲ{9D^ML hOpmٽ@z H7*zt n-Hσ@F@/.= e28nUR\zZH*Zt nx.= ={T7Iς@f@'.= gҳ E[H hOp$nW4Rp*= }}T7J@~@.= 9$ݱh'`#=;mT7<^= .*]@{I@/@.= Og@tJО3 MkD hOpS&ݵh']oC hOp tJО 0FsBZ>7n1ҝ jԪoԽ7ЖtRuBZ7ЖtSwAZ n~W7ЖdҳTyAZ<_@[v/I~@W@5jՅG/W7ЖTғV}@Zu9Y+She*h+ݾhF+j_]zK70F<+:_]zK70Y|L 
nV.=[W{+ͽ|:E]}nV.=[Wė?kzeJpn-CwS|n uK}@s&Dsռ2%X4nb]MQ_u2%Xnc\Mi_\=_ZzK1f_Yz :g~, =`ҍ y?S\uYz rsb_Xz!þܒ<ӌnV,=neZMI_{Cŷ'7J{BsV|]MmiWz"3N>z{Q~rgݼn+= nfY ߆:}Eo._rނVz#o_Mm^~< n-}'=`1 =^?+Rje7+UI<f]&Jp)Xt`6je7―I7=梆]&JpXt`.je7됞ဵIw=f'XN0.= *'8`}}yt n =+n|C hOpmٽ =k|B hOpmٽ =k|B hOpmٽ ހuJ>'^Ko:{sP@[v/g7`i?oCMpmٽ ݀Jw?f_9M10Λ`ғZ >OOչ[YKOnzy5~Wս_ͺ 'XWWW~Y`V2,]zn, m]g~Rn.=ku=8 ,^|逛Z8 ¥6`=ͷ9Fpdy @pr5jՇW^:N9ʩE,Yz^\Yy›R}搛5N,X12G\xFp5\Yt5e7\\JkϤ!I5fTꗁpr n+=Q7O|R߿=Y. erpA%S㖝G\/A_6Y. B4 -ú49}j=n*=\nu)Iu,SzF$H ܼ|<N&~"XpE-RW~ޅO*^7KK7LFڜN7ғ`)s` ԸeNZJSHa;HL- n"= 28nsʍ7KvnK]yYғ`3NMc+7M`On!=$48E=|b `#m#Q6gi n!=(68JpsNpmٽ\vnGZ%9'^.H_;K7NFܜ@[v/ҝQ77Ж˹0Bup5jڲ{9FHN'^K_'PFI7O'h/=znA hOp^z)>8JО0Vpx.= @w `t*]@{sx U7ͥ.=[(W|W=|g~:7|fS>=C=ן|irQ/lO>ݻ>7ͥ.}{(V{:w|߯ @o `?. kO>zP׃G~n7G.(V㗞WuCp{V^ͺ-YMr#h-=p+G8ucpsK8FpYz[pX5ry>s'y}*LpYz_pP5rݣӼOΞ9 nwNpYz_pP5nًRtc`JО` [3T73f(ݚ'p0K .= g#YJ7gƨt n'=R90F hOp0;`*]@{IOF3nPMz0t`w.= ܤ"J7hvW@[vR"JwhvW@[vR"KhvV@[vR"KhvV@[vB"YK7ivU@[vBg"YK7ivU@[v2G"KivTJz"t`G.= '"KivSV=O-1 !K7jvSV=~t%<0N NjԪo<*ʹŴ$4@UjUL nf$= tFzT{mL nf$= tFwʔ`>ҳ@n jԪͺ_Gz!ݭA\Wվ2%$E_0\^yśWu2% E_0\^_ IDATEzh#ݰ\W{?tEz#ݱ/vʔ`.c@ `5Ň\?73:Il>q7yeJp0!t` xeJp0t`Ӿz2%Kk0PMWſ2%eIO@ͤ6>_]>fُn-wY@7 05G~s33h]Nq0LMyN9{f/L n-wQ@? 
5}%y)]?}إ7?On n-wI@C 5'} 7á?эڲ{$=4n R|]Mmiw?dS:Ry0DM'rW?n^llGCC+͏t K>-7CޟpmW%'6Cgn7qt`o_Mm^~'5(yz ˟l!nғ@S _~EG77P6|gps @HzXtO`~Dha?ǧͿۿ~߿?dgEI7u}[nsCAG ߔ 6p @HzXtW`{_'*y7yTJpp&آFڼ55W2(<]s]vč #=,LEZf\z7ν !=,Npjq&|?/dI7vnVV-.|O^\5 @BzXtkF5j=ѫ nΒ˟s3y @@zXtw&.І7O?\m #8hP *]cI2k\P.{Fg}7ǒdV#8W.l%8vf+ I|a nGzXtvf+ qUI7}԰7[ n"=Kp^W#|}Bpp`emS.= e`&8U@[vo+`uҍJОڲ{;I/lT7ЖIz|XtJОڲ{I/k]._4 JOpph`77XzlX-7ZV,} @p7 aUKa nVzfXMJО3 oT7YV.}XJО ʥoWRz`X`*]@{CJ+] hOpp@q`*]@{J+Hn*]@{JO+nj8~Wf7Ж@zX%7Y>zP) kCpmٽ g^pU{ͺkkn7Ж;QgҷU=~sl{vOpmٽTx&};X/I]j@[vNokVW>%Ϧ+r7Ж;{9S؅Wr>|* Τ+V#}w?'_p7'`hMwUQ6^W,`roTV"e[wj)7KO&,}Xj)%\z.` `jܲ#7[Oyn&K&}Xns𽛓o 4fUz*`-`jP/oxjk{Ոڲ{*=UB5zi<;W~_֊@cvLnIO o]Ȣ^O~v_}W6W_} eTz&԰N#-hHҷ թa n@[v<'nMN Lp`JۥS.l%Rz ԰7[ nGCF652V UL hOp04ҷ t nF }XJО`:Y7 ut n&E$}XJО`2IAҷ Ut n&D&}XJО`*97 5t nC*}X:_gBp0`[>>}>7IO! gG~[ga3;H4֣/=u; gvkF^];I6V.|tgl#`n nY{g%=`-jܲo m{g%=`%jܲ_YCjDpٽs>Y5j{q>ޯ_~//$@_vvwCZ7J|˓7h:ԨUoJe3^~ywo7Ж;#1wUQ>/G=9;h'FI>V,:}??q^ZR'FI>V,.ԇ9Ņ$g7JAYyB.q+zFpHԘEׂ+=Sn7{J@Yt6sGb) So!+Pc] n=s~'=tM`j̢k͵Z!7g,_Ytv.zXG,_Ytk~%=rm`jԪk\>p ',^Zu-}׿gj7H)}#XZ,Ņ6Iۥ ,]Zy#78?ë,`qo% Wmrfs\yPyׁpҷqNɧxt)9q.~b nKL }3X<,y37߿/`7e΂G=Or^|xO&/=k0`jGg96Iι5)%޼$ҷE+]Ig]mV{ғHN/@Vq{҃IPXOgw~ܬ$@_vo\z`" ^O>zb>/sҧۗ-ڲ{cIR&NW?sLpmٽi)ɤo) VSW?t7㤇 ,X hOp0Nz`B rU7G ,W hOp0Jz`R bU7c ,V hOp0Fz`b Rq_3_ynFHL-}gXs=!!=^0`j_Y7ymMэ`w- U{GT͟`wH\/KRn7;KBL5z[bG|6]&Yz ҷE 6/>|U&7] #}X?WJޖ*=Xp  ոe'Cxϒ- c,Q[6dzSzʺ(=Vp0[ ըUgly=( nC,PZu̼$ݤ (}X+rn-7$=Sp@ ԨUQR]) efG *}X4yGFp ef'm& *uM%H(ԺFi NUR 4K׾zge=3{Zy sf߳{=M-OWQrs/n-woD K4[n.V+2 rFOA0I;hݛ~;G d'=|Ǎppwl.}Lnz7r<~7G dg=Ψ_n~3҇ \g=P?LuNB^;H6s۞jWGI)z"}Ln{Gn~_7 $EJ/tW 7 o$IIćOC=D;jDV};I83WQ6 o#KI]מK/7Q?/<o;=J&}L{e/tCR]I9&ܜ%\'&yunn~҇<꺇 7g 7WI`Wc`uÄA0aY 5o YEhOF;K< 7H`o`=r)ݥITzhݻ>{Pn-wH>s 7ЖwG$O)Tzhݻ;"*=@{ O`u?k~ٻ??߹ExN}!`u/}k ֆpR'gOg> ׂpbGNfW˵prg@uS|ڈn.Kg (}W7?_mTnHJBխOɣnyTn~OWs` Ew D!'~M͟s#\~@Vn|oO @wu^z/R % "궧}|}F vQ\2}ˍppAqYGj~>>ίnKY`Y?6w/;nKW`Y z,oB8hny7{{;>Z[P[p soC<hnyCXex$soD@nyprn.>QO$' 7n-wFHhny×_7wkw #}&Uڪ?r|B8)%궧=z毧"~`@WuӾOGI)F>۞,TwJ 7'0Tۇ/~__C|ÍppRIM=խO|Pͷ??>cTnJ7`@ ߨğ 
70R?m~_pppBCJO-B9I}*~g@Guϓ3sOԴ0Q*|W }*}B4T?׿|~'DxO]JQ O|(H=w+}FSnK'``C i?o=670)NoT=Я@x$)vg b6O701N_`Yznޕ~C@7u˓~;Zϯ:[^T}}si:n^KI~uI_;GEmm,FgW>_ۋE]m,Fg+F`oh$}hQ7=KyKx~@#C Y[ +>ڨ%ܼ%~@+c Y[ goh&}ptQ=G߾wVuP g?ͤ..*=@{ /> 7 7Xh'6\h'7 }xPntn>Z 7 H_-Tzhݻ@S zS?_Y}hݻ@S zW_n1S/ ]Kz@?j=&7Зw-G@\O}O6 r$X_tT}[փpm{Wh,}(7 ptG gM}s#h-} }Omևo6px pl~ͷ׿ctzncK4>FW>;_WkֆpZzQ0q?Don#K/>WW>njf#pYz` `lu>>hwn#KL }|7#7R p`U)3Օƹ<ϯ9S/ p`M)3u{`owo'nJ/L"}{pMz $꺇=6SV=|97!FHX]0,84G꺇 7g 7Q|&>U=L9K*00aY pP5aunn-w}>SIkê&ܜ%@[޻|&>FU=L9KܽwIL&}{pspm{^MdU]0,r#00aY Gz`> `LuÄһ=Jnc&ܜ%GQtR]0,8f꺇 7g 77{>FT=L9K'00aysx{=Jp&ܜ%VP]0,8Vgx꺇 7g 7Ѥwz&>S=L9K&0)0aY p09unnIo-}|܏}l9(X<Kt 7yf>SnCIoL/} '_KhO$p`,=8.O;TzhrMCHwC 7Жҋ<>FRn-wx"}'@[ޗJFHhOܽ/80JОpm{_(p C`=8=qTz +<>QncH/KFhO!p0`=8ѤO>ATz;>QnCHoOChO p@` =8O?!Tzқ;>PnHoSAhO80JОp/pT`=^zkG *=@{ 0q@JОp.p`C 'KY 7;>*=@{ Nzc @ZhOܽWI\ H 7Ж*}Ka=r^#px '@[kuHY=r^!p\hOf@'Ko7 pp=XzSϤCJОp++ 'J }"Uz`Z-# 'JoZLȩ 7;:>r*=@{ 0oOEJОpL*;"@LhO&c 'JoRnItxW\H 7 :<>B*=@{ 0~FJОpL*c 'JXdȨ 79'}4DTzh{Yz;FJОpm{/KoPn-weޗ>*=@{ , '@[һ9<>*=@{ ,S`='MpBH 7d=#*=@{ 0OF=%*=@{ 0EOIJОpLEIUz`* 7L>n]]hOf"0I JОpLEJUz`" DJUz`7t=-vVny7=-vVni|"CWhOfp@`_=p@`W=' mDL]Uz` DL=Uz`74=4Tn-w# DOMUzhOZ; 7Ж] DMTzhO 7Ж DONTzh' DNTz` ENTzO{ 7@ -EOOTzOZ; 7@{ MEO}TzO 7@w mEOP]Tzp@#3`=hN JОp7t=EvPnބZ; 7@kOp@+s`{=hMA JОptv7=I6Wn΄[ 7@cp@3ѳ`k=hL``c=dn&zl'@[^IDSmUzh{7=OUn-wp,**=@{ uLn'zl'@[{=RTn߽纍p@C3`K=ln(zl'= 7L%zl'-6 -EUTz%DUTz F JОpt$0 JОp4t74=[Rn~.v+V*=@{ Џp+F*=@{ n#V|FhOnT|FhOy7=a6Qnn=a6Qnf6 EX-TzEX-Tzgp@gS`=E`nc`}=hn#ZX_hOy 7.z'@[{6 EOZUzhwpG-*=@{ uĻn#\X[hOx 7AXYhOx^m+ 7@ =nUn66 E[uUz  7@Wv'.*=@{ ĵF`3`M=hB@g.*=@{ F`S`E=AP.*=@{ F` s`==hA`.z*=@{ p'/j*=@{ KpG/j*=@{ ЀpE^Tz`|/6 + 7)z'{YnFXGhO'pP`= p<'0**=@{ uWఢ'0**=@{ uF`"3` =:+p`3` =:n#0) JОpmn8) JОpm}ynJ_hOFvCnJ _hOF&pxѓn=-F`2ѳ^=p [hOuSnM4ShO%7@o=mF`:>=pDcTz`T7vDOdTz`T =QnAm&=QnA 7VPChOtsnQTChO$2*=@{ 0ۻp2*=@{ 0$7 76  7x_hUhOf{6 7 7w]F`VF=ښn pJОpm{m=nSny^N7 7ִwF`^&=̽F`b3=pgEh[Tz`,wwEOiTz`(wEiTz`( \=^'#Y7-zRXhOF"3'5Uz` ktEj 7@x^xJОpcn#0i B=p׈/Snam=^'n:E*=@{ 0pDl 7(VxJОp bn#pC*=@{ 0պpDOm 
7x pJОpmum!zp\'@[sݽ LZhO{W6 =Un5pADnkUzhkWW 7DwF(7*=@{ '-7u*=@{ nn8 pJОpm#z\'i (z\'akwר 7@F@85*=@{ %8*=@{ ~n89JОpD 7p9JОp$mm%z<'I +z<'A[tϩ 7@pϨ 7@&Fh93*=@{ #9e=٦7O@'1 #z\TnpEtK*=@{ ݪ7PL'@[^=. 7Vwn#pHcJОpm5{XSX8'@[p1Ev*=@{ ݰ7Sd8' -zSn-pAEvs*=@{  ;=ߦFਢ;=ݶFఢ;i=؝pUz`ow㊞'Uz`g[w'Uz`g l&zRn}mm,zPn} 7)pBhOv}n89ThOv%c=jw 7HhOݽ{u`=v 7JОpm5{w6 (78*=@{ =ު 7.6 p 'n`_уJОpan#g'?=؃p{Uz`{vDEhOv ?'۵7*=@{ }p'n #Rnmx#|'ۻ7FtLhO6&@Nt Tplkn#[-ӹ>˿xţ `Swf7?wo>n~`D dEy{* t 7O 0Dn.Pq kOun . TzM,??"߻=7߽ss#7p%@[߽n#pp`q3r n`э8J%R]77pl%mx"V6p59÷7plEqD*="g`#n#Sѭ8J/`n# ѽ8JWɧgno/_= ] ;h.&ٟɟ_?'[vNnUzN4?ګo'ѻ?|yJV'hp\`}byZ'W`n#i8JO~2ܜ,7 hnvVX K?!`{ (GUwCyķ 7Fs pP`}7OgS`kn#9 8J[͹#n`T8JG~e36 cׁwO= tnRXW 7#oD֕6 \]#_uO/y_~a tDK{pD`}?[#-gQ{1))TwkOpMG,bStSA4ߺ¼wF{eZ Tz j4wOR0ty ފ`Pe8JS_gmݿ|hu)o 7րwo:ӄ`-`2Nz'qE7`*=O?_᧞.K_?~p&l^[I0LRjWѳyG㧟pk~}p$kXI0Kw?z9kSXK:׼ ވ`dѭ8J<'6 tyk(Ȣ[p(`+}mNO_UF֑5X&J/0ȈB9n>m~?p"]kޱLއ`hѽ8J IDATg٧h˓Μ 7~3n5cͻ0-9Rn[#Lu] 7 ҭe: qTz~T2TzE0*=@{ pty2UzE0*=@{ pty2UzE(*=@{ pty2VzE7 *=@{ ptyb+ +p=S<̕^ChO4O-7a EPn+ dE:!Tzhk7]iNX&K/Bt'@[CܽHs2Zzp=M7k0]% 7woќ̖^.̯'@[ܽDs2\z .0JОpm pip%n*=@{ pt9c.@}]hOn4,w`#Q 7ҁe }D7 `r=QϜ̗^Ṋ'I0t*Uz6UzhK9cy1fL'@[߽ⲒդZ֚0JОp.kY^NzyE7JОp,ҽe5I04SnEfy=&Y^*=@{ [ֳF 3n-@+=x%][VB S-@'=x%][V> S-@'=L:iyI}\F*=@{ \F]DwJОp|*/]>*=@{ 0Yn`h'uv_.*=@{ -k[^VzEJОpCږe`~ h';ו^e; ChOKg-/,DJОpG,[^XzC1@=.]Y6 c*=@{ [:layi=! Tzhk7Y q2̮'@[kܽƲ嵥X8{w`n=ZM'm,/.aܽ3 7woldyuXhO3Wa8kyUzh7X f̪'afח`@; 0JОp+Y^_z#.5*=@{ U:lgy%# 7pP麲W8^'1ʖ_X 0JОpǔ+[Z^bz}6*=@{ Rljy& 7pD鴲EW8v 'ƖW^^h 0JОp.+[^ezyÉ7*=@{ O:lmy' 7p8鮲uWW8'᤻י\K0JОpG*[^hzq#n9*=@{ L`y)# 7p0骲啦V8 '^[ࠢ0JОpn*X^kzk.;p*=@{ uݛN*X^lziZ}Z 7ЖpsbK+[YhO^~NWY{*=@{ 7T tUn޽鞲7V8-vJОpmM,/8m-Uz8tM +ZtFRn1eOKNplHhOCH]-9Ew`= R ]~qTz8tJ*]tQn%eg˫N/pxEhOCޖU '!eoN@tQn`z鎲uWU@ 70tFӛ*]:oyEn~ L.]QW^TQnn -QSD7!`=n(kOg0JОpSK7嵧T.UzY:d,/>Wn`b ,Vn`^递 ݈JОpm=wI+*0JОpm 7g,?o 73wo:, oC 77]O+^Pw 7Ż7OKOw é 7֥7Nk^Ow! 
70t;ZAz=F@NhOIr)Xt;b*=@{ L(]N–^ND# '|$m x_tCB*=@{ 'NҖː^ME7$ 't$nx"#=f&y˅HoS- 70t5r%ҋ)pBtO*=@{ %MF\^ ]JОpsIG,"D7% 'Td˵HI] _hOJӢJОpIA,W#gD%`o=&.&XFz'Ή.L*=@{ #LF\J ]}UzF c8/4 70t.rA )pAtmTn`Z2劤Q'@[t-rE(pIfWnGwo:d$u(A{ 7ֻwo e>m,C 7;wo:e>e,D 7ۻ7]J\2 <#F; 7Жpsr}һ('@P2岤WQ9 IhOҝd8uI;JОpݥ3x DE(`=KW-W&ϋQ.*=@{ 4$ZLzݣ=Tz-IF\ \#I; 7Z i65JtWnt"rq[(p6l'@gD2⤗PJu ZhO҅dPI JОp}ɨ˓^AEW*`[=Ja-'׋.U*=@{ t#Z.PzݪMUzh*]G\ @tTntr' PhOmdh5JDw+`;=:J-)}/ݮTzh(]F\ PtRnO<ܽD22wOॢJОpuJEw,`=y'3 <.Y6*=@{ tO7g,*y/ݲMTzhmnXTznݳ-Tz>!ܜ\ "i 7J\ $k 7J\ $k 7I:\ &m 7H4\ (ok 7G:t\ *q+ 7F\ *r+ 7F\ ,s 7E:\ .u 7D4\ !wk 7C:t\ #y+ 7B\ %{ 7A:\ '} 7@4\ )k 70tfl]WtVRn`xrҫ&p '庥WMn XGhOѥ3H?˅Ko[JОpKW+^4D0` =vO<ܽ>K35D71`=vO7g,.fb*=@{ bnX.^zƀUz>!ܜ\ $ 70gpsr;&Bܫ'KZpJОpJ^1Dw2>=F}-0ane]*=@{ *]?[`zVˀ{TzT~4\~ )w 70tlXUt5nWn`Hr %rܬ'飷"K`e UhOGsULʢpJОpI˘-E74F=-1Zhm*=@{ &=[.dzҀTzLzL`@tOnQn`,1RJ` M AhOk^+MDw5*=@{{#Kvއ%l-&ТELR"XRۗ"5+.f6]-g̙SUS!P4O܀ J n`'7꘦Iઢ0^I= ,Yf@t7 \W{F+'H'+SG5LW߀J n /tMt/ \[F*'tα:u\ӭ$pu 1ԁMwE8`.{K+TG6H71J n +rQt BF('tƱJulm$p^8 :MwMD9ഒ.{HJGT7E怓J n (pTt HN)'tVux=$p+ю8 bjMD{:ฒ.{HIU8AꀣJ n $nXt P)'tfu#pK8 "ƪ1N-E[;.{HHGV9>7ma%]@7N6Vr{n+J nf'ޙv&u#pcRtOp77W>!P9;m%]@7pkuӭ#ps&h+'/ #ۋy@SI= ֌f@tD= nj|Bp3uoPtOp4vm"p n(hlDtdD=@I= N:؊:6|.{tu=#m=%]@7p+8c;ꀧ[F &ϕtӌ #hXh' PtOpG3P]V!ڋt1sԃuv%]@7pD:,襛<`- )t S_V#ڒBJ n`H:~\;<`=])t V`V$ڗBJ n->pz&VtOp-KԃnU6г.{hH'\twK=tteaL7wDTWI= H\toMEnttLvDTUI= I\tgOMNtsЁCn6Ч.{x&9LqXz,}FVTtOpґ$`:`*t vi7L7uJEUOI= 9`͢+t fTi-ڼBGJ ntAM7sEWGI= m֦t/]nt܀fmqMrE{XEI= cVl/B'J nغtM7rDYCI= zl} ^zPtOpö#tlCtܰi鄁G7gaJ nزtuÛ͈t%]@7lX:_JMwpvDZX.{6+.p58`C}-,[I= [z) Jg \Q= ؖhs VtOp&tlL*'aUsw&btܰA\+9ݺqaJ n؜t07]#퉶P%]@7lM:Tf@= آh TtOpƤ39 ncnMD%]@7lK:Rf@= ئh TtOpæy nԣn<%]@7lI:Pf@= تh StOpvntlV酥)'a3i7Sxc6,²tܰ0۩<ݯ[|aQJ n؆t-ՃnM6$%]@7lB:IQOwkE_X.{ $p[{5`0,FI= 뗎zӭy& K\=N @t nXtC$7 uKG$ԃn^vð%]@7Z:A tp'C^I= +?ݣ܋WtOpzRHh]1tܰZ: h_ Y%]@7U:< Nt$CTI= +s ݞ6TtOp:$H7gDc)'aYu{32ĔtܰBVt TA5m! I#PgB18! 
I7$PB/hʐPtOpú# !ݕDe('aMҁ nn 3^I= + nMp3·tO0 3ítܰ鸀"ݑ vpc%]@7F:-`1H7d}3VI= k X:%n Hg,Hn h 7TtOp*Ίt3pT)'a ARE8!BítпtLԉNvp#%]@7t/8uf1Ӣ4DI= Kg,P6 `h/ PtOpC TgG #N tгt@2nƉvpu%]@7t,Pu~[0=5\[I= J,V! `h_ UtOpCUH/YUtЧt4In&6pM%]@7t) hu/I5\OI= J,\' `h RtOpCұKW'J*eÕtЛt( goS%xL*J nK:X̀:Ym6\CI= ]I'"PgK8K߆tГt 0u{.D;n]I= H#P'L8W놙tЍt<uƤ.EnWI= HgtΙtph *',9n.maF%]@7t"Б:ge7̦ N:RLPtЉt@GIwZv0.{:H3> rfQtOp=\Yy;+a.%]@7,ӕ*БпT+3)'av.T,LHu0.{e*БgPtOpR]YٛBK0.{j"_-tܰHt@Gg@R9\ j]Yi#sHI= Ӿ<:ҞIKp.{fTXTg+'aY/M, N&u8WI= 2|eJgtdx6@Wtܰ ǮK,`t tl>/ծYJ nX㗥tDg@R; 8qUJgK$pbJ/մt%]@7,kR: X"̀ *'a F\Y nVK50MI= yG,`7FM,w t@GFN-zx t@GFO.x t@G&L/zy It@G&M0y it@G0Zz t@G&N1z8 נt@G&O2{8 Wt@G2Z{8 s.@,3Ḓ.{nO: #gM4|8 [;șS &tp[g_{Y9{/Ð.{n+O: #L7}h+'v.:rфXT %]@7ȥt@G.sKp [ᒓ {tps\pYc/3%]@7\Б'<@R  f+ҟo Б'=@R f ڗo",ٛbV1Z0%]@7k\}#,`7V3Z %]@7E}/,`74Z%]@7tM}?,`7vt/*b J n)LzbT \ ] ;;OzbT \ _Nc1-' Le\MJ*~͹[@O,ʜ8Kt_N\o/ϵIJ TIpw{O:bT \ il+ol`*sl.U\2tIJ TIp o@swSl.aT \ ens⫽ol`*sl.U\D{Sy[@?,ʜ8KtWQs/}|?ݻ9~- Le\MJxϾ 꽃jNml`*sl.U\:adS[ZIp-{w=nNlpk%]|G;)̷~/[-n _仯9Bfpst [+h+h+h+h+h+h+h+h+h+h+h+h+h+y'[o~9mzK:5Df ?OR%]0'ߚx|3r8 f[LX^-,7L6z[ऒ.azoO^p13rS76 fs~_[os9lzꟾ#F(=5O{ 1[̂V)0ϦIpW3kC]O|Oִ3rjf pTIa _~ޏ=733r1tL>X./ NOfgf ̳鵇nSJ`竅O>m?i9f 弳iuw7I%]07B왇}[a\:v^t32l#F&tT'0 r|ԳIp fq]%ӷ_ᗛ 32lg)y `9f e$Rh=Vƭ-7LL<7YJ` 0S`.MU  f,ǷL\v6 nQJ`&˅;n3r$F)[7뷆_0S`.M`.K]1|[N{ޥ)0& 0JI)y~?[oslt|zrL\p6 nQJ`F{灙s9lt>÷~/-yd\>7(%]0_仯Wֿe93r$F)h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+h+ϕ7ߥ[ut ۿ7nsfQ+1T_Q›~q0޿}߿7i"tJ nJ5̇_̵~TOם xn`J`%N7ÃN<(7%]'3س nnw4 ,BI;^l!7 nVk9;7%]Ô~G oݱ^ _ф]|뗾?W_O~LJ?~G?ܜ|[U}(9hS#6Op3M%]!}s74v_]wŽ/Wvv϶dzx(1eou_+UkGl'Gտ8xN].Xwu^Ʈ?|v./vB;n7'7?V{jڣ^0b7c.pJIYϸھhwfoLcp*o_9 ؏|}z~OՆ'>7_RwLQo8 Vdp wZkônsF!y!0w<`Xp3boF ~1mOIBy;hv:dA6#v^p3zJ`%N7kDžC<.O0;f7Qىs~c/~靃nFo>VpHDg܌-.X],OvݿxJӌ]Ẅ́ CWcfeSf npBI捯J#>˅Mc58Hզh#vׁoEIޏ:X" (ƸvLߎxSN:X1iJ`%&7uy͔]\W53v҄nٮmʈMn&pBI} N7SvqYpSoT4 
dw7ϝ1b#{͔N(f.. n˛_~̈́nlb `9kĞv34L-.X;:LeC61淏̈́n~6M|Έ7cL]- V;n_}ͨ]\rh'tJ,_܌ŅhƏjxq+y/^ ۹7෩<|Q9cF 8 V⼇ï<;sģvqapOED켲mأ7 M<+1ih'tJL n(޹-?n!(?~?'Fo~Ip;|#<=|o6y+ܼŸ?x_-nJ3&f7_a{;9,on7/wR{ZN7{>Yk0Zq%]Sǻ?i]\d$oOf'#6<{rhǕtJLn~b?L3O۱'5e/Ep n?yĞ16Fp3zJ`%&77p_,i] pr?wQ_<#4uĞ1?~ nFpTIfی?554c.n^lOݽewc6,ycO8b{q4oʱU+qFpO?lYeG5x/v1KpG .|eハ?Ef҈1ovp3vcJ`>Wr7|.x _5z_G6F柟}fozVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVVV Z^'IENDB`bayestestR/man/reshape_ci.Rd0000644000176200001440000000115613571067532015557 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reshape_ci.R \name{reshape_ci} \alias{reshape_ci} \title{Reshape CI between wide/long formats} \usage{ reshape_ci(x) } \arguments{ \item{x}{A data.frame containing \code{CI_low} and \code{CI_high}.} } \description{ Reshape CI between wide/long formats. } \examples{ library(bayestestR) x <- data.frame(replicate(4, rnorm(100))) x <- ci(x, ci = c(0.68, 0.89, 0.95)) reshape_ci(x) reshape_ci(reshape_ci(x)) x <- data.frame(replicate(4, rnorm(100))) x <- describe_posterior(x, ci = c(0.68, 0.89, 0.95)) reshape_ci(x) reshape_ci(reshape_ci(x)) } bayestestR/man/diagnostic_posterior.Rd0000644000176200001440000000721613616544116017710 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diagnostic_posterior.R \name{diagnostic_posterior} \alias{diagnostic_posterior} \alias{diagnostic_posterior.stanreg} \alias{diagnostic_posterior.brmsfit} \title{Posteriors Sampling Diagnostic} \usage{ diagnostic_posterior(posteriors, diagnostic = c("ESS", "Rhat"), ...) \method{diagnostic_posterior}{stanreg}( posteriors, diagnostic = "all", effects = c("fixed", "random", "all"), parameters = NULL, ... ) \method{diagnostic_posterior}{brmsfit}( posteriors, diagnostic = "all", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... 
) } \arguments{ \item{posteriors}{A stanreg or brms model.} \item{diagnostic}{Diagnostic metrics to compute. Character (vector) or list with one or more of these options: \code{"ESS"}, \code{"Rhat"}, \code{"MCSE"} or \code{"all"}.} \item{...}{Currently not used.} \item{effects}{Should parameters for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned.} \item{component}{Should all parameters, parameters for the conditional model, the zero-inflated part of the model, the dispersion term or the instrumental variables be returned? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variable (so called fixed-effects regressions). May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.} } \description{ Extract diagnostic metrics (Effective Sample Size (\code{ESS}), \code{Rhat} and Monte Carlo Standard Error \code{MCSE}). } \details{ \strong{Effective Sample Size (ESS)} should be as large as possible, although for most applications, an effective sample size greater than 1000 is sufficient for stable estimates (Bürkner, 2017). The ESS corresponds to the number of independent samples with the same estimation power as the N autocorrelated samples. It is a measure of \dQuote{how much independent information there is in autocorrelated chains} (\emph{Kruschke 2015, p182-3}). \cr \cr \strong{Rhat} should be the closest to 1. It should not be larger than 1.1 (Gelman and Rubin, 1992) or 1.01 (Vehtari et al., 2019). The split R-hat statistic quantifies the consistency of an ensemble of Markov chains. \cr \cr \strong{Monte Carlo Standard Error (MCSE)} is another measure of accuracy of the chains. 
It is defined as standard deviation of the chains divided by their effective sample size (the formula for \code{mcse()} is from Kruschke 2015, p. 187). The MCSE \dQuote{provides a quantitative suggestion of how big the estimation noise is}. } \examples{ \dontrun{ # rstanarm models # ----------------------------------------------- library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) diagnostic_posterior(model) # brms models # ----------------------------------------------- library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) diagnostic_posterior(model) } } \references{ \itemize{ \item Gelman, A., & Rubin, D. B. (1992). Inference from iterative simulation using multiple sequences. Statistical science, 7(4), 457-472. \item Vehtari, A., Gelman, A., Simpson, D., Carpenter, B., \& Bürkner, P. C. (2019). Rank-normalization, folding, and localization: An improved Rhat for assessing convergence of MCMC. arXiv preprint arXiv:1903.08008. \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. } } bayestestR/man/p_direction.Rd0000644000176200001440000001561513616544116015757 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_direction.R \name{p_direction} \alias{p_direction} \alias{pd} \alias{p_direction.numeric} \alias{p_direction.data.frame} \alias{p_direction.MCMCglmm} \alias{p_direction.emmGrid} \alias{p_direction.stanreg} \alias{p_direction.brmsfit} \alias{p_direction.BFBayesFactor} \title{Probability of Direction (pd)} \usage{ p_direction(x, ...) pd(x, ...) \method{p_direction}{numeric}(x, method = "direct", ...) \method{p_direction}{data.frame}(x, method = "direct", ...) \method{p_direction}{MCMCglmm}(x, method = "direct", ...) \method{p_direction}{emmGrid}(x, method = "direct", ...) \method{p_direction}{stanreg}( x, effects = c("fixed", "random", "all"), parameters = NULL, method = "direct", ... 
) \method{p_direction}{brmsfit}( x, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, method = "direct", ... ) \method{p_direction}{BFBayesFactor}(x, method = "direct", ...) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a Bayesian model (\code{stanreg}, \code{brmsfit} or \code{BayesFactor}).} \item{...}{Currently not used.} \item{method}{Can be \code{"direct"} or one of methods of \link[=estimate_density]{density estimation}, such as \code{"kernel"}, \code{"logspline"} or \code{"KernSmooth"}. If \code{"direct"} (default), the computation is based on the raw ratio of samples superior and inferior to 0. Else, the result is based on the \link[=auc]{Area under the Curve (AUC)} of the estimated \link[=estimate_density]{density} function.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \value{ Values between 0.5 and 1 corresponding to the probability of direction (pd). } \description{ Compute the \strong{Probability of Direction} (\strong{\emph{pd}}, also known as the Maximum Probability of Effect - \emph{MPE}). 
It varies between 50\% and 100\% (\emph{i.e.}, \code{0.5} and \code{1}) and can be interpreted as the probability (expressed in percentage) that a parameter (described by its posterior distribution) is strictly positive or negative (whichever is the most probable). It is mathematically defined as the proportion of the posterior distribution that is of the median's sign. Although differently expressed, this index is fairly similar (\emph{i.e.}, is strongly correlated) to the frequentist \strong{p-value}. } \details{ \subsection{What is the \emph{pd}?}{ The Probability of Direction (pd) is an index of effect existence, ranging from 50\% to 100\%, representing the certainty with which an effect goes in a particular direction (\emph{i.e.}, is positive or negative). Beyond its simplicity of interpretation, understanding and computation, this index also presents other interesting properties: \itemize{ \item It is independent from the model: It is solely based on the posterior distributions and does not require any additional information from the data or the model. \item It is robust to the scale of both the response variable and the predictors. \item It is strongly correlated with the frequentist p-value, and can thus be used to draw parallels and give some reference to readers non-familiar with Bayesian statistics. } } \subsection{Relationship with the p-value}{ In most cases, it seems that the \emph{pd} has a direct correspondence with the frequentist one-sided \emph{p}-value through the formula \ifelse{html}{\out{pone sided = 1 - p(d)/100}}{\eqn{p_{one sided}=1-\frac{p_{d}}{100}}} and to the two-sided p-value (the most commonly reported one) through the formula \ifelse{html}{\out{ptwo sided = 2 * (1 - p(d)/100)}}{\eqn{p_{two sided}=2*(1-\frac{p_{d}}{100})}}. Thus, a two-sided p-value of respectively \code{.1}, \code{.05}, \code{.01} and \code{.001} would correspond approximately to a \emph{pd} of 95\%, 97.5\%, 99.5\% and 99.95\%. 
} \subsection{Methods of computation}{ The most simple and direct way to compute the \emph{pd} is to 1) look at the median's sign, 2) select the portion of the posterior of the same sign and 3) compute the percentage that this portion represents. This "simple" method is the most straightforward, but its precision is directly tied to the number of posterior draws. The second approach relies on \link[=estimate_density]{density estimation}. It starts by estimating the density function (for which many methods are available), and then computing the \link[=area_under_curve]{area under the curve} (AUC) of the density curve on the other side of 0. } \subsection{Strengths and Limitations}{ \strong{Strengths:} Straightforward computation and interpretation. Objective property of the posterior distribution. 1:1 correspondence with the frequentist p-value. \cr \cr \strong{Limitations:} Limited information favoring the null hypothesis. } } \examples{ library(bayestestR) # Simulate a posterior distribution of mean 1 and SD 1 # ---------------------------------------------------- posterior <- rnorm(1000, mean = 1, sd = 1) p_direction(posterior) p_direction(posterior, method = "kernel") # Simulate a dataframe of posterior distributions # ----------------------------------------------- df <- data.frame(replicate(4, rnorm(100))) p_direction(df) p_direction(df, method = "kernel") \dontrun{ # rstanarm models # ----------------------------------------------- if (require("rstanarm")) { model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars, chains = 2, refresh = 0 ) p_direction(model) p_direction(model, method = "kernel") } # emmeans # ----------------------------------------------- if (require("emmeans")) { p_direction(emtrends(model, ~1, "wt")) } # brms models # ----------------------------------------------- if (require("brms")) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) p_direction(model) p_direction(model, method = "kernel") } # BayesFactor objects # 
----------------------------------------------- if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) p_direction(bf) p_direction(bf, method = "kernel") } } } \references{ Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} } bayestestR/man/simulate_correlation.Rd0000644000176200001440000000346413571067532017705 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulate_data.R \name{simulate_correlation} \alias{simulate_correlation} \alias{simulate_ttest} \title{Data Simulation} \usage{ simulate_correlation(n = 100, r = 0.5, mean = 0, sd = 1, names = NULL, ...) simulate_ttest(n = 100, d = 0.5, names = NULL, ...) } \arguments{ \item{n}{The number of observations to be generated.} \item{r}{A value or vector corresponding to the desired correlation coefficients.} \item{mean}{A value or vector corresponding to the mean of the variables.} \item{sd}{A value or vector corresponding to the SD of the variables.} \item{names}{A character vector of desired variable names.} \item{...}{Arguments passed to or from other methods.} \item{d}{A value or vector corresponding to the desired difference between the groups.} } \description{ Simulate data with specific characteristics. 
} \examples{ # Correlation -------------------------------- data <- simulate_correlation(r = 0.5) plot(data$V1, data$V2) cor.test(data$V1, data$V2) summary(lm(V2 ~ V1, data = data)) # Specify mean and SD data <- simulate_correlation(r = 0.5, n = 50, mean = c(0, 1), sd = c(0.7, 1.7)) cor.test(data$V1, data$V2) round(c(mean(data$V1), sd(data$V1)), 1) round(c(mean(data$V2), sd(data$V2)), 1) summary(lm(V2 ~ V1, data = data)) # Generate multiple variables cor_matrix <- matrix(c( 1.0, 0.2, 0.4, 0.2, 1.0, 0.3, 0.4, 0.3, 1.0 ), nrow = 3 ) data <- simulate_correlation(r = cor_matrix, names = c("y", "x1", "x2")) cor(data) summary(lm(y ~ x1, data = data)) # t-test -------------------------------- data <- simulate_ttest(n = 30, d = 0.3) plot(data$V1, data$V0) round(c(mean(data$V1), sd(data$V1)), 1) diff(t.test(data$V1 ~ data$V0)$estimate) summary(lm(V1 ~ V0, data = data)) summary(glm(V0 ~ V1, data = data, family = "binomial")) } bayestestR/man/dot-extract_priors_rstanarm.Rd0000644000176200001440000000055513620150257021212 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sensitivity_to_prior.R \name{.extract_priors_rstanarm} \alias{.extract_priors_rstanarm} \title{Extract and Returns the priors formatted for rstanarm} \usage{ .extract_priors_rstanarm(model, ...) } \description{ Extract and Returns the priors formatted for rstanarm } \keyword{internal} bayestestR/man/p_rope.Rd0000644000176200001440000000457413571135410014737 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_rope.R \name{p_rope} \alias{p_rope} \alias{p_rope.default} \alias{p_rope.numeric} \alias{p_rope.data.frame} \alias{p_rope.emmGrid} \alias{p_rope.BFBayesFactor} \alias{p_rope.MCMCglmm} \alias{p_rope.stanreg} \alias{p_rope.brmsfit} \title{Probability of not being in ROPE} \usage{ p_rope(x, ...) \method{p_rope}{default}(x, ...) \method{p_rope}{numeric}(x, range = "default", ...) 
\method{p_rope}{data.frame}(x, range = "default", ...) \method{p_rope}{emmGrid}(x, range = "default", ...) \method{p_rope}{BFBayesFactor}(x, range = "default", ...) \method{p_rope}{MCMCglmm}(x, range = "default", ...) \method{p_rope}{stanreg}( x, range = "default", effects = c("fixed", "random", "all"), parameters = NULL, ... ) \method{p_rope}{brmsfit}( x, range = "default", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{range}{ROPE's lower and higher bounds. Should be a vector of length two (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. If \code{"default"}, the range is set to \code{c(-0.1, 0.1)} if input is a vector, and based on \code{\link[=rope_range]{rope_range()}} if a Bayesian model is provided.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \description{ Compute the proportion of the posterior distribution that doesn't lie within a region of practical equivalence (ROPE). It is equivalent to running \code{rope(..., ci = 1)}. 
} \examples{ library(bayestestR) p_rope(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) p_rope(x = mtcars, range = c(-0.1, 0.1)) } bayestestR/man/bayesfactor_inclusion.Rd0000644000176200001440000000636313616542434020046 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor_inclusion.R \name{bayesfactor_inclusion} \alias{bayesfactor_inclusion} \alias{bf_inclusion} \title{Inclusion Bayes Factors for testing predictors across Bayesian models} \usage{ bayesfactor_inclusion(models, match_models = FALSE, prior_odds = NULL, ...) bf_inclusion(models, match_models = FALSE, prior_odds = NULL, ...) } \arguments{ \item{models}{An object of class \code{\link{bayesfactor_models}} or \code{BFBayesFactor}.} \item{match_models}{See details.} \item{prior_odds}{Optional vector of prior odds for the models. See \code{BayesFactor::priorOdds<-}.} \item{...}{Arguments passed to or from other methods.} } \value{ a data frame containing the prior and posterior probabilities, and BF for each effect. } \description{ The \code{bf_*} function is an alias of the main function. \cr \cr For more info, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}. } \details{ Inclusion Bayes factors answer the question: Are the observed data more probable under models with a particular effect, than they are under models without that particular effect? In other words, on average - are models with effect \eqn{X} more likely to have produced the observed data than models without effect \eqn{X}? \subsection{Match Models}{ If \code{match_models=FALSE} (default), Inclusion BFs are computed by comparing all models with a predictor against all models without that predictor. 
If \code{TRUE}, comparison is restricted to models that (1) do not include any interactions with the predictor of interest; (2) for interaction predictors, averaging is done only across models that containe the main effect from which the interaction predictor is comprised. } } \note{ Random effects in the \code{lme} style will be displayed as interactions: i.e., \code{(X|G)} will become \code{1:G} and \code{X:G}. } \examples{ library(bayestestR) # Using bayesfactor_models: # ------------------------------ mo0 <- lm(Sepal.Length ~ 1, data = iris) mo1 <- lm(Sepal.Length ~ Species, data = iris) mo2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) mo3 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) BFmodels <- bayesfactor_models(mo1, mo2, mo3, denominator = mo0) bayesfactor_inclusion(BFmodels) \dontrun{ # BayesFactor # ------------------------------- library(BayesFactor) BF <- generalTestBF(len ~ supp * dose, ToothGrowth, progress = FALSE) bayesfactor_inclusion(BF) # compare only matched models: bayesfactor_inclusion(BF, match_models = TRUE) } } \references{ \itemize{ \item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. (2019, March 25). A conceptual introduction to Bayesian Model Averaging. \doi{10.31234/osf.io/wgb64} \item Clyde, M. A., Ghosh, J., & Littman, M. L. (2011). Bayesian adaptive sampling for variable selection and model averaging. Journal of Computational and Graphical Statistics, 20(1), 80-101. \item Mathot, S. (2017). Bayes like a Baws: Interpreting Bayesian Repeated Measures in JASP [Blog post]. Retrieved from https://www.cogsci.nl/blog/interpreting-bayesian-repeated-measures-in-jasp } } \seealso{ \code{\link{weighted_posteriors}} for Bayesian parameter averaging. } \author{ Mattan S. 
Ben-Shachar } bayestestR/man/describe_prior.Rd0000644000176200001440000000161113616544116016442 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/describe_prior.R \name{describe_prior} \alias{describe_prior} \title{Describe Priors} \usage{ describe_prior(model, ...) } \arguments{ \item{model}{A Bayesian model.} \item{...}{Currently not used.} } \description{ Returns a summary of the priors used in the model. } \examples{ \dontrun{ library(bayestestR) # rstanarm models # ----------------------------------------------- if (require("rstanarm")) { model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) describe_prior(model) } # brms models # ----------------------------------------------- if (require("brms")) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) describe_prior(model) } # BayesFactor objects # ----------------------------------------------- if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) describe_prior(bf) } } } bayestestR/man/rope_range.Rd0000644000176200001440000000457213616544116015601 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rope_range.R \name{rope_range} \alias{rope_range} \title{Find Default Equivalence (ROPE) Region Bounds} \usage{ rope_range(x, ...) } \arguments{ \item{x}{A \code{stanreg}, \code{brmsfit} or \code{BFBayesFactor} object.} \item{...}{Currently not used.} } \description{ This function attempts at automatically finding suitable "default" values for the Region Of Practical Equivalence (ROPE). } \details{ \cite{Kruschke (2018)} suggests that the region of practical equivalence could be set, by default, to a range from \code{-0.1} to \code{0.1} of a standardized parameter (negligible effect size according to Cohen, 1988). \itemize{ \item For \strong{linear models (lm)}, this can be generalised to \ifelse{html}{\out{-0.1 * SDy, 0.1 * SDy}}{\eqn{[-0.1*SD_{y}, 0.1*SD_{y}]}}. 
\item For \strong{logistic models}, the parameters expressed in log odds ratio can be converted to standardized difference through the formula \ifelse{html}{\out{π/√(3)}}{\eqn{\pi/\sqrt{3}}}, resulting in a range of \code{-0.18} to \code{0.18}. \item For other models with \strong{binary outcome}, it is strongly recommended to manually specify the rope argument. Currently, the same default is applied that for logistic models. \item For \strong{t-tests}, the standard deviation of the response is used, similarly to linear models (see above). \item For \strong{correlations}, \code{-0.05, 0.05} is used, i.e., half the value of a negligible correlation as suggested by Cohen's (1988) rules of thumb. \item For all other models, \code{-0.1, 0.1} is used to determine the ROPE limits, but it is strongly advised to specify it manually. } } \examples{ \dontrun{ if (require("rstanarm")) { model <- stan_glm( mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0 ) rope_range(model) model <- stan_glm(vs ~ mpg, data = mtcars, family = "binomial") rope_range(model) } if (require("brms")) { model <- brm(mpg ~ wt + cyl, data = mtcars) rope_range(model) } if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) rope_range(bf) } } } \references{ Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. } bayestestR/man/rope.Rd0000644000176200001440000001722713616544116014426 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rope.R \name{rope} \alias{rope} \alias{rope.default} \alias{rope.numeric} \alias{rope.data.frame} \alias{rope.emmGrid} \alias{rope.BFBayesFactor} \alias{rope.MCMCglmm} \alias{rope.stanreg} \alias{rope.brmsfit} \title{Region of Practical Equivalence (ROPE)} \usage{ rope(x, ...) \method{rope}{default}(x, ...) 
\method{rope}{numeric}(x, range = "default", ci = 0.89, ci_method = "HDI", verbose = TRUE, ...) \method{rope}{data.frame}(x, range = "default", ci = 0.89, ci_method = "HDI", verbose = TRUE, ...) \method{rope}{emmGrid}(x, range = "default", ci = 0.89, ci_method = "HDI", verbose = TRUE, ...) \method{rope}{BFBayesFactor}(x, range = "default", ci = 0.89, ci_method = "HDI", verbose = TRUE, ...) \method{rope}{MCMCglmm}(x, range = "default", ci = 0.89, ci_method = "HDI", verbose = TRUE, ...) \method{rope}{stanreg}( x, range = "default", ci = 0.89, ci_method = "HDI", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ... ) \method{rope}{brmsfit}( x, range = "default", ci = 0.89, ci_method = "HDI", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{range}{ROPE's lower and higher bounds. Should be a vector of length two (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. If \code{"default"}, the range is set to \code{c(-0.1, 0.1)} if input is a vector, and based on \code{\link[=rope_range]{rope_range()}} if a Bayesian model is provided.} \item{ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{ci_method}{The type of interval to use to quantify the percentage in ROPE. Can be 'HDI' (default) or 'ETI'. See \code{\link{ci}}.} \item{verbose}{Toggle off warnings.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. 
Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \description{ Compute the proportion of the HDI (default to the 89\% HDI) of a posterior distribution that lies within a region of practical equivalence. } \details{ \subsection{ROPE}{ Statistically, the probability of a posterior distribution of being different from 0 does not make much sense (the probability of a single value null hypothesis in a continuous distribution is 0). Therefore, the idea underlining ROPE is to let the user define an area around the null value enclosing values that are \emph{equivalent to the null} value for practical purposes (\cite{Kruschke 2010, 2011, 2014}). \cr \cr Kruschke (2018) suggests that such null value could be set, by default, to the -0.1 to 0.1 range of a standardized parameter (negligible effect size according to Cohen, 1988). This could be generalized: For instance, for linear models, the ROPE could be set as \code{0 +/- .1 * sd(y)}. This ROPE range can be automatically computed for models using the \link{rope_range} function. \cr \cr Kruschke (2010, 2011, 2014) suggests using the proportion of the 95\% (or 89\%, considered more stable) \link[=hdi]{HDI} that falls within the ROPE as an index for "null-hypothesis" testing (as understood under the Bayesian framework, see \code{\link[=equivalence_test]{equivalence_test()}}). 
} \subsection{Sensitivity to parameter's scale}{ It is important to consider the unit (i.e., the scale) of the predictors when using an index based on the ROPE, as the correct interpretation of the ROPE as representing a region of practical equivalence to zero is dependent on the scale of the predictors. Indeed, the percentage in ROPE depend on the unit of its parameter. In other words, as the ROPE represents a fixed portion of the response's scale, its proximity with a coefficient depends on the scale of the coefficient itself. } \subsection{Multicollinearity: Non-independent covariates}{ When parameters show strong correlations, i.e. when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are parameters that only have partial overlap with the ROPE region. In case of collinearity, the (joint) distributions of these parameters may either get an increased or decreased ROPE, which means that inferences based on \code{rope()} are inappropriate (\cite{Kruschke 2014, 340f}). \cr \cr \code{rope()} performs a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. An even more sophisticated check is the projection predictive variable selection (\cite{Piironen and Vehtari 2017}). } \subsection{Strengths and Limitations}{ \strong{Strengths:} Provides information related to the practical relevance of the effects. \cr \cr \strong{Limitations:} A ROPE range needs to be arbitrarily defined. Sensitive to the scale (the unit) of the predictors. Not sensitive to highly significant effects. 
} } \examples{ library(bayestestR) rope(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) rope(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1)) rope(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1)) rope(x = rnorm(1000, 1, 1), ci = c(.90, .95)) \dontrun{ library(rstanarm) model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) rope(model) rope(model, ci = c(.90, .95)) library(emmeans) rope(emtrends(model, ~1, "wt"), ci = c(.90, .95)) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) rope(model) rope(model, ci = c(.90, .95)) library(BayesFactor) bf <- ttestBF(x = rnorm(100, 1, 1)) rope(bf) rope(bf, ci = c(.90, .95)) } } \references{ \itemize{ \item Cohen, J. (1988). Statistical power analysis for the behavioural sciences. \item Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. Trends in cognitive sciences, 14(7), 293-300. \doi{10.1016/j.tics.2010.05.001}. \item Kruschke, J. K. (2011). Bayesian assessment of null values via parameter estimation and model comparison. Perspectives on Psychological Science, 6(3), 299-312. \doi{10.1177/1745691611406925}. \item Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. \doi{10.1177/2515245918771304}. \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. \item Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} \item Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735. 
\doi{10.1007/s11222-016-9649-y} } } bayestestR/man/density_at.Rd0000644000176200001440000000166313571067532015623 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate_density.R \name{density_at} \alias{density_at} \title{Density Probability at a Given Value} \usage{ density_at(posterior, x, precision = 2^10, method = "kernel", ...) } \arguments{ \item{posterior}{Vector representing a posterior distribution.} \item{x}{The value of which to get the approximate probability.} \item{precision}{Number of points of density data. See the \code{n} parameter in \link[=density]{density}.} \item{method}{Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}.} \item{...}{Currently not used.} } \description{ Compute the density value at a given point of a distribution (i.e., the value of the \code{y} axis of a value \code{x} of a distribution). } \examples{ library(bayestestR) posterior <- distribution_normal(n = 10) density_at(posterior, 0) density_at(posterior, c(0, 1)) } bayestestR/man/simulate_prior.Rd0000644000176200001440000000123413616544116016506 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulate_priors.R \name{simulate_prior} \alias{simulate_prior} \title{Returns Priors of a Model as Empirical Distributions} \usage{ simulate_prior(model, n = 1000, ...) } \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, or \code{brmsfit} object.} \item{n}{Size of the simulated prior distributions.} \item{...}{Currently not used.} } \description{ Transforms priors information to actual distributions. 
} \examples{ \dontrun{ library(bayestestR) if (require("rstanarm")) { model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) simulate_prior(model) } } } bayestestR/man/dot-flatten_list.Rd0000644000176200001440000000057013620150257016720 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils_flatten_list.R \name{.flatten_list} \alias{.flatten_list} \title{Flatten a list} \usage{ .flatten_list(object, name = "name") } \arguments{ \item{object}{A list.} \item{name}{Name of column of keys in the case the output is a dataframe.} } \description{ Flatten a list } \keyword{internal} bayestestR/man/map_estimate.Rd0000644000176200001440000000604013616544116016120 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/map_estimate.R \name{map_estimate} \alias{map_estimate} \alias{map_estimate.numeric} \alias{map_estimate.stanreg} \alias{map_estimate.brmsfit} \title{Maximum A Posteriori probability estimate (MAP)} \usage{ map_estimate(x, precision = 2^10, method = "kernel", ...) \method{map_estimate}{numeric}(x, precision = 2^10, method = "kernel", ...) \method{map_estimate}{stanreg}( x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), parameters = NULL, ... ) \method{map_estimate}{brmsfit}( x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg}, \code{brmsfit} or a \code{BayesFactor} model.} \item{precision}{Number of points of density data. See the \code{n} parameter in \link[=density]{density}.} \item{method}{Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}.} \item{...}{Currently not used.} \item{effects}{Should results for fixed effects, random effects or both be returned? 
Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{component}{Should results for all parameters, parameters for the conditional model or the zero-inflated part of the model be returned? May be abbreviated. Only applies to \pkg{brms}-models.} } \value{ A numeric value if \code{posterior} is a vector. If \code{posterior} is a model-object, returns a data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{MAP_Estimate} The MAP estimate for the posterior or each model parameter. } } \description{ Find the \strong{Highest Maximum A Posteriori probability estimate (MAP)} of a posterior, i.e., the value associated with the highest probability density (the "peak" of the posterior distribution). In other words, it is an estimation of the \emph{mode} for continuous parameters. Note that this function relies on \link{estimate_density}, which by default uses a different smoothing bandwidth (\code{"SJ"}) compared to the legacy default implemented the base R \link{density} function (\code{"nrd0"}). 
} \examples{ \dontrun{ library(bayestestR) posterior <- rnorm(10000) map_estimate(posterior) plot(density(posterior)) abline(v = map_estimate(posterior), col = "red") library(rstanarm) model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) map_estimate(model) library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) map_estimate(model) } } bayestestR/man/check_prior.Rd0000644000176200001440000000334413616544116015744 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_prior.R \name{check_prior} \alias{check_prior} \title{Check if Prior is Informative} \usage{ check_prior(model, method = "gelman", simulate_priors = TRUE, ...) } \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, or \code{brmsfit} object.} \item{method}{Can be "gelman" or "lakeland". For the "gelman" method, if the SD of the posterior is more than 0.1 times the SD of the prior, then the prior is considered as informative. For the "lakeland" method, the prior is considered as informative if the posterior falls within the 95\% HDI of the prior.} \item{simulate_priors}{Should prior distributions be simulated using \code{simulate_prior} (default; faster) or sampled (slower, more accurate).} \item{...}{Currently not used.} } \description{ Performs a simple test to check whether the prior is informative to the posterior. This idea, and the accompanying heuristics, were discussed in \href{https://statmodeling.stat.columbia.edu/2019/08/10/}{this blogpost}. 
} \examples{ \dontrun{ library(bayestestR) if (require("rstanarm")) { model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) check_prior(model, method = "gelman") check_prior(model, method = "lakeland") # An extreme example where both methods diverge: model <- stan_glm(mpg ~ wt, data = mtcars[1:3,], prior = normal(-3.3, 1, FALSE), prior_intercept = normal(0, 1000, FALSE), refresh = 0) check_prior(model, method = "gelman") check_prior(model, method = "lakeland") plot(si(model)) # can provide visual confirmation to the Lakeland method } } } \references{ https://statmodeling.stat.columbia.edu/2019/08/10/ } bayestestR/man/as.numeric.p_direction.Rd0000644000176200001440000000133213571076365020017 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/map_estimate.R, R/mhdior.R, R/p_direction.R, % R/p_map.R, R/p_significance.R \name{as.numeric.map_estimate} \alias{as.numeric.map_estimate} \alias{as.numeric.mhdior} \alias{as.numeric.p_direction} \alias{as.numeric.p_map} \alias{as.numeric.p_significance} \title{Convert to Numeric} \usage{ \method{as.numeric}{map_estimate}(x, ...) \method{as.numeric}{mhdior}(x, ...) \method{as.numeric}{p_direction}(x, ...) \method{as.numeric}{p_map}(x, ...) \method{as.numeric}{p_significance}(x, ...) } \arguments{ \item{x}{object to be coerced or tested.} \item{...}{further arguments passed to or from other methods.} } \description{ Convert to Numeric } bayestestR/man/mcse.Rd0000644000176200001440000000303313616544116014376 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mcse.R \name{mcse} \alias{mcse} \alias{mcse.stanreg} \title{Monte-Carlo Standard Error (MCSE)} \usage{ mcse(model, ...) \method{mcse}{stanreg}(model, effects = c("fixed", "random", "all"), parameters = NULL, ...) 
} \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, or \code{brmsfit} object.} \item{...}{Currently not used.} \item{effects}{Should results for fixed effects, random effects or both be returned? Only applies to mixed models. May be abbreviated.} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ This function returns the Monte Carlo Standard Error (MCSE). } \details{ \strong{Monte Carlo Standard Error (MCSE)} is another measure of accuracy of the chains. It is defined as standard deviation of the chains divided by their effective sample size (the formula for \code{mcse()} is from Kruschke 2015, p. 187). The MCSE \dQuote{provides a quantitative suggestion of how big the estimation noise is}. } \examples{ \dontrun{ library(bayestestR) library(rstanarm) model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) mcse(model) } } \references{ Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. } bayestestR/man/sensitivity_to_prior.Rd0000644000176200001440000000320713616544116017761 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sensitivity_to_prior.R \name{sensitivity_to_prior} \alias{sensitivity_to_prior} \title{Sensitivity to Prior} \usage{ sensitivity_to_prior(model, index = "Median", magnitude = 10, ...) } \arguments{ \item{model}{A Bayesian model (\code{stanreg} or \code{brmsfit}).} \item{index}{The indices from which to compute the sensitivity. Can be one or multiple names of the columns returned by \code{describe_posterior}. 
The case is important here (e.g., write 'Median' instead of 'median').} \item{magnitude}{This represent the magnitude by which to shift the antagonistic prior (to test the sensitivity). For instance, a magnitude of 10 (default) means that the mode wil be updated with a prior located at 10 standard deviations from its original location.} \item{...}{Arguments passed to or from other methods.} } \description{ Computes the sensitivity to priors specification. This represents the proportion of change in some indices when the model is fitted with an antagonistic prior (a prior of same shape located on the opposite of the effect). } \examples{ \dontrun{ library(bayestestR) # rstanarm models # ----------------------------------------------- if (require("rstanarm")) { model <- rstanarm::stan_glm(mpg ~ wt, data = mtcars) sensitivity_to_prior(model) model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) sensitivity_to_prior(model, index = c("Median", "MAP")) } # brms models # ----------------------------------------------- if (require("brms")) { model <- brms::brm(mpg ~ wt + cyl, data = mtcars) # sensitivity_to_prior(model) } } } \seealso{ DescTools } bayestestR/DESCRIPTION0000644000176200001440000000533213620704270014111 0ustar liggesusersPackage: bayestestR Type: Package Title: Understand and Describe Bayesian Models and Posterior Distributions Version: 0.5.2 Authors@R: c( person("Dominique", "Makowski", email = "dom.makowski@gmail.com", role = c("aut", "cre"), comment = c(ORCID = "0000-0001-5375-9967")), person("Daniel", "Lüdecke", role = c("aut"), email = "d.luedecke@uke.de", comment = c(ORCID = "0000-0002-8895-3206")), person("Mattan S.", "Ben-Shachar", role = c("aut"), email = "matanshm@post.bgu.ac.il", comment = c(ORCID = "0000-0002-4287-4801")), person("Michael D.", "Wilson", role = c("aut"), email = "michael.d.wilson@curtin.edu.au", comment = c(ORCID = "0000-0003-4143-7308")), person("Paul-Christian", "Bürkner", role = c("rev"), email = 
"paul.buerkner@gmail.com"), person("Tristan", "Mahr", role = c("rev"), email = "tristan.mahr@wisc.edu", comment = c(ORCID = "0000-0002-8890-5116")), person("Henrik", "Singmann", role = c("ctb"), email = "singmann@gmail.com", comment = c(ORCID = "0000-0002-4842-3657")), person("Quentin F.", "Gronau", role = c("ctb"), comment = c(ORCID = "0000-0001-5510-6943")) ) Maintainer: Dominique Makowski URL: https://easystats.github.io/bayestestR/ BugReports: https://github.com/easystats/bayestestR/issues Description: Provides utilities to describe posterior distributions and Bayesian models. It includes point-estimates such as Maximum A Posteriori (MAP), measures of dispersion (Highest Density Interval - HDI; Kruschke, 2015 ) and indices used for null-hypothesis testing (such as ROPE percentage, pd and Bayes factors). License: GPL-3 Encoding: UTF-8 LazyData: true Depends: R (>= 3.0) Imports: insight (>= 0.8.0), methods, stats, utils Suggests: BayesFactor, bridgesampling, brms, broom, covr, dplyr, emmeans, GGally, ggplot2, ggridges, KernSmooth, knitr, MASS, mclust, modelbased, lme4, logspline, performance, rmarkdown, rstan, rstanarm, see, stringr, testthat, tidyr, tweedie RoxygenNote: 7.0.2 Language: en-GB VignetteBuilder: knitr NeedsCompilation: no Packaged: 2020-02-10 03:44:33 UTC; Dom Author: Dominique Makowski [aut, cre] (), Daniel Lüdecke [aut] (), Mattan S. Ben-Shachar [aut] (), Michael D. Wilson [aut] (), Paul-Christian Bürkner [rev], Tristan Mahr [rev] (), Henrik Singmann [ctb] (), Quentin F. 
Gronau [ctb] () Repository: CRAN Date/Publication: 2020-02-12 05:10:16 UTC bayestestR/build/0000755000176200001440000000000013620150636013500 5ustar liggesusersbayestestR/build/vignette.rds0000644000176200001440000000117513620150636016043 0ustar liggesusersTMo@u@(P>$DBJ)FQEn^#]ޒƟJ;&MSx}ffMf~)8=\Y<7c|8%gO&tO]%d\63=wxw5ڵ_$ ߋL~?|{T*՞ڶg?;Sm ǶTGc4z-2zc,o1r7\T]3W[n{Pcli즟O֞NWs#ule(l{pr{]TW/o7}m׭7]+`xs97ٜ?{Lqsti+ LZ94@/Wfre啇s3U]^_XV9wf?k +U23o_ױ ?A?5zfmSy[gm[0L_@<6^J4.f%]st\ff4~}[v' r H#%?* XG`hqEQ|̍1; Sy&|W%\@<[IAtTRJD<Yhbi?! 4kP᳐mbC hb(IH1Za7^-rX5<,a7$m9l#"$5n)M.k{Qh>3q>PAxbdSNaeUv{|rV[߶yݽ f}YvaYda1i'& ћ6D{pDuhKauiL,%. :+.]"LXCɻvW*7:NώZsU`^) | Z԰/?C`d"M_JxYD$ <w0( -ȷv߸EO~% 'oA~9ʹ |pVA3Ob7\l4 "OiA "oI;!Wn60u](9Ldx 5d/Pi6gP6ƀ{"oI~`~滭 k%S DaGy_ѱ22voBXyDh=<9+M){yM)%w k~SJhOP7P4~@Dx_}M'rwb-q{ɛrX6+nK\{x[52EoN%$y-P[Hڻ`E4Pģ^ѡKdVj,/ZYg9䱨IpmfYr\b[ڢQ2\Cw2އ7n{ML/x(!!䇉ێeT kA"í50θt.Yx⎟<(JGBav3cpN6;ꗰ{ +\Gm7N2P5sjWf`R^Gː_V]*"m9;$`,…[~~`;rg%%`2 ׳ySlf0Gsy vA(] 7[qtYu\uK[.f K\֦ٮ+r)*_ ߁43Ȗ%Sw]]?\'-z/&{.)U{K,󺓳 o& VF>ϣ/=JzT^_TtVt*16}Ztt{h\lnN@B~5vnf.@IgU3Ok wjLpcՒf/*?lN"3gcglu7f:kb(aŲ-붶~/Ob$sĐ xLzdSBtfē Mpr|J;Jxbx ZJ#JUEnJԫWs4z{g}uQ1a&ZocDgZ@< 1Ju߈\5P^ϴ{4;E3lJe&Tʟ&<rT{hOLm[`D^tCsWQvԀ vJ<t^/hՒۗaBXwUTOdͦTT2"EZiKa:AQf[iX%uЎ3s6ITnBx^Y?mȚ:pr5͓xMyFY3".}5ތ(NT7 F*NTT«ݜ![7۽54 :9tN./ANnئ:^dCf*3x'W/#ߍwl 9oh,0pwY_+[Ûf/i_ >K Nx $21y&% t3!Fvɸ1"KtKs30n5/IݴʆUAwqr'OeP#dr#"WRTR,s d9]E'6'XMVO֊31y ##,u! V竝Q˺^A&[n߸M^e;βG7nK=ST[a,-ޱZuvt >śjEzIL>椋I8F\>bLN|μoZ_ϳ'Q$oI'㇐Qp"J+Xxdh]ytjv'QGL~dZRæv`jF*^ET. t(MLLΘ$a CCm&Q{p_Uapj f[lԪF?.k&$ ^MͬCH,=<84ʿ@sE6|/i>WܪV !o ֎Vܱ`1BVE]oCrGJ+;<\窝8.UOZof B2ҽz>Ρ)vŋ磼쭙𻐿+vѸʴ|ya/o]']}OT=apc,YznfrۜY^}ڬط4^-XߢtPQyZuSrHa Ouު /9TuUzbW@qKX׼SҜ9Zo3I/[L/7čVKD 5Y^_^LR{zRV; Ms(%'l';u3y# ߙkNaQx+:6`r`ռg/Z3lpݲfzN!޷uߵVqe94F+hf6lG/xck5jVMxۓ4Ho߱Jfk Le!P= ߄-0)Tk i 3! 
6EͰYR.)facwiqpֽ[7j4Wt)]s-~m|vsd-y|}:_smC-޾1]YY%'gd"CHHd sS5)߅w]|APizߙ_5].ۍ/di}&`sۺ ڿ܏[T{>N\,T.dSz]n,y[' ($ ƑPr@%l"7`r-;5[Em" Y8wVr˥bIŨ3%.`\ߡC6t 'f--77tZZn%C%5Ҫ< # k<$6&RzXKM-9wQZgl_ow+`o۵>Sk yc^n ysqE@u/Z{;C|_Ͻso(>o/K<(^;P_o7:6ߖ^e#q@ۢҾ5]lQ˕ {aL79}{#U1/I{GhHGlyu;Uq g}6󦾒e.ګ7ecކ;7yŶq'8ްNf(o ua;wE֛فQ: bnfVeM);7dЪԧYWhsSSl&YX"'kH%4JB6o*UNP.mKV*]3oxw%| ~eBȟI̷Ff'fN?{^2e C<1$zXW{r`OEp]"DŽDcl٘0lNE}= |B^R+C')zQ~(sO(} 3|3oUl%SI{뿓 zn20smzczpbx6`Nܟu/lP`{ԇo 1ƞ7ُ(dzZ ?]xsd( ] tI7뼕5>̈́ٷ9pFTL҅H_ 6UmXGTQ&4%xv7wBV*Tk3CNiz(c]KS=݋5l&.>J~xYm\*GgxKk2H=f!g؄ۖ!i:N-PAuר*h wl\ ApN$.phɣi'&glPjO*t3vmcQZ;-.Hɷ G6vǥ+QC =$PnXxgj݊'zmWƤeLeˣ b7I8<0aѹ%wt AXӹmB:I8 k "+.B^]%0k|lk߃^É) 4TڛB+'IpoM +橰j׫%)K:Е7rҿ*\+Uy9/|I߭F9:\{.ԏ9Ezh",dZ6m  y緈^ށ|^|񫳬Ow >u`!7˙; 4Nt$ш c@EZOkb7P`+mr/~Gx ~hE/6x(I(vVu=-‰V ]&3:߭P;utgm.WKN:qwMjxLmgo0li8cFN/Jo%VOB^M k[B=X"Bc< 01O'fzP4V0ڡإRرwGAKpkxLbɦ"R7nH($&Z,JC@<+Nȝͯa|׎w q)a.~MbalwT3B['5 WEq6" M{A nk[&k9jicEɷ JǥC+{;FT93]XAWX6zIr=|&i@* ss3G3Ҫcq([N0P9FT1%n}ߘRr{ m}m$x 9Cf<7>v<.R~`2)( vw*J#ƿ(mS(a;-?@ Oohs]WYAl|ރ |rLM-ǀ' HN- f gb%"S7B-Fe ~ 9 T"XXM!۲٢7vտ!}|SS5ji,edsMX6NYX":,M50Ē]kD>I| s7+2č.՗K1_LFxv`7J)o'}Ð7>v@V#Tw0FWUeEmYgn6*{"x2dPp8TZhvpo o 5GWoB~3ەvC[jEQہ-s| 9lvsOhc0 ; Y}~^{dZtʂX^OAV6QOCt b]a(^fP8 )o[{z9YEP`(q朅[%ua V+:hv!*MfN u6| !ވ5q)r%IBn#jCcS~H,xG3:,R |҂dہ>˘QC|Y, 5%!`SjJt; l?dY;cɪ<rJvѽtd X]xӀfFAʢ[.E+b<#|* svl,Fڬː ňQhJhB6c[o++e_ tC}YM-NZ iDF_LY,ϞbiU_SH1if$ਫg`_43.CJ?/^Qפn.2 pϚG`6/6߁Ry8,]-;1Y Q&xnRydS%6͝HAL _K"7Ĺ$3!\}`&.U s5L?7.O*(_]_ucҟ s>v?FۿL:'B{m>ɫJH ;בxcm+H;x}W:Dei@ɜtp$b P8.>fyӶ9P+dw(َV [O<ޗ%w6u#tA![z}>h&aʏp` ~9Fv ǟ4gm5YZw@p}iM1R w}GGJ .Fyr%؀,`r1Yu0&mBJۨ'~2c ?w%0!XT^iB!"^΄pB@&l6 ~*C&3iG4(>'ө{K0>yKUxljnY: *~$])@ Pf@083|P1"Pmd aǺPP!L.8J Z(rbKZq LBURBWAŴ+yYo09˽HXTo]B&ed2YM F e9 + U#/o[| >2V_wZ^_|jP_ԏUfӠM(|T>5(- Uςg[^}4%Z} eU!D,'E&LnbDK(..aI):3{ yOlN{@0qW:tyxpX#SwE`N W(YR7!#"`d^dqevxt) ed8"#a #dP^vFȒ*^{cz)d?8 FVǑ&&G,= #Ub R'L.nE&p_ of<$ Ƅ-3M`E("j4x0($i< ODLQń+ExESB*Uqt{%]~*Ob޸)LkYq;=NQY%^bwoJ7"atm7 R`Cx +gKY޻7{3wjoOJ7'Z*H0|;Y,=ԗa/F lVW`_i=~6$1>{]sd6\^ͰqK^oԼͤ5jDpxY :a\gc  LvpH 
v!vUB_MRѧkQ w3&qqEpe;wVEO$}AZui8FFG$Bㄏ!/@_ƿ Z^!%Щ̞6Tsq-j^]e;+lY+Uu'h 5ɱj󦅝al+6t>}[ -:zp\ 5!ɰI SYOyΐ?ޥsnCjPl߷!n[bkqK8^w9QMw crŰX c漐 et%ɭ\ogs%a#q4fh Ei%k>kZj_Z6jmv5;+0]WriRW2E|KL>Ԅf7C_R?T7 ;(. e<G76g%䇷^0"@rikQ?4. ,g -j&}r7P J+iU\ P(o1MGPېfߏ"۩jn/}k\6M<0^\m`iz20+Ï/<FwFFrGc=˚9qF6( %;Uu9nk̶.[̴\ >(jf>'OMݺfmg]w7D5snc玏#] --3g siM#]+<%1wʹz?k_S(V4[+.zw'~'כ+Gs+ *2~ylkvUepzD*@phH$%?& aC%ه\C~^cD"m|OgKi|u8d MuǼV2[XcY*O '!b[-ҽJ!<X$<Tu)h;2gD}o| R#z '!^,NhML'Axk?$ 44΄ƀ KtArxҵȌN/ B{ iN½6_'x/@< DDìzk*%U| z@>|UQg`UQ'$P&J Ua#r)QvA=]PϮ֪j`sTwl#j1JUMVU0> ^PmJ`SkK&9Rl@C}-JxVߺ=I}46kK%M"<\- 1xedF GQk҉ T`mxr$GIo4EXn*Ց#9Bs D|UvonrR4O~v+V?ݯg(v"]R$SLJٻh9:N"'&,4;5]ƲȽ2ddo\@nx$H;<@TzLo%)=lQ)0n)̨m-yd[3T} H>i_?&/O̩D&|<< *S[_ ' KVdb f.zO,yY)ZCKNZCѕsH>T85Aw\i&8ρ͂J6UXns4T)#YprYG&,w S] L-sbbKr)U]pVqA\U WU^AℭtWvɸnwrwu BnPy%U8Xn. P#dVp­.w7AfŮS뾕6Punv~tK}7*\C9v>a&^(4yۈۀ!w|kG ?ڽ!B]Pڪk`EE:L a[)Je [ٲGnm:f hٞHR/ڲMC[-mو@plo@P6*3ڲ͂ij@01Pڤ;rނ4zm$RW&8JrM5]as7}mk0 tTl>F&803yͮ5K-!Lnb6jq)$NPVuS~xC؀P 6!*V up: -h CIuq1 ur NB<f]5^z,.Piܥ\y6kkZ)PT½*]u3JSa OC>\$"+!'Vɶ|XG Įc؀V/BV6T+W'KoJ7?RۇӐi kSJ~F@<ͦJ "l(Dp'䝉\7"7Xk锃=6?> e61Ju4P6һk}yIz һh>}-άS8Y- +ƶAx %w2wܥ6J *䫉c0*{5G1׀!ߏ#, Zk^$*^аKSW{~ sq>{!wP=bP}-WÐiS ,MC,6 (U!4C=/!ʌC>::{mcVg^CoR.o@[)ϰW Aw ;>J9uaM~JxM(07>,] ܣhc4Vz= {\Q''+ӥU-d 8YmRPڿse?|5KB¡ͮ4ƗQ6&vbbZWPL)y%uDX[l0Y #<Tm+pLp?- f*J lV:%솬ԓ %Dնu-2rY*wO@>xDx %l_G  4~fKCgVgTK. 6dקhE?r=9s>t5QTJ0be/#e9*9Y6imq֗ #[YA89ɾuDe!!aҵOiJ ͘I(Tu^ Z$lˈc [蔛![SGx :hu&3Y&lp rNHQ nxMh誃)0<9;+"R<9Ů~ epxopR\R'l?1>Gy#ɷRQumF_4iICk?*yIj.N6p)TӐc7\+jtx3I߆=LGUy:9WΌ5\KzIse=x *ߟFSjc5P!O!:~:6Mr-)S]4u2Wwl~30'hԷ $y[|$ˀO`;rg[6JKdBǙEXg Bfc_@ܐAUg|Y ^pX  R0^&  E!?NC𧧸Yeg،Uv  & b˴R~5<2ۨXpaV[֖F.݀ar +*׷D;ˠ, TTlQּыYi,e7!L_e&ӽ(Z֚ISAȦgnka0l~!Lٿ7x794^ǞeU=ȓGƟE>A׬f a FJC6v` FJKd=,WCQza{ BJ֒SW!lsxrk")Mx#[q%~BkkxEHwwIېߎAUr?H BO.uŲI{_BlwE$HBIwTInCR]{0otxɪS/?K($㐛~ Ixr|ߴs9z" ' '7_ԓw_tJ^^|%zS+6-*!S۪y٪E߀Fb*;8Rmy7^m8d ]SBA4^~V%'oA? 
<E`hP:qgY/ex}3P r+< ڮ BӐ; $3)4#ȏ3 oqgl`@לU\'3Q]ր+S73FIw- ٢[<[[37"e俤Qٸ"sV{44 F;M!^>Z[#0Өl&X?vm ,`QIwu%oS!cg!9@z0niQ XЖZ󗎉%v)uYd*2D8u1[߷BkLo*y^ZGv.R&dYAMe)` C_qL({8Kf.IסxgF tW~!+],")#>}5J]YUd~$2թdd#L瀋oS P}D0o0X K2)AB%V6R)YK3}J`cQm6D4Ӕ:]Fb.o%`g ߀|l,pp͈KG ?0˺F Rpt[#{YN3)wtWZ"sx*MN'<BÙrdOI{s󶶂L:^:'Clϣ{ >6Qr;^ cwF *; ;ʫm*WPb6+,UP] .֋*0zBvTXLJ [aM \tCw}uz=15ty ^Tl)ojYEPo{q=h~ġ `(=:q0nN_bb;WK!Jp9 <[3ZsGeJy9׳[iAoM((Bo6E&9´Ux["s !+8y @WeJ^|)9(Nko6G-Zb &j.غ*ס -OMX\j> ,C.VV%@U \[Pہ- (B@@\Pr;Ϭ~l3ϴܛ~&,C YAqtx>`A|oANM5moTC?8LhX`X<.WPী_2ײIWL1b[I!ZWeF&|=PwKu A~a%<XlloO4簈>u3R2[jF IW?  ]NXE[WkDHj;hX$=0˹wͮbsKwʸ9ɗ| /@Vy,~ڀ^@#9?5ːՖOƳL o=M 8w_#ad {KO)%Cɜl)?(' Dy w{cg.گ2[/KC㟹p1o^s\'tmU+}~O^=bܖy%aK`y焽JeNӕ ya/<{/,!0FwH"oqg=i#< Ymuͧ{W]L,fD:'W' YigkrxӶ-%2Ia^ KC j09}mfw3W=_[C imi׮oN:\1U#Yrm ]LuEielP7PƎD٤Y^ẘ2Zn/eظaj&,sذ{+m4 H3o=9=_Jk̵fP|C i7@{\ֱQPk#=լI S~-[W2.|xARWm.M-an*ÿEkm؜C*|s=`*dVz+klΠxD4ãCie?j`ofgxgŪQ}ײJL~[[FPo#C١K.pqtwN#Qݤ[3S3#>@gn;*Gzc׆RS|.+;\q3h5mϩ0FS*ecE׭8cݬe/ DmVy\KQ0>m~.;oZ=r*ZNaȅ:oU݆I{BM,u;%)3/gl0`km^sR[dWzֲVk Ms(A&Ox7v]ɡj"}&⪤ǺL࿶SI^]>N׸6 m<􉱁9GVy`RJre`-W_^y870.// (k@2lPJ|%~wj dl(sMւcǫK9<3-yåub[c\tGKw7D6snfG‡Z[fҚVW0_y#JxngユsS~t׾>7ToΨrg}BwJhn|+ /~ O`(U5y4.4qUdDx-o`JlHtQgģ@#Ӟ3bu+vAR1kwoH h%#` kZ@tKh;浒-(C,uP;۩S=qUoPR-ۡ }5A-¤`fV@< e7W -욯xj6@>Az ' QQjA+a3+ f=laD% 0 ,dSf&jmB% 0 ׌J>ֹD65K%^$6(J-=Pa3}҇Q #l~QZvB; DzB*LV-pܸ󏁔T.(0MPN0HKx+?* +gjbjbTmbf3.UK0;UUXz!< 20a hk``Ikzn$=Eԝk!!$-,QģOK'r^ @<1 f`= TcnLRS EX9n`0ũ4ʧ CJ 񳩚LVw"Fv䠢\!t_h+h7w{3tfne؍t؟i-0xERF~!MxM!JoY<ԒO#Yȳ-yafK!n,܌N4s(u`dө9y_l)?#?y&v~N=-m1G$ǑgDr9 be7܀4Ժz$M arLb}(əҽNUښ$iRoK|G>}dIyn0FF 9w0jɜuSq}|kː8a;/ Jkۛ[X[mLnBh7p/hf{ڟxR nbechKaKjRL~e2P-ģXťs=OM+)axz3wBNn "<YiHx9+V^MvĬI̲U8ɩeќM"sQ K_1 /sxzNIgk׊wg-r/6`r%$ fG ؘo$Ha7g'6Uu ]O4|o@I'!lPL@<ͦ1pt_nb cHQ7RU' l5-Gb.K z! 
E_TIFvoA[ϻ[ݕQ,jeIvBa1aB161)iJtqgb$K_=]nRRȿ3)mކ|;Am^P>4:ңݔL1BaMq{hAL JJ]4m@ivCOO͍ hBp؄Fי1R{!=@xkH%|XeL~P}Uwe<$d3B>bPeT' TlUMjb..ay fT UYNCn[%?# fӘD  `v{j)p?Cka毈-/,Zn*Ue | Afi.Ce#QlJ[%⢭/ ${ Bat+딐0J~D@<ͦq%`c*kBeI~ӨxG׵}ZlF C{T4HsY^wrH7OrkTbzSNHz NhdIK5OT!OgD Sr//@[s*"phb:QU!JZy%6JJqXG^+cې}4[pdl. F_-fc1:w N:j~T-0<&g2nZ.kjOĹN^{oe((@ {ΑvDhd}Ol7U9dF^Q/CVjy%3猂jl"r^hPbUJՕ֖]'Y""i!jtu'@<ͦ11xwmߣ$7S}3~5[2;~'0waצ1-t~ېߎ)УQK<:ajF]p,=}ݩY) Hz $ ڍE @NnP|RD!կ6X_) 'l)I(=vudT\G(v(Vz.MWLJN gIlp(JղReŵ1IH{kq5H4v 5m]jI̧Q; &%>!wǯ*# A8&=$>G!m| |rMϳp 3 * ;bb ӹ ZlEf4_Sx>*:p\[r:Py rLuoo郲8َ<+e3}%_jI_]d[.Q=&.OܕCDdp?CN೩m1 &V5^1TTTTVTH;&.leZ@"<P+*a \!?v|? 4 K}7 cuCSܝ^&)܆=q="($T;i_vHBak i{YL7'f4Z[uu7:ޕf Ofm~&*77nѤh]9%C9 OC v׼> CD)[kyjv[\:tVM¦q@:L#x)ِz v}lE)t |٦𷋮[qtYuqK[.f k`?3ٮ+@,nE\[딧=YJKOBp O0B-z/&!+aP4^kdPD)Z(No*u r.T=IJSC3xJ? \P5SiBǮkֶ0D4PST# h2]qkъv.;WY*Aɭd?zALw9[x)y[آNL^K\F.MM29,:,cY^*gµq>3-jGKU5,j;!t"0 ^7m*D%֝o0^iDZxbˁh֮! lhd1A&c9G׎޾lӲArr(䣱 %IVA#M (Iy[A&cq/ djxdUi[5/`I(\q7_xwI.ϗ18(ke]ce[O> Êc^R.W؇l47+~)]8}o 7Gc#!B8y&~%;7Ӹ ?Eqx~!ي9sH3 Xƻ)nF1*X}5Q a+]ב6a ]!q%'ֹP%E\ c^ť1ӏI:*sH)#<Lg+J)yo7'_9ihk+!ϭzkZS-p"|r͎^/nke+oY=vS'ٶf`2Lm "leǐ6a,j|2]jy$=O"'Zc\)aK-E9E5Su%ڒ~uj~ߵ轅e{O }/748X\Y}ޚ"Cfص8c1Ϸa |:W,{ *'t#'>6/KL` $)kKkyf6+tUR A*F (Λ֢k@N\xVmxG OVVmuޖ2y9?rv%~D{ijcШUɚi{W'}}&9o쓝0]WxwsZķS tL|aIv׶҄uNo?97 ;(.`.{6TRAKtǩ5t|kPcbl`ΡmX7xrSsI+[ ?pn`l.//;9yWoE]PBVA.]g/nw*ǫK9<򖷈\.}P}(1qjֽ0[: 4)osn_?q}[,|Y]̜V"f^C]uvt;}+f}QO}b I藵mM;듽P uCծ`M^Ix̤_@< c +yT;U {!+WO+2t Z`PnM[[~P&j%e[3tZT/fn)Z~X;ېnj5-@C,?:| gЄmOO!Ϸܚ!AMH6?ynX s[ko~MkaӻOeD}>6`5;0 cz~ap=-m}Iw0N67V B1_u Ȧ v#q 5I8j=Hb0 ɸ+o4Xlb]e.1_76Nl.V-9[[ 9ނ|K%G_"+!@IF^MB];V"<>09loQ[@[-zY%ȥ A_!C v֗ے[68,f5/)5 fӸ1}!bT (< c?m=o,tv7:=qyQoϰ{N&%jo oT,;:^y7Cn {Cst(Ғw6fd+{'󐓌*#*EGVI( fӸ13t.V4Jki P`+mrwdm2# v)!g×)BaF?- fE  ƲIrc۱Dlm ;{+"E2U-h-Ue | AVD?oVV ݡe œ,VѰE[_6hle}A(7 Cn~-GlWP6-Wwu*QumF_4 PUGU򺓳Ed-Oe*XNCsNmh: v/MG:{ @(,Jh覣TYXQC6Dexo) _9wbp8 y412%YiVƀ!ߎ}LuٺM'!Or`xxƠt9msG]a8~sfu0<97N\ȡH+#V (@g=Hg"x 1jM:+EyJmúr8IxΜ3 FNy]Bko0 dUJՕau 5:*$\͒Qwl7P6&ﮭ~7=D* ]YjHǙdBEˈ]`}c*[蔛Sab~ hA4|KSb?Cͨ 
.)SS$$Iu#hɨ}$:+"R<9~~(!q'c8D-%R@< g^K*@o:XE@ɷ GZ;ti:a0O3:^yK-TVhr-ge*FOI򸋴B{m4gwnFg[b֥Ƙt 쀬*+{ͯvC_UJG@!%bkyԴӗ+ ;bb K ZlEy?*+Oulh^@<ћOCtlYt-F>(Û-3/ARKڥ}{؂vlaDl>qW}7Qr;ؓBSǽIXR 9zt!j[Ru&Q]&[Su&Q]&[[u&Q]LtѲ9@4G +#W&QiBV $;M\slcڞVh;Zz +˫yK+JF٠USK-kxjF&kA6#K@E@vZV@Jv;.0Y׫lӵԲUX|U d?1us1%$ބa:­G F_ ͖T{TZSRkmJ`=vL}հ{UR뮶i0wz]mYvpizӐ# ɴjmꕮ{^໐mIݻv5u>ֽo&[:V ґU>/Ƥ{"px rr}iE%':>}QsVJKi mtSK@Lv)q{aK)o[{9YEPIv{}th =vU{#.LxbJD:[?o!쁜\#Y ,sqm [LD AVtf`hM:F7ЃTK+=%.`J?*Wꑦ 7I h6wUMKz͛PX00Жsp_ҥ%`2&Ax+3P-QۀN[T_2يE׭8c:ܲW5H3u r)*o7S@~<]ٷPpީ'+{avEdȅ{rJ~I@E{ıxfh)瀯@~%Q'vƁo@~#1}ݗhN$w]- ,Zݰ^#V W hCSҁ+>3lxpe# *(Ǜd7 #~z[)e- VKLW2iKar=R2xJUdY-Xqbw 0[gЗewMfyiXv{f99'a*>˄ 6^ r[rcixq72 |/ƴ/l1|,֗+.}޶omx"7jؿdd־c͊Š<`75KzHJm)$6;n_)jL3xgg7:|7+= |BV;C~>t{rx&ЄZ"2 <9ɣnf8y 1C99a])K/%f!{)S w 8[x un 4CKT3SLXZ+rO%BFsֲ:TN}#"J^}u2t2G@_ƻʙw9PjGlӠ*9:u[G{`oK,<2Al;1t^:$5+,=S4.>48xNP6?:m=癌 _Ԇ2BsP($!42kֻla +j&p7I%[\^6CKRBUF/qvYvævHvl؍~{d9t*"DaF%NGy,u:nOoaJcBiCnWrSoontM9!A';fx!5%#`2Kx3kuFgr26(`oJq91D^r,=9&Wwq&kh% Y.rFnC .CnJn7Ce٬7uW3JY:CYAIGșXJJ4bvd9MQ Uy8y9&,`iM?OLǘח1LK:e=<Ym|c"m¦:6` CP:|ZnD챾,#g75pW=^M:cƗ'(l(^46&,Tː/7߄(Q4݄*0J"&?a ]Z(N` ]%#`2..ci`M8)Z+̯U%/&pi4 MK” B?y1 ƪϜXG*ϱQW4;(OxydL[ URktrc. m,A.)|L3ZF= lBh;L̟2¨l\d0{+{ܨ-`/x{畖$kNpK,3PudTE>(k(MENܟy1-t"ed0~'vf^eX)P3[ֲ^v\'-T es gN;Õ^~$J!d?|% xrO347ˇ0¦6` :ljIv[nR=lDq=w޽=>$W{ qr5CAT /5^1}]@ְ{G-o1]ue2G(L [ϵX xdds>LP8 ~a(q 4ǩ+~ojuayDP%z$#O%gR\+xq]ϸXdQO`drǪ,QVu6j4"5<}||h^&-┖nBo$A0atӜ׹zS/1ph@~% <ފބ|3wΐ`_h1%. E;K磔7'n׌x`H0Wg6LЅ܄C)]~!4#o,n̦ 䆑M955Xk[c^D"5ȯŦvT٪p;7}}dR 0 ]ԄY:oVc\Q kի*eéj%㥵1} 㧩d]!/| Ek1B}~0Ho0^$iDV/?@<1@X5{5vY~Hj_@>8E*00]ei`C}u!P|y~ !\{|$85ךCC*ciA^ a&<ւ#H;djSC<~%MCWYtM'9~8tL^e((COGc):"d&M !yl^/7r{ T*˕%![qK,Z7i_ee7َt ҄/C~9nG^} OM(?isO {Ny#rcA؜XV33Bf-iB?cYG&cQ𧻓˺f?* M+kf ć<]q{=ğK dMHt yZ.+091\kI7O+0-Ӏg`D\ziM( A> gt uP?koktt9!LbKq{}}i9Yiu XA dQ;B-*3 HxYviVjFt(MGPi:C\v5Z}6e$QEHϣT lKR(y&._Lj/@~ jTT5|zZ {Y6/7֎6}lk9{fel)o2}qn+Z%kj3KҡVC 4?|y0. 
+T;>Ww0xg: b(?kY% 轅e{O }C١K/ ]>atKC#<쭙y صTyeo; N -o0FS*ec&ݬe/ DmV-XtP鷊Qh#?#Uu^$~jЪ2 5NRV54GμOyZi"Bz5G-Ovg-@ad;獂^r֓}7v׿J2M+ȵoiɧ蜋Ͽ[d~w?y=oZլu+JጱqV]6x[>vάxըzަHo,$cԈI-FS׃VG;lRf:Izv>GX#xO2 oeٰ5ZrRɶIE(z?&V+_N!(㽰ETZEo7vv$Q(D]@< y ] ^|e`r i22J8w{w8杏qm&vnZJjVN+ <˕\~|y782<8:0~ oAW(=ߩ|0ivG7u;slq%cyfzp郢f棼[lM0Rnj~EsӺe,iM]+<%wʹz?k_SاTle!o*t>; b7ԿN^ FI䕄Wے 'i6]5G9oz4/ͬ/uyCoe,llҥGGFXtS($ɰSOXD|Q'ē*X*˒ ckpuA)]06e㍥v]k Tҕ|$ዐ_LPS†AfӐO7_Sgģ3OK'<=PAxbXgX6?o/DrLH nGJ^daȑZM+H;@lӌE6: A'J :|Rj缿V?"ayj_Ey2capUlv8 y-iab=Oل!I'aHan&j/@> pawדF6auxrrCa0DMAki nM> s>|&lDrs(eK 60 CZ0del$dlZ-G^2EcrcL/:)㱞ս̼U&I+z .k1&߃IynB v:SI9(<4uĉ5Q%ƇhZKҋr-ɩzJ5%y<&ڒ[ڒV$/ ? -0`KrU8gyV"޿m-fN'iS\s(ܣP6-m_Bn6Ztow?մ&{!6Q6 V8 j/CX옯:RDG"J~T(VqdS$\dHk;]nwљ|SJlb7U, o8m,Vs&4i%b:v WZ!g=9s.9Y)7'ϳ.pї -u\ժh/& ]|-h~?P -2aBԩ('|c}#ܦ4?˄qclVfi$2abZ K2a`(?lQ6f$K$]H?!V!z4٢U-ѢyW毱-/,Əlݭ.\l_{s OUUxG% 52(B .9!PQI+oAoCV/>ٺl&w'csU0J\c, ǧ1wb䴒jܫ^G^6+caxx 6CM۩˚GZ wBx+{Er[ddÞs3<YmqZΥf_ΥCyE<YmXW')oeNE#'2uREjU[ޤg BNnEö[ >1e !Oe]-k!O- TG?'e1lNL~ =k~}ߏR br5Q%5IL* Z ~=B{UEA'[Vb:iR% fh}?FJ}Jr"tub4&J쪤1}"OVB(g)%u"rP1G;;k,4aiK'WF/o5j9.t+O6sSy`QlpB~V 6 mkfA/|c܏N{ gjYnJOdyڳLCS%7|D-c@qNoںFwfA+Ћ:^jyÖ7{0¿Yip94.OM{ac8Z; Pysutk`0^;gP X4*kFm荔%ܧ:FF@<1]W!]/[G f`c/@V.'3j@<>JZPrs6w^aE{rtBEVyf2~˟@_ Sd[ENY@<Єl1!qe޴4WK,!]YM˄.rIٷ?YL0f+T^lU|MZ%}E/_/O*/!*SN lcު!~j /Tާ#rP|r?}^rZ|M(4ݰ CLPFa1)%CH/m+?w~w]tm8gF[yao$N\e­ھ/&J#ⅿ#i%ԧPLxe2aixi\:7ɹMS#Sܮ#S ~їɲ*\h!*xU]M~8˄Iu7ˤC[LXL{/^IH /&mA镄d|s2}]풌 )|})I(Xef(|ח c*Dii'Q˾ܮ*mjJ/6C)ALS)يym|찭1ၲJ|E2a+ڿ /6a@e˜8` `W?u ~ o}0y #(Z8Nɷ GdLb I4 WiY4qo?-sL9s05-tQ"KwQ 2qu-/d}dYG-ϰ4yF{ʺSF`2[cm-C+AoP͓ǵ5c-X+ϰŪoMfg887Bj 6dVˋVXhbk+^ZIygX/T0QG}Lp_B>/&fȍ<8?mʫ7|1_&#Mocj675af cjeZ\DCi˚Qvd0&X} -r$fǰvyG/2asl/xTov3\B19gGԻj,%.`c٣Kn_,5i ?=vOCV H '!S+1I^op?Y&ނ%fjw+g^U䁖Mֺ E+BYvswX|uMY64&G躴99?l*Է߆*A02;Ӽ2qˠT?.*e\ӠɅn., ˍ͌\X-[vRW+H8["|*W@(G*2=GA{Z: NU'f(6~H]>ij@V01hi\:f Z괱10>WdC^2{ I, `g߯4{ DٿVWmr} e'E$| RÕچt<|86s2U*O8.t`2N`x=:nWQ(MÐ 調K)_:OE:m k }> mD3bibØNHeM|eT1cR`2b{.3Y^e ~km+arQ`ilNEޗ%h1b2O1荍T*˕%![qK,Zr'ar66g ݜf;'bf6Ll)e`E?!I+~HE!ʗ"m/#q}N'fy ]WCUp%LXU_/OxEoѥӏyU 
-fhzaH>Z:|&6bcuW^A4 ޏ mX^/4ծev5l@ }#br`{%=?h~JO#lND&rr 4*MGS(J#gG֊`73n\QlM*W3l7#&Kfpѭha.X2b%CnΊe?rW/M;UV𥋗L~ VCF,].j3:X5JOZVc3zo!oSC} fGF.*R FՒ[3S3#s|l]J'|ۦGcWA3W`wTX{O)N|E2KLVnֲ^ky6+4'o{MkֵGNE $F.<Ո`-I?m5hUX'Xn)+wJS#g^<4ZCQ|{^樥ɮe?&U}7v׿J2MZķS tLݵ$Tz; T0؍oؑDvc +oCFFRIn/}5~BScs4ܴM&Vx+3n2ùw:2<8:0(^6( M]Zu9n{G/2zc,oyjyåu2F[Lz,o`@a};{pZ>ˮ[f΢mTE̼oʳQ .,蜾?{؟E==HVeR$z3P:y+%}W#O`+W nӧtIݷ伕o a卻d C K%' c^HF-ÝwPLiS jB(֍B!g +Lh;浒E@:ZV4BOBYYXVIl.Cl.C6K/$ݾgEض$R&%$ Y,sH@-;" ^N]-E ]#}SJ~Z@<:={A7Xw2bx>8#w fh0dIeY]n‚)4N`Џ"Q!hl9 whaM/G!hYoSGT= {@;ۆR|Bز g +y߾"eϊӞ"pxD38rw\$(B!X=:!(R;B<4}C\p9m*̥{ c=cyLV*}F`ʒ.6I+{sd+i2NByJ"lhӚCH;-Լ6y{BoRڼ MZk7JkގVkބ͛M8%!Lt}tκA2u[z3uZ0̂wIxL{S _"mq9A('@DmI$N:RؘP7bmClW~;Bxt^HI8Ri]ݛGBģtѹm.]DAǕF(eɴcEiL9ނ|K%"+))nJQ`xQWf TLWuBQ{@~&9lo㐏7G)' HH2p"SNa뫊r^^|99 bltds xpr Fr1tSI_Oi1c ݵ$=2N^{[- :mv f/*٬t|ˉyb7ㆀxo(+ |Zw)& o$Z]MEdEUE]W)\Jy7 F|(>QO'Tf7 |I@<ͦq%`cڧSs;$]zޛø߇\n^ <_W0)6Kk2^?yUKnt?|uWzK֒S- r'l&*꠾j> kfW̖--*t ;"Ѕ6it#}עː:R+i6 a_Km٬uVrr ]Oىm&]L઎W! xhAI'!lPL@<ͦ!j@ī$`C0=[<ÉGn愃i gkELQinG|#<[CM^JŻz۵XˈmMiޔ⢭/4Q,[Y^c+KBÐ_K(4F?J T֞ {I~Y,L)Nyv6/UK1VT4򺓳Eݿ^Xv@ʖu,!OV99Sw~nd<Cw* 9Q|9%0R1]m,{cU}LsֻB|ԧRY-ȿN2:^]Ȕ=O3࿇=ȿ[+]^r!̓|;Ag*>+ﲑkH[6+6ݗz׼*nmFZTKCJ]xthwxb{c&6spA K%p$ Yi3F0+#M]w+r5k_>vs8X~ .钵"@MaҝKnn4ʆ>l_k74d`ԣx2ZޛsϜ!O;yiak~6d+.{-隝׳4+iNr0FʹDvuCcg?>eQ$h?9 }9岮t}/lyW^x5aeXh5`n+qIԚs,7p5BCɭ4p%_@i /(Pρ.a5! 3>7<Aͻ1y*:'쀬6&~z_ڋOO1y#<DlHYBZCT:/U x& ͻD nUkK6p@ !I6c)WzG%&WӢ<(&&.Z+%轓KMIf{<2hjC:)Prf&o~ʽʈ"$=l[)9O P_MmqTRYxĥ%5Lf1I6 Nm7[ Uap|@ӟcS#kydvC7gZlj6&f_ϰ{:>oUINNsn=6W7svdX2iFkoEߓ4w`K/eUtd^aY:B_j3-WF)]*9ccc3zo!oSC}-`vd@.e)A`󚛽535>1s?"#صTyaoG ]ew,G*''>ƢJf&Xu+7kKQPn9+a(y3g?יZ0(9}[97 Rc^+ZX'GXBrIR%[ p/P!OXibC!JxUɽmv@;Z2LV1{S['.NCMM'Ax높?" 
D+^.߱h?J1):M4]PFWk+M*JVbnIJB>tC%!A4K=1$ݞz&&J岤fzpKjFY 'QFOzqz4Jv`#3_0d]PƮ&+FŴR1*̌D#VMۼQ=Yz>Iλ&~J;*s 6*f!g2(iDnPXwbx fFB={S]$H f$u _hHNK .J#*?acR{!+yT;#7$<Y-p?V_qx~E[~qPspmMH6~C~m\i5to :;-{yąد޼?]Eˡ?y03wK< ',϶aAly&`ވޔ⨒lao?yu209ׂ: 8-V7t>arR{Mm?aluY~UQ+$ބ^1J@!?IB 0Ū8|5z`&`?ޛJrzq b kePdՀ O؄^`S#TP1tQhT%cCN e;1d!>AI9Jrp74'ɍ!d!^R BǘUBҽ㱞CV$ta/8AK,k{sNϩDr?o :]Hl6Xi8q\|,pCnoA, b-6 x<& –D0ձ\, `ebp^OD, nI.Ƽ?-S7]q2]}Zؾgx궵`:*#v- @@@32-h@`(f hB1_uL{H*n#~*i]5^u,.ڵKx{º6ٓ.uQѹN#+pJשjQ$ d--oA\#ULg $V #ng!ƮLA-!?LN-w6Ugy-YNڕULX\J "32rJOi\D ؘ6Hn݉u6oJt} ]O4|/AI'!lPL@<ͦq%`cw6 % YiKGFWiY.KE..ZU3dlѪ~$GAVts-2wbi=0\AK9ݕKf2'ۦyƷܛ"b우O!!  Yi SNwmKE3.?͘ _3Yv_-[̗߆ؚ9nj 9K*;[}k9CzfK ז-j< cK2"e.xff Ƕ_&LL%yu=ȿz7[v_nS7dwr[AȤkuWlk1bȶܦt~\c7z/YL3%~*S}k<ͦ11}!ߩ N{Qڟ~ [%5]^J,=qyQ[4Ja9 MJKRߨX=ү޼Ox7UC於[7#[9(;dTQ9^ ~9~T)mi6+(S9kɒ^WԟԬSqW+:T:w(&x, gYA;{ݳL!ː!`C" ~2/7>V_SGR_O}Ub<59Q_Oi\C ىU 䮧|H PRXKK%&YOA>xM +VmI~7QGoN:^fjI]ƪ;uB+ym򺓳E-LOe&XNCsNm. v/MG݃[BaErTK5MT!o|NɽBlͩQ"GɎ -uS+K_ 7C+cېoʎ>ٺl&w'csU0Jwϟa,j7 SIEdL$x)HJB̌HEDR]utU_smo]]}1]9{5?jl"ӑ`*= =4@ ɽZxlUCPpLlWV w%uHQ3)&h J(@W '힉nJN>{^SQ'cByE46jzfjf.zl"rƪה$rl$~f}5vG◄b,*Nx:MZ vMG;$齚 =㭫$|$:ZN"s7lSm߄fj=i5">'^1KUz CxU&LHnM#hY/uvo1}y)76<`}1|Ζ?, M:ghqǞg2OT|xǛttnfI*߾ȇoƬ4\t.5Zpv(~+̷xxepO^kn6yfL 6>Nmը: owHo0#Lg[y=n֋d]^If3} #fxچOJflp#uhњ}V4-{Q.0hs"FRK}:U6w(ɨj1b3O3Y6Ldԥ'y"IYWook[{ h [ @lK0-RpCɻwIF.z bn v4B{]""DN!lۖw&NK&Za,ˇGi Y`IZqM y8(kzE#Lc" )* \fʖݓYiM\vE= 9$K"pxmm6ՋYJx>bb׃٬ZwӜG:1gj;`ۗ '._cZpU_tEVvѶ|f> r0ZR#7$$Uȋ0HB}SY7dڈjCiㄦhR+G#m|ui]up o,Α걔i^Ibf36h93.i')lLzXZ asʳkY%F "[Pu-tϬ(s^HtXSWFP Bak,U ȣjJ^``,zmjI9TP7:/YYHj0LWy*G C7J$CB'%dqp\%/ 'b1 ^oɹEOCiGI^ۀ{!;^w.,O(xp'\c;Yh33[[ \%0\&;.+_.Wa[_bg#aS˯`GSK*Ԓ*`R˯[va|hp?|_C <@j6w٦YJ-!heU`fWhqFʥ<)HY6HT=(5S?tAJ)L뙮z_G/P9֋-olU;uTmo v>*IofB!Ͳb]:WHKi"afP?BVR6pF9L>`k0.[1~0;{77P@>%$ d G! mONKV:?8@Z8-t;(;BFr#/\C给0PO'.m7jx}M󽰒'57£`_ Nb_T"Ihn VKM /FiD qMo8"]BN)+\ j(0ݱ@⧇$p!ȇ$!Pp;1f5X#:*!4U*{*?^y? 㘓x.{(:B=ZوݪJ<wBVzZ H&'E'XZ@iUi? 
xvѽs/^i#[_>g_(.Pg)0:E)_a[{+.H_@p SςC"|*MwqFEiVsO hj-s֬}Qg iGu-Ѓwq&|!E:ϧLEJ$&.7ANbztܫyŁr™Ӭ O ++`'h.˖SwW<7e6bdЊu/j<D,;8=3щ=&X`c֍5 ^'Yr;ݙlk0vq`׫6҅]:n$ tQ9)qϫ%Ï%HmKs~ۛ'~Nljm]pד> \'/{/6I%N/h~M)}ǵxz6ԕ\JF٬Z%vJts:FON(=s062Qj@κވwUa[k+FuMմgoz)޴1jGTxI÷GMmo3[{L5{{KtףM5$OV-_skMu.{'/{ilF\g69.]lւβ)^O7n` 'vãqY,@_ ?Qa>>UӞ饨#ƨy6rR&t^Skeyn^iEb$Z}w}ɦ_w]Z\'aOPGޗ>9as{,v).QZ2\vc6.5cWru,3٥ 1 Y`\'+"iCd#A.sѱ}:zgS?!9?[ىᅚH'#TAԏG=߫SVNNb.鯚O/E6FH6L'f\WV=%~Qͦ Zv'Wd3ǻ`ͩ6b%YNe~ve~gA/ǎET ]tmD w$Dz% ! v)Oxft.v?{:I}aF!J=(wR?FM|˟-"L.Pj]?ncGw2\;zًw=/9ZםQcVk3~67SccʼnspakTN*_4;b1inp%qq΃Edeَ̲K]Noݸqf} 51u?tͿl Y{rTSH},hT*Kx{oߚŒ^C[[ {S2*ZM6҇\/r V\+[:$2!DZ?tݮg\vJu:X0<5в̪F70Y>%kƣQnmdMS׽]wJ`ml&wCyP{ut|$I(+,.w~p-hnќV:9R=˘)tSYdaduOP`d pdI&9Ӡ6CV?=En`4U6ܲYN_bZ(^1s8<Y[";SAX=nQoP(x`!v:.8#PSZz݉l>BU1&0O1 nM-իaش!QwBV˧e6}z,[ʆOf!|ױ BLxEfɰ)[ V^U,kpww.Պ[3*VmlevBBv^zɫϯ4LcqM@"됿쒶7Piw߀ RiC.;39BScDSSAo&&I\/)+qȧGЦ::R e.Dφ + /b=DZ3.u mc~hن\r]R!DK><Lj}#,Y ˭JմaZ5z%ka%|L-ɳ"0x4ӣ?mjU-It/&8 kݕ^zsϡ>+bTByg?DeM"sxJA>Z1o!i}l 71\3ZU1\y_n Lމβ)u@%[7zAn/9WsB  G҇[G!+{ŪfFN)|倧! g* {Mg|-1K% RE+SUǹ 0^ բz/`% ;aC@ѝwlI-l'%>ǁÐI#G1Q'LU+oݾ̃s~kjbQ6¿e(ҝU~ ҏ]0!adIR:.l@O^CoVfX˪,Ss3`q3h>U po"y5P(T_V.~.-LvKYVهt1UP/VYYj/6=5N>a}v-2'K(JIV6|cUd4c Ĉ՗Ғa6*8o|?@,Z9HʂIV?+kzK؈,A0ʊۉ`$19 {4`C(hؗ@C8M|!w.{_qklZ<<T-2 ݜCNxu<*cY(0UFPtz hP$ D(,OI./N-+ܥJKwQ]mkf˖?ǻkY-*NPPJc~^#]> yh1cJW~qAQ㡃V?'Z}RZؾ[F*mRѓy=Z[`˜*'>?jؙ&HxPMi鼔 w F'e$ژNԱ؟g3&ChzT2k͍8*qdUf2L|+p:dْE+?ˎ v#CgG/;!UrCiP8sfxT3g c#I]d{n?{^[Gl?8Fc[yk>J:Srk6FNRVmX7&'T/{KtףM5&]ѣZ{Z}ʕPw]6UbxLx! o5g\~R23)RaˏRx%^{eT$9wG/Fc@h_+~c ^eoW SGW}U-=WQc˛u kIRU蘀xhl=%@|GtQ>> #\uk*`ZkccyJ[]qZ;RM)ձ;;*IEhw桛ڦ=t^-TѨ8IRLN%6wW-e[NԂ!$NE"BjB |J@f>XxԗYm%՝ }_@<4EZ&jkoˆqd[ dQKI Xcגb(jȣI\- *Cfl k9n9v9va[6ݢsˊWWL@hZyDSrwJ3M7l47o($|z?#Xًjl''1kSLlX.z?Z.b`߻c럑]Aj ٥{7%.*~jƚNn4*ם.K{Q)ime&$doFkku֘ 2\AKفYȱװN$R+^2y05x\ئ퇌K]hߨ࿦[-+~'<|Jci]G}C_P-MFCJ{CQ8a. ϠK~9d5Hv4dj@K ;Br0NBC q(B C8  ېAj[w!v=d3j ^͐AS vA~KCB~&Fſ& NF Dئ´$6a}A|,̨:uۧk<,ĹpEYvR/ףWkVp,mȷo#P+a];?#`\{-a{ Fa; +*%XE}õcdmx#<Hmx";m/kY-hߓSc5t\w293;ro9Z=Qgiq.׌ΒNÜ ӻրdvɩWʸ` . 
^Gs,|-xgοP&Oqku6/6i|.仩^e7=^p+gP\n+D(F@,YœlݝP0uPc4 u-%&E 3i'[r]Bڊ΂>BX0vF Ժzte(ZB}C`? 1|Ζ?, Nӈ&=>ksJwS]w);-Ϙ2:<>iMZE:g >U9ImȷS\+aƐ@7[Lyp.Y'uޤcDZs%4p6l^ (ieȗ;I4Z< T9H :멹\^*99̡|1FKHL.Z'erh^[hzj+AY+#4+8j'}דvDh7dySa>s^SQ'cYyE46jmfޟZ%v."w 8 y65(u~FS0̪ Y!Nx4~MqǔU2iTM@Gѕos1# s)q Pɱ}âfAǩ8M7xPc'MM^>qI(ҊMI(}\ɍ)\s1;mVTFqac꩘ftEolržǖEz-Lw "$MU BN?Ͼ;Z[`vNP! Yɗ/j|T;y xҔ\@@|8yk7Jp50'-kƺlCk! 5r7n Jxg(3"?XEd+*˔ RW`Ge5奞ÇӲ5"<Yi 8]=qO jm)C/AyE$VR sJ;>K<57b hB6SXƻEi A̹{{zv={qjh vG!SkBMx\^F(m,s1t*3W['Zݓa` r-u=*"9o?}:<$[ZѪ$b{%[AIXTs+Y)?&VyURw[Lr%u7t(P8\\SWPLDd;p'd]3r͛n@F. Ԙw$Q@S 6{!v>TCy2lUt槹:._prNMulL]r\fOwKXx?/ 5?0/IL8pb6G_=(UsL[|)ż *^y4{~ɯVp8g:d[yMq`زhSڣK4z{_GG' [;D>x4վw]ǎ[Yt[0{uQhp9HŔ~ vi0|t-Vlw,<@u(hcEB |m4ד@ل@~'uӍ5T2ak[j9aA'j'.Èn۬.- Ѥ0 Sݙ2rsR+I@Z76V~`;7&!_ AVk_/c/Tj.vj+x hOMm _ЦmU]BVzG/;\<_p9bIk-垜_D@h5 /=#VNЀnoUF~_EFָ'(I_ANofAaJYBk,hQZ,LYAf:W$>q9< dM{S)6vC a4ׂ1תRg><[f3z FVfՉ]5#BmQqYf4hBm,Dkx'Cr:jzҹ&ks0ZYn&h.Regy77LW0ġm}3MLg/9ϵ?Sq;!-*?}Fbux V}|+Qij4`T̟&탉 9y'L.uQvzġ ӣv9͡SP!7|IkS6H=T)iȧ<s)JcJ/@Z%{zTtn ]m`ĿMC9<0RDRq˦|p@_._ 5`*vqv08vC@ApHgZ1* Q)RՓK iy^t@~˛j._v3D㮀xR,CK 0ZGnn%uCH/v >!/'. 
+4%omH?+6E+>FdBw3FIE=90ΨpHj/Y%(./x-ʮqKt>tO 2˞T|޻ GnvQ[\>vQqۀ|@BUKhv ޔ Jg6 ~8^pڮ߃<~ } KXG!S+L~ ~z3= @Iur77 pyEŴPaCp=MNQq;zi?%Yld 8yfLt Eeݤ h\O-ؔnd[T&`-*~!".̶m@3LͽO+P +bf:v/w 8u+b0_hUL\0)醈hZ<.WP[o@6L~4.az&ZuToyH f{ hSZLNJ TFǥm4Py/wlzѣ3-czS(<ӵ"Z 6m86p'L鬰@ѽ´LD灃U 49xR;q~ɱJ*b wBMS!"!+ rB@>Z7+p +53{>ou;uy.]3mRب"OʊݟiV^8ϯ/ @}/7)vnoC=SqۀOB~2Bnsм2_rJ#K>Ȅimzfa.6jē!pd,\>+Q%UcNQfei^Xtzm3ؑ{J:*YꟀ.> | ~TM7k>5b*$Gڥ\x9TZ_sW >>YiRa?Ԥ"cAƒ<\$ 娴*P`= FKey} \G_k;x$I[7@p7ݩi/1_ȋ "'gMP%,@V]CkSw%@V;.Uo ޺BoN;upJK˩+֒6oǷrH[K/F[]/=ߩcKҊ˼ Ye<^6mώ(- Xbx)ŁAvRwAP;/A+ tҀ= CV{$~*#sś392m$ANdq[KSz5F.K E,gS쑶Aل={:M& B*Չ-oLcI=n{ZA%-s.nC3M4au-̭mJK2"lƢiW{|]qۨy6d+]6\1v Ǧ,;:R8c7eZ4]6::g{IwIB_ŚӉYA}ׂs9,uR~Cq*Ly7_vьma$?Vvbߝ ms3lZ[-YR6֖bc[)Kův[l`VwRɱp3ShHJ||4޶95R4} 2);;6h# vWCeFHFWsw?:'_89|3]oS.1|);f6Nq΃Ede'li->X2Cz/޺q8=nBm}SŦY0S7.)΢۷f}Gf7Š:Uyߴ:kn|ESɫm{ނU1)O!DuQvB[yB.;zմ} W1ꄧ!NM_3Q=Xq&?4U&qvӇ} g:*Y>YbK5'؆#la3 i#o)`U +"uB R:޹/9q3aۡ*£6c㐏&Bi6є0dŸin;0f"~/IL*AEV{X' d@ޓ c>JفI˞\.;UJe yo"_aڈNwf։HdC[m{bܤ4-z^<!Ŏ/-L8o!~`,Ib~ߺ]q4-ϲ G :O5GBE#h]›o*#K.ЀO&2J!Swɧ@@c$-괟?Oi=q'ɏ^.Ȼ[0Xnplt  Tl֔JGQ2%ȗy490ʼn4$레k"d%O!砨RN 뉨x퉄!^N*~A@e3'Dvn<8)w3;}3V3 Nsgw_n]2 TNuD$a qgN*4,Z 6u䠒3eI;;"o6Ğ {G +-uaaQ :m?/Ζ?, N6M)ɏF^L9/gzpF׼96w>]^e+V]Cl[mȷS\+Clź'=9{ y]nROIsc0Uq}F%wx ԚHNN' Ohp^E+/CL!OAfg+됯ry#^ys(kqG_Y%`V&ےxh^XիU:a}PʨSrV&8^CF&8컞{&BG TT `4v(z8YilX) jfZJBϓ\g)IGSku_6? >IT.׳C;Ts.w>I~jh棞s6nP/%8 [=ǃL hpR[aBRYݵIXS,+tiNltq GLFJ8y*u5.թʎmRIӵKl.=͂s%M/E AQ ,aQ+bNMdZIb: v aq˂Ty,5?\A"Aש*Lݞ b{w(\ۥ@JkAK¦FEL'MDf8YmP2):Rm͛Yi:3Þ_F ϫ}/@iϦӯӒJޅ/Ծ91%u0%T|xG7(& E>XEVtV&!yOjb;"bis ; hM'&5!|u4qR͚4} p9UL&" x ;ES1[0xر©.uϳ ;p,8Fc@_wMo©w0dd+q,˖h*~y 5_fV\l0/y.j@(L0u}eGU_Sco+aҼ{뺏 fo>VPcoou#ls|cei1čy܅Bfs @^'T#vgMFTy0!߅yeGu;ޠOq'Utl#c[SN9p\,W'$v)_JK3DK)eq'|{.gvQ l cH]EtSivZ(;B=.UulU1vp֢eV]ɳlab`w!YsQtl0^U҉8t*8߰٬C]=MyS 96QȎCraPatJ j>A.0c0H>j0fW-ۆ\q9zMb˦.7| U^p.enG#  FOq kX})kЂInF`YSA>?st8N[y>bݪw Q'gce?` #--gbxpvw̍3*C_@ 7?Ԥ<7`=so>=/UXO+$mb;s}-~͛_;pw^}[bzvijF$r_,T{SUנ !?1K8ꃊ@Xlӯ@VAp06Fј" =gFs얚=<}o{L߇ʌ T;R8Fm _f &N?C]×TvNɨ 1Z}pwg*ccʼnsphpհc1壶Q;h{XmbG5>-~<ոM%Óuk٨8OUi~6_0j6_5j͟{]f6A?λ(r'?2? 
bayestestR/tests/0000755000176200001440000000000013421102270013531 5ustar liggesusersbayestestR/tests/testthat/0000755000176200001440000000000013620704270015402 5ustar liggesusersbayestestR/tests/testthat/test-bayesfactor_parameters.R0000644000176200001440000000703313614722624023240 0ustar liggesusersif (require("rstanarm") && require("BayesFactor") && require("testthat") && require("brms")) { context("bayesfactor_parameters") test_that("bayesfactor_parameters numeric", { testthat::skip_on_cran() set.seed(444) Xprior <- rnorm(1000) Xposterior <- rnorm(1000, 0.7, 0.2) # point bfsd <- bayestestR::bayesfactor_parameters(Xposterior, prior = Xprior, null = 0, direction = 0) testthat::expect_equal(log(bfsd$BF), 3.7, tolerance = 0.1) bfsd <- bayestestR::bayesfactor_parameters(Xposterior, prior = Xprior, null = 0, direction = 1) testthat::expect_equal(log(bfsd$BF), 4.3, tolerance = 0.1) bfsd <- bayestestR::bayesfactor_parameters(Xposterior, prior = Xprior, null = 0, direction = -1) testthat::expect_equal(log(bfsd$BF), -2.5, tolerance = 0.1) bfsd <- bayestestR::bayesfactor_parameters(Xposterior, prior = Xprior, null = 1, direction = 0) testthat::expect_equal(log(bfsd$BF), -0.84, tolerance = 0.1) testthat::expect_warning(bfsd <- bayestestR::bayesfactor_parameters(Xposterior)) testthat::expect_equal(log(bfsd$BF), 0, tolerance = 0.1) # interval bfsd <- bayestestR::bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-.1, .1), direction = 0) testthat::expect_equal(log(bfsd$BF), 3.7, tolerance = 0.1) bfsd <- bayestestR::bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-.1, .1), direction = 1) testthat::expect_equal(log(bfsd$BF), 4.3, tolerance = 0.1) bfsd <- bayestestR::bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-.1, .1), direction = -1) testthat::expect_equal(log(bfsd$BF), -3.88, tolerance = 0.1) # interval with inf bfsd <- bayestestR::bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-.1, Inf)) testthat::expect_equal(log(bfsd$BF), 
-7.94, tolerance = 0.1) bfsd <- bayestestR::bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-Inf, .1)) testthat::expect_equal(log(bfsd$BF), 5.97, tolerance = 0.1) }) test_that("bayesfactor_parameters RSTANARM", { testthat::skip_on_cran() library(rstanarm) set.seed(333) model <- stan_glm(extra ~ group, data = sleep, refresh = 0) bfsd <- bayestestR::bayesfactor_parameters(model) testthat::expect_equal(log(bfsd$BF), c(-2.69, -0.14), tolerance = 0.2) bfsd <- bayestestR::bayesfactor_parameters(model, null = rope_range(model)) testthat::expect_equal(log(bfsd$BF), c(-2.96, -0.18), tolerance = 0.2) model_p <- update(model, prior_PD = TRUE, refresh = 0) bfsd <- bayestestR::bayesfactor_parameters(model, model_p) testthat::expect_equal(log(bfsd$BF), c(-2.69, -0.14), tolerance = 0.2) model_flat <- stan_glm(extra ~ group, data = sleep, prior = NULL, refresh = 0) testthat::expect_error(bayesfactor_parameters(model_flat)) }) test_that("bayesfactor_parameters BRMS", { testthat::skip_on_cran() testthat::skip_on_travis() library(brms) brms_mixed_6 <- insight::download_model("brms_mixed_6") set.seed(222) bfsd <- bayestestR::bayesfactor_parameters(brms_mixed_6, effects = "fixed") testthat::expect_equal(log(bfsd$BF), c(-6.0, -5.8, 0.7, -2.7, -7.4), tolerance = 0.2) bfsd <- bayestestR::bayesfactor_parameters(brms_mixed_6, null = rope_range(brms_mixed_6)) testthat::expect_equal(log(bfsd$BF), c(-6.33, -12.8, -36.48, -2.6, -29.88), tolerance = 0.2) brms_mixed_1 <- insight::download_model("brms_mixed_1") testthat::expect_error(bayesfactor_parameters(brms_mixed_1)) }) }bayestestR/tests/testthat/test-estimate_density.R0000644000176200001440000000147113614722624022065 0ustar liggesusersif (require("logspline") && require("KernSmooth") && require("mclust")) { context("estimate_density") test_that("estimate_density", { library(logspline) library(KernSmooth) library(mclust) set.seed(333) x <- distribution_normal(500, 1) # Methods density_kernel <- estimate_density(x, method = 
"kernel") density_logspline <- estimate_density(x, method = "logspline") density_KernSmooth <- estimate_density(x, method = "KernSmooth") density_mixture <- estimate_density(x, method = "mixture") testthat::expect_equal(mean(density_kernel$y - density_logspline$y), 0, tol = 0.1) testthat::expect_equal(mean(density_kernel$y - density_KernSmooth$y), 0, tol = 0.1) testthat::expect_equal(mean(density_kernel$y - density_mixture$y), 0, tol = 0.1) }) }bayestestR/tests/testthat/test-mhdior.R0000644000176200001440000000136413571077272020002 0ustar liggesuserscontext("mhdior") test_that("mhdior", { testthat::expect_equal(as.numeric(mhdior(x = distribution_normal(1000, mean = 5, sd = 1), range = c(-0.1, 0.1))), 1, tolerance = 0.01) testthat::expect_equal(as.numeric(mhdior(x = distribution_normal(1000, mean = 1, sd = 1), range = c(-0.1, 0.1))), 0.631, tolerance = 0.01) testthat::expect_equal(as.numeric(mhdior(x = distribution_normal(1000, mean = -1, sd = 1), range = c(-0.1, 0.1))), 0.631, tolerance = 0.01) testthat::expect_equal(as.numeric(mhdior(x = distribution_normal(1000, mean = 0, sd = 1), range = c(-0.1, 0.1))), -0.079, tolerance = 0.01) testthat::expect_equal(as.numeric(mhdior(x = distribution_normal(1000, mean = 0, sd = 0.01), range = c(-0.1, 0.1))), -1, tolerance = 0.01) }) bayestestR/tests/testthat/test-si.R0000644000176200001440000000244413611741734017127 0ustar liggesusersif (requireNamespace("rstanarm", quietly = TRUE)) { context("si") test_that("si.numeric", { set.seed(333) prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) res <- si(posterior, prior) testthat::expect_equal(res$CI_low, 0.039, tolerance = 0.02) testthat::expect_equal(res$CI_high, 1.053, tolerance = 0.02) testthat::expect_is(res,c("bayestestR_si")) res <- si(posterior, prior, BF = 3) testthat::expect_equal(res$CI_low, 0.333, tolerance = 0.02) testthat::expect_equal(res$CI_high, 0.759, tolerance = 0.02) res <- si(posterior, prior, BF = 
100) testthat::expect_true(all(is.na(res$CI_low))) testthat::expect_true(all(is.na(res$CI_high))) }) test_that("si.rstanarm", { testthat::skip_on_cran() testthat::skip_on_travis() set.seed(333) library(rstanarm) contrasts(sleep$group) <- contr.bayes # see vingette stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep, refresh = 0) res <- si(stan_model, verbose = FALSE) testthat::expect_equal(res$CI_low, c(-0.057, 0.417), tolerance = 0.02) testthat::expect_equal(res$CI_high, c(3.086,1.819), tolerance = 0.02) testthat::expect_is(res,c("bayestestR_si")) }) }bayestestR/tests/testthat/test-rstanarm.R0000644000176200001440000000357313611741734020347 0ustar liggesusersif (requireNamespace("rstanarm", quietly = TRUE)) { context("rstanarm") test_that("rstanarm", { testthat::skip_on_cran() set.seed(333) library(rstanarm) model <- insight::download_model("stanreg_lm_1") testthat::expect_equal(rope_range(model)[1], -0.602, tol = 0.1) model <- insight::download_model("stanreg_meanfield_lm_1") testthat::expect_equal(rope_range(model)[1], -0.602, tol = 0.1) model <- insight::download_model("stanreg_fullrank_lm_1") testthat::expect_equal(rope_range(model)[1], -0.602, tol = 0.1) model <- insight::download_model("stanreg_lmerMod_1") testthat::expect_equal(rope_range(model)[1], -0.097, tol = 0.1) model <- insight::download_model("stanreg_glm_1") testthat::expect_equal(rope_range(model)[1], -0.18, tol = 0.1) model <- insight::download_model("stanreg_merMod_1") testthat::expect_equal(rope_range(model)[1], -0.18, tol = 0.1) model <- insight::download_model("stanreg_gamm4_1") testthat::expect_equal(rope_range(model)[1], -0.043, tol = 0.1) model <- insight::download_model("stanreg_gam_1") params <- describe_posterior(model, centrality = "all", test = "all", dispersion = TRUE) testthat::expect_equal(c(nrow(params), ncol(params)), c(4, 22)) testthat::expect_is(hdi(model), "data.frame") testthat::expect_is(ci(model), "data.frame") testthat::expect_is(rope(model), "data.frame") # 
testthat::expect_true("equivalence_test" %in% class(equivalence_test(model))) testthat::expect_is(map_estimate(model), "data.frame") testthat::expect_is(p_map(model), "data.frame") testthat::expect_is(mhdior(model), "data.frame") testthat::expect_is(p_direction(model), "data.frame") # testthat::expect_error(equivalence_test(model, range = c(.1, .3, .5))) # print(equivalence_test(model, ci = c(.1, .3, .5))) }) }bayestestR/tests/testthat/test-p_direction.R0000644000176200001440000000237513611741734021016 0ustar liggesusersif (requireNamespace("rstanarm", quietly = TRUE)) { context("p_direction") test_that("p_direction", { set.seed(333) x <- bayestestR::distribution_normal(10000, 1, 1) pd <- bayestestR::p_direction(x) testthat::expect_equal(as.numeric(pd), 0.842, tolerance = 0.1) testthat::expect_equal(as.numeric(p_direction(x, method = "kernel")), 0.842, tolerance = 0.1) testthat::expect_equal(nrow(p_direction(data.frame(replicate(4, rnorm(100))))), 4) testthat::expect_is(pd, "p_direction") testthat::expect_equal(tail(capture.output(print(pd)), 1), "pd = 84.14%") }) if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") testthat::test_that("p_direction", { testthat::expect_equal( p_direction(m, effects = "all")$pd, p_direction(p)$pd, tolerance = 1e-3 ) }) m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") testthat::test_that("p_direction", { testthat::expect_equal( p_direction(m, effects = "all", component = "all")$pd, p_direction(p)$pd, tolerance = 1e-3 ) }) } }bayestestR/tests/testthat/test-hdi.R0000644000176200001440000000325013611741734017254 0ustar liggesusersif (requireNamespace("rstanarm", quietly = TRUE)) { context("hdi") test_that("hdi", { testthat::expect_equal(hdi(distribution_normal(1000), ci = .90)$CI_low[1], -1.64, tolerance = 0.02) testthat::expect_equal(nrow(hdi(distribution_normal(1000), ci = c(.80, .90, .95))), 3, tolerance 
= 0.01) testthat::expect_equal(hdi(distribution_normal(1000), ci = 1)$CI_low[1], -3.09, tolerance = 0.02) testthat::expect_equal(length(capture.output(print(hdi(distribution_normal(1000))))), 6) testthat::expect_equal(length(capture.output(print(hdi(distribution_normal(1000), ci = c(.80, .90))))), 12) testthat::expect_warning(hdi(c(2, 3, NA))) testthat::expect_warning(hdi(c(2, 3))) testthat::expect_warning(hdi(distribution_normal(1000), ci = 0.0000001)) testthat::expect_warning(hdi(distribution_normal(1000), ci = 950)) testthat::expect_warning(hdi(c(distribution_normal(1000, 0, 1), distribution_normal(1000, 6, 1), distribution_normal(1000, 12, 1)), ci = .10)) }) if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") test_that("ci", { testthat::expect_equal( hdi(m, ci = c(.5, .8), effects = "all")$CI_low, hdi(p, ci = c(.5, .8))$CI_low, tolerance = 1e-3 ) }) m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") test_that("rope", { testthat::expect_equal( hdi(m, ci = c(.5, .8), effects = "all", component = "all")$CI_low, hdi(p, ci = c(.5, .8))$CI_low, tolerance = 1e-3 ) }) } }bayestestR/tests/testthat/test-distributions.R0000644000176200001440000000361613552545342021421 0ustar liggesuserscontext("distributions") test_that("distributions", { testthat::expect_equal(mean(bayestestR::distribution_normal(10)), 0, tolerance = 0.01) testthat::expect_equal(length(bayestestR::distribution_normal(10, random = TRUE)), 10, tolerance = 0.01) testthat::expect_equal(mean(bayestestR::distribution_beta(10, 1, 1)), 0.5, tolerance = 0.01) testthat::expect_equal(length(bayestestR::distribution_normal(10, 1, 1, random = TRUE)), 10, tolerance = 0.01) testthat::expect_equal(mean(bayestestR::distribution_binomial(10, 0, 0.5)), 0, tolerance = 0.01) testthat::expect_equal(length(bayestestR::distribution_binomial(10, 0, 0.5, random = TRUE)), 10, tolerance = 0.01) 
testthat::expect_equal(mean(bayestestR::distribution_cauchy(10)), 0, tolerance = 0.01) testthat::expect_equal(length(bayestestR::distribution_cauchy(10, random = TRUE)), 10, tolerance = 0.01) testthat::expect_equal(mean(bayestestR::distribution_chisquared(10, 1)), 0.778, tolerance = 0.01) testthat::expect_equal(length(bayestestR::distribution_chisquared(10, 1, random = TRUE)), 10, tolerance = 0.01) testthat::expect_equal(mean(bayestestR::distribution_gamma(10, 1)), 0.874, tolerance = 0.01) testthat::expect_equal(length(bayestestR::distribution_gamma(10, 1, random = TRUE)), 10, tolerance = 0.01) testthat::expect_equal(mean(bayestestR::distribution_poisson(10)), 0.8, tolerance = 0.01) testthat::expect_equal(length(bayestestR::distribution_poisson(10, random = TRUE)), 10, tolerance = 0.01) testthat::expect_equal(mean(bayestestR::distribution_student(10, 1)), 0, tolerance = 0.01) testthat::expect_equal(length(bayestestR::distribution_student(10, 1, random = TRUE)), 10, tolerance = 0.01) testthat::expect_equal(mean(bayestestR::distribution_uniform(10)), 0.5, tolerance = 0.01) testthat::expect_equal(length(bayestestR::distribution_uniform(10, random = TRUE)), 10, tolerance = 0.01) }) bayestestR/tests/testthat/test-as.data.frame.density.R0000644000176200001440000000024613502673052022570 0ustar liggesuserscontext("as.data.frame.density") test_that("as.data.frame.density", { testthat::expect_is(as.data.frame(density(distribution_normal(1000))), "data.frame") }) bayestestR/tests/testthat/test-describe_posterior.R0000644000176200001440000001174513614722624022406 0ustar liggesusersif (require("rstanarm") && require("brms")) { context("describe_posterior") test_that("describe_posterior", { set.seed(333) # Numeric x <- distribution_normal(1000) rez <- testthat::expect_warning(describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all")) testthat::expect_equal(dim(rez), c(1, 19)) testthat::expect_equal(colnames(rez), c("Parameter", "Median", "MAD", "Mean", "SD", 
"MAP", "CI", "CI_low", "CI_high", "p_map", "pd", "p_ROPE", "ps", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "ROPE_Equivalence", "BF")) rez <- testthat::expect_warning(describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9))) testthat::expect_equal(dim(rez), c(2, 19)) rez <- describe_posterior(x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method = "quantile") testthat::expect_equal(dim(rez), c(1, 4)) # Dataframes x <- data.frame(replicate(4, rnorm(100))) rez <- testthat::expect_warning(describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all")) testthat::expect_equal(dim(rez), c(4, 19)) rez <- testthat::expect_warning(describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9))) testthat::expect_equal(dim(rez), c(8, 19)) rez <- describe_posterior(x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method = "quantile") testthat::expect_equal(dim(rez), c(4, 4)) # Rstanarm library(rstanarm) x <- insight::download_model("stanreg_lm_1") rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all") testthat::expect_equal(dim(rez), c(2, 21)) testthat::expect_equal(colnames(rez), c("Parameter", "Median", "MAD", "Mean", "SD", "MAP", "CI", "CI_low", "CI_high", "p_MAP", "pd", "p_ROPE", "ps", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "ROPE_Equivalence", "BF", "Rhat", "ESS")) rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9)) testthat::expect_equal(dim(rez), c(4, 21)) rez <- describe_posterior(x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method = "quantile", diagnostic = NULL, priors = FALSE) testthat::expect_equal(dim(rez), c(2, 4)) # Brms library(brms) x <- insight::download_model("brms_mixed_1") rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, ci = c(0.8, 0.9)) testthat::expect_equal(dim(rez), c(4, 16)) testthat::expect_equal(colnames(rez), c("Parameter", 
"Median", "MAD", "Mean", "SD", "MAP", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "ESS", "Rhat")) rez <- describe_posterior(x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method = "quantile", diagnostic = NULL) testthat::expect_equal(dim(rez), c(2, 4)) # BayesFactor # library(BayesFactor) # x <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) # rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all") # testthat::expect_equal(dim(rez), c(4, 16)) # rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9)) # testthat::expect_equal(dim(rez), c(8, 16)) # rez <- describe_posterior(x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method="quantile") # testthat::expect_equal(dim(rez), c(4, 4)) }) if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") test_that("describe_posterior", { testthat::expect_equal( describe_posterior(m, effects = "all")$Median, describe_posterior(p)$Median, tolerance = 1e-3 ) }) m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") test_that("describe_posterior", { testthat::expect_equal( describe_posterior(m, effects = "all", component = "all")$Median, describe_posterior(p)$Median, tolerance = 1e-3 ) }) } test_that("describe_posterior w/ BF+SI", { testthat::skip_on_cran() testthat::skip_on_travis() x <- insight::download_model("stanreg_lm_1") set.seed(555) rez <- describe_posterior(x, ci_method = "SI", test = "bf") # test si set.seed(555) rez_si <- si(x) testthat::expect_equal(rez$CI_low, rez_si$CI_low, tolerance = 0.1) testthat::expect_equal(rez$CI_high, rez_si$CI_high, tolerance = 0.1) # test BF set.seed(555) rez_bf <- bayesfactor_parameters(x) testthat::expect_equal(rez$BF, rez_bf$BF, tolerance = 0.1) }) }bayestestR/tests/testthat/test-density_at.R0000644000176200001440000000037313537043657020664 
0ustar liggesuserscontext("density_at") test_that("density_at", { testthat::expect_equal(density_at(distribution_normal(1000), 0), 0.389, tolerance = 0.01) testthat::expect_equal(density_at(distribution_normal(1000), c(0, 1))[1], 0.389, tolerance = 0.01) }) bayestestR/tests/testthat/test-simulate_data.R0000644000176200001440000000145313552545354021333 0ustar liggesuserscontext("simulate_data") test_that("simulate_correlation", { set.seed(333) data <- simulate_correlation(r = 0.5, n = 50) testthat::expect_equal(as.numeric(cor.test(data$V1, data$V2)$estimate), 0.5, tol = 0.001) data <- simulate_correlation(r = 0.5, n = 50, mean = c(0, 1), sd = c(0.7, 1.7)) testthat::expect_equal(as.numeric(cor.test(data$V1, data$V2)$estimate), 0.5, tol = 0.001) testthat::expect_equal(c(mean(data$V1), sd(data$V1)), c(0, 0.7), tol = 0.001) testthat::expect_equal(c(mean(data$V2), sd(data$V2)), c(1, 1.7), tol = 0.001) cor_matrix <- matrix(c( 1.0, 0.2, 0.4, 0.2, 1.0, 0.3, 0.4, 0.3, 1.0 ), nrow = 3 ) data <- simulate_correlation(r = cor_matrix) testthat::expect_equal(matrix(cor(data), nrow = 3), cor_matrix, tol = 0.001) }) bayestestR/tests/testthat/test-p_map.R0000644000176200001440000000216213611741734017605 0ustar liggesusersif (requireNamespace("rstanarm", quietly = TRUE)) { context("p_map") test_that("p_map", { testthat::expect_equal(as.numeric(p_map(distribution_normal(1000))), 1, tolerance = 0.01) testthat::expect_equal(as.numeric(p_map(distribution_normal(1000, 1, 1))), 0.62, tolerance = 0.01) testthat::expect_equal(as.numeric(p_map(distribution_normal(1000, 2, 1))), 0.15, tolerance = 0.01) testthat::expect_equal(as.numeric(p_map(distribution_normal(1000, 3, 0.01))), 0, tolerance = 0.01) }) if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") test_that("p_map", { testthat::expect_equal( p_map(m, effects = "all")$p_MAP, p_map(p)$p_MAP, tolerance = 1e-3 ) }) m <- insight::download_model("brms_zi_3") p <- 
insight::get_parameters(m, effects = "all", component = "all") test_that("p_map", { testthat::expect_equal( p_map(m, effects = "all", component = "all")$p_MAP, p_map(p)$p_MAP, tolerance = 1e-3 ) }) } }bayestestR/tests/testthat/test-bayesfactor_models.R0000644000176200001440000001124113614722624022354 0ustar liggesusersif (require("rstanarm") && require("BayesFactor") && require("testthat") && require("brms")) { context("bayesfactor_models + bayesfactor_inclusion") # bayesfactor_models ------------------------------------------------------ set.seed(444) mo1 <- lme4::lmer(Sepal.Length ~ (1 | Species), data = iris) mo2 <- lme4::lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) mo3 <- lme4::lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) mo4 <- lme4::lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris) mo5 <- lme4::lmer(Sepal.Length ~ Petal.Length * Petal.Width + (Petal.Length | Species), data = iris) mo4_e <- lme4::lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris[-1, ]) # both uses of denominator BFM1 <- bayestestR::bayesfactor_models(mo2, mo3, mo4, mo1, denominator = 4) BFM2 <- bayestestR::bayesfactor_models(mo2, mo3, mo4, denominator = mo1) BFM3 <- bayestestR::bayesfactor_models(mo2, mo3, mo4, mo1, denominator = mo1) BFM4 <- bayestestR::bayesfactor_models(mo2, mo3, mo4, mo5, mo1, denominator = mo1) test_that("bayesfactor_models BIC", { set.seed(444) testthat::expect_equal(BFM1, BFM2) testthat::expect_equal(BFM1, BFM3) # only on same data! 
testthat::expect_error(bayestestR::bayesfactor_models(mo1, mo2, mo4_e)) # update models testthat::expect_equal(log(update(BFM2, subset = c(1, 2))$BF), c(0, 57.3, 54.52), tolerance = 0.1) # update reference testthat::expect_equal(log(update(BFM2, reference = 1)$BF), c(0, -2.8, -6.2, -57.4), tolerance = 0.1) }) test_that("bayesfactor_models RSTANARM", { testthat::skip("Skipping bayesfactor_models RSTANARM") library(rstanarm) set.seed(444) stan_bf_0 <- stan_glm(Sepal.Length ~ 1, data = iris, refresh = 0, diagnostic_file = file.path(tempdir(), "df0.csv") ) stan_bf_1 <- stan_glm(Sepal.Length ~ Species, data = iris, refresh = 0, diagnostic_file = file.path(tempdir(), "df1.csv") ) testthat::expect_warning(bayestestR::bayesfactor_models(stan_bf_0, stan_bf_1)) stan_models <- suppressWarnings(bayestestR::bayesfactor_models(stan_bf_0, stan_bf_1)) testthat::expect_is(stan_models, "bayesfactor_models") testthat::expect_equal(log(stan_models$BF), c(0, 65.19), tolerance = 0.1) }) test_that("bayesfactor_models BRMS", { testthat::skip("Skipping bayesfactor_models BRMS") set.seed(444) brms_4bf_1 <- insight::download_model("brms_4bf_1") brms_4bf_2 <- insight::download_model("brms_4bf_2") brms_4bf_3 <- insight::download_model("brms_4bf_3") brms_4bf_4 <- insight::download_model("brms_4bf_4") brms_4bf_5 <- insight::download_model("brms_4bf_5") library(brms) brms_models <- suppressWarnings(bayestestR::bayesfactor_models(brms_4bf_1, brms_4bf_2, brms_4bf_3, brms_4bf_4, brms_4bf_5)) testthat::expect_warning(bayestestR::bayesfactor_models(brms_4bf_1, brms_4bf_2)) testthat::expect_is(brms_models, "bayesfactor_models") testthat::expect_equal(log(brms_models$BF), c(0, 68.5, 102.5, 128.6, 128.8), tolerance = 0.1) }) # bayesfactor_inclusion --------------------------------------------------- test_that("bayesfactor_inclusion", { set.seed(444) # BayesFactor ToothGrowth$dose <- as.factor(ToothGrowth$dose) BF_ToothGrowth <- BayesFactor::anovaBF(len ~ dose * supp, ToothGrowth) testthat::expect_equal( 
bayestestR::bayesfactor_inclusion(BF_ToothGrowth), bayestestR::bayesfactor_inclusion(bayestestR::bayesfactor_models(BF_ToothGrowth)) ) # with random effects in all models: testthat::expect_true(is.nan(bayestestR::bayesfactor_inclusion(BFM1)[1, "BF"])) bfinc_all <- bayestestR::bayesfactor_inclusion(BFM4, match_models = FALSE) testthat::expect_equal(bfinc_all$p_prior, c(1, 0.8, 0.6, 0.4, 0.2), tolerance = 0.1) testthat::expect_equal(bfinc_all$p_posterior, c(1, 1, 0.06, 0.01, 0), tolerance = 0.1) testthat::expect_equal(log(bfinc_all$BF), c(NaN, 56.04, -3.22, -5.9, -8.21), tolerance = 0.1) # + match_models bfinc_matched <- bayestestR::bayesfactor_inclusion(BFM4, match_models = TRUE) testthat::expect_equal(bfinc_matched$p_prior, c(1, 0.2, 0.6, 0.2, 0.2), tolerance = 0.1) testthat::expect_equal(bfinc_matched$p_posterior, c(1, 0.94, 0.06, 0.01, 0), tolerance = 0.1) testthat::expect_equal(log(bfinc_matched$BF), c(NaN, 57.37, -3.92, -5.25, -3.25), tolerance = 0.1) }) }bayestestR/tests/testthat/test-map_estimate.R0000644000176200001440000000211713611741734021161 0ustar liggesusersif (requireNamespace("rstanarm", quietly = TRUE)) { context("map_estimate") test_that("map_estimate", { testthat::expect_equal( as.numeric(map_estimate(distribution_normal(1000))), 0, tolerance = 0.01 ) }) if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") test_that("map_estimate", { testthat::expect_equal( map_estimate(m, effects = "all")$Parameter, colnames(as.data.frame(m))[1:20] ) }) m <- insight::download_model("brms_zi_3") test_that("map_estimate", { testthat::expect_equal( map_estimate(m, effects = "all", component = "all")$Parameter, c( "b_Intercept", "b_child", "b_camper", "r_persons.1.Intercept.", "r_persons.2.Intercept.", "r_persons.3.Intercept.", "r_persons.4.Intercept.", "b_zi_Intercept", "b_zi_child", "b_zi_camper", "r_persons__zi.1.Intercept.", "r_persons__zi.2.Intercept.", "r_persons__zi.3.Intercept.", "r_persons__zi.4.Intercept." 
) ) }) } }bayestestR/tests/testthat/test-p_significance.R0000644000176200001440000000144213611741734021452 0ustar liggesusersif (requireNamespace("rstanarm", quietly = TRUE)) { context("p_significance") test_that("p_significance", { set.seed(333) x <- bayestestR::distribution_normal(10000, 1, 1) ps <- bayestestR::p_significance(x) testthat::expect_equal(as.numeric(ps), 0.816, tolerance = 0.1) testthat::expect_equal(nrow(p_significance(data.frame(replicate(4, rnorm(100))))), 4) testthat::expect_is(ps, "p_significance") testthat::expect_equal(tail(capture.output(print(ps)), 1), "ps [0.10] = 81.60%") }) if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") testthat::expect_equal( p_significance(m, effects = "all")$ps[1], 0.988, tolerance = 1e-3 ) } }bayestestR/tests/testthat/test-overlap.R0000644000176200001440000000033713552545346020170 0ustar liggesuserscontext("overlap") test_that("overlap", { set.seed(333) x <- distribution_normal(1000, 2, 0.5) y <- distribution_normal(1000, 0, 1) testthat::expect_equal(as.numeric(overlap(x, y)), 0.185, tol = 0.01) }) bayestestR/tests/testthat/test-brms.R0000644000176200001440000000202513613227664017455 0ustar liggesusersif (requireNamespace("brms", quietly = TRUE)) { context("brms") test_that("brms", { # testthat::skip_on_travis() testthat::skip_on_cran() set.seed(333) library(brms) model <- insight::download_model("brms_mixed_1") testthat::expect_is(hdi(model), "data.frame") testthat::expect_is(ci(model), "data.frame") testthat::expect_is(rope(model), "data.frame") # testthat::expect_true("equivalence_test" %in% class(equivalence_test(model))) testthat::expect_is(map_estimate(model), "data.frame") testthat::expect_is(p_map(model), "data.frame") testthat::expect_is(mhdior(model), "data.frame") testthat::expect_is(p_direction(model), "data.frame") testthat::expect_equal(colnames(hdi(model)), c("Parameter", "CI", "CI_low", "CI_high", "Effects", "Component")) 
testthat::expect_equal(colnames(hdi(model, effects = "all")), c("Parameter", "CI", "CI_low", "CI_high", "Effects", "Component")) # testthat::expect_equal(nrow(equivalence_test(model)), 2) }) }bayestestR/tests/testthat/test-weighted_posteriors.R0000644000176200001440000000210713613227664022604 0ustar liggesusersif (requireNamespace("brms", quietly = TRUE)) { context("weighted_posteriors") test_that("weighted_posteriors vs posterior_average", { testthat::skip_on_cran() testthat::skip_on_travis() library(brms) fit1 <- brm(rating ~ treat + period + carry, data = inhaler, refresh = 0, save_all_pars = TRUE) fit2 <- brm(rating ~ period + carry, data = inhaler, refresh = 0, save_all_pars = TRUE) set.seed(444) res_BT <- weighted_posteriors(fit1, fit2) set.seed(444) res_brms <- brms::posterior_average(fit1, fit2, weights = "bma", missing = 0) res_brms <- res_brms[, 1:4] res_BT1 <- eti(res_BT) res_brms1 <- eti(res_brms) testthat::expect_equal(res_BT1$Parameter, res_brms1$Parameter) testthat::expect_equal(res_BT1$CI, res_brms1$CI) testthat::expect_equal(res_BT1$CI_low, res_brms1$CI_low) testthat::expect_equal(res_BT1$CI_high, res_brms1$CI_high) # plot(res_brms1) # plot(res_BT1) }) }bayestestR/tests/testthat/test-BFBayesFactor.R0000644000176200001440000000506113613227664021127 0ustar liggesusersif (requireNamespace("BayesFactor", quietly = TRUE)) { library(BayesFactor) set.seed(333) context("BF correlation") x <- BayesFactor::correlationBF(y = iris$Sepal.Length, x = iris$Sepal.Width) test_that("p_direction", { testthat::skip_on_travis() # Until insight v3 is released expect_equal(as.numeric(p_direction(x)), 0.9225, tol = 1) }) # --------------------------- context("BF t.test one sample") data(sleep) diffScores <- sleep$extra[1:10] - sleep$extra[11:20] x <- BayesFactor::ttestBF(x = diffScores) test_that("p_direction", { testthat::skip_on_travis() # Until insight v3 is released expect_equal(as.numeric(p_direction(x)), 0.99675, tol = 1) }) # --------------------------- context("BF 
t.test two samples") data(chickwts) chickwts <- chickwts[chickwts$feed %in% c("horsebean", "linseed"), ] chickwts$feed <- factor(chickwts$feed) x <- BayesFactor::ttestBF(formula = weight ~ feed, data = chickwts) test_that("p_direction", { testthat::skip_on_travis() # Until insight v3 is released expect_equal(as.numeric(p_direction(x)), 1, tol = 1) }) # --------------------------- context("BF t.test meta-analytic") t <- c(-.15, 2.39, 2.42, 2.43) N <- c(100, 150, 97, 99) x <- BayesFactor::meta.ttestBF(t = t, n1 = N, rscale = 1) test_that("p_direction", { testthat::skip_on_travis() # Until insight v3 is released expect_equal(as.numeric(p_direction(x)), 0.99975, tol = 1) }) # # --------------------------- # context("BF ANOVA") # data(ToothGrowth) # ToothGrowth$dose <- factor(ToothGrowth$dose) # levels(ToothGrowth$dose) <- c("Low", "Medium", "High") # x <- BayesFactor::anovaBF(len ~ supp*dose, data=ToothGrowth) # test_that("p_direction", { # expect_equal(as.numeric(p_direction(x)), 91.9, tol=0.1) # }) # # # --------------------------- # context("BF ANOVA Random") # data(puzzles) # x <- BayesFactor::anovaBF(RT ~ shape*color + ID, data = puzzles, whichRandom="ID") # test_that("p_direction", { # expect_equal(as.numeric(p_direction(x)), 91.9, tol=0.1) # }) # # # # --------------------------- # context("BF lm") # x <- BayesFactor::lmBF(len ~ supp + dose, data = ToothGrowth) # test_that("p_direction", { # expect_equal(as.numeric(p_direction(x)), 91.9, tol=0.1) # }) # # # x2 <- BayesFactor::lmBF(len ~ supp + dose + supp:dose, data = ToothGrowth) # x <- x / x2 # test_that("p_direction", { # expect_equal(as.numeric(p_direction(x)), 91.9, tol=0.1) # }) }bayestestR/tests/testthat/test-point_estimate.R0000644000176200001440000000137413611741734021541 0ustar liggesusersif (requireNamespace("rstanarm", quietly = TRUE)) { context("point_estimate") if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") 
test_that("point_estimate", { testthat::expect_equal( point_estimate(m, effects = "all")$Median, point_estimate(p)$Median, tolerance = 1e-3 ) }) m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") test_that("point_estimate", { testthat::expect_equal( point_estimate(m, effects = "all", component = "all")$Median, point_estimate(p)$Median, tolerance = 1e-3 ) }) } }bayestestR/tests/testthat/test-rope.R0000644000176200001440000000511513611741734017457 0ustar liggesusersif (requireNamespace("rstanarm", quietly = TRUE)) { context("rope") test_that("rope", { testthat::expect_equal(as.numeric(rope(distribution_normal(1000, 0, 1))), 0.0898, tolerance = 0.01) testthat::expect_equal(equivalence_test(distribution_normal(1000, 0, 1))$ROPE_Equivalence, "Undecided") testthat::expect_equal(length(capture.output(print(equivalence_test(distribution_normal(1000))))), 9) testthat::expect_equal(length(capture.output(print(equivalence_test(distribution_normal(1000), ci = c(0.8, 0.9))))), 14) testthat::expect_equal(as.numeric(rope(distribution_normal(1000, 2, 0.01))), 0, tolerance = 0.01) testthat::expect_equal(equivalence_test(distribution_normal(1000, 2, 0.01))$ROPE_Equivalence, "Rejected") testthat::expect_equal(as.numeric(rope(distribution_normal(1000, 0, 0.001))), 1, tolerance = 0.01) testthat::expect_equal(equivalence_test(distribution_normal(1000, 0, 0.001))$ROPE_Equivalence, "Accepted") testthat::expect_equal(equivalence_test(distribution_normal(1000, 0, 0.001), ci = 1)$ROPE_Equivalence, "Accepted") # print(rope(rnorm(1000, mean = 0, sd = 3), ci = .5)) testthat::expect_equal(rope(rnorm(1000, mean = 0, sd = 3), ci = c(.1, .5, .9))$CI, c(10, 50, 90)) x <- equivalence_test(distribution_normal(1000, 1, 1), ci = c(.50, .99)) testthat::expect_equal(x$ROPE_Percentage[2], 0.0494, tolerance = 0.01) testthat::expect_equal(x$ROPE_Equivalence[2], "Undecided") testthat::expect_error(rope(distribution_normal(1000, 0, 1), range = c(0.0, 
0.1, 0.2))) set.seed(333) testthat::expect_is(rope(distribution_normal(1000, 0, 1)), "rope") testthat::expect_error(rope(distribution_normal(1000, 0, 1), range = c("A", 0.1))) testthat::expect_equal(as.numeric(rope(distribution_normal(1000, 0, 1), range = c(-0.1, 0.1))), 0.0898, tolerance = 0.01) }) if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") test_that("rope", { testthat::expect_equal( # fix range to -.1/.1, to compare to data frame method rope(m, range = c(-.1, .1), effects = "all")$ROPE_Percentage, rope(p)$ROPE_Percentage, tolerance = 1e-3 ) }) m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") test_that("rope", { testthat::expect_equal( rope(m, effects = "all", component = "all")$ROPE_Percentage, rope(p)$ROPE_Percentage, tolerance = 1e-3 ) }) } }bayestestR/tests/testthat/test-bayesfactor_restricted.R0000644000176200001440000000311313614722624023240 0ustar liggesusersif (require("rstanarm") && require("BayesFactor") && require("testthat")) { context("bayesfactor_restricted") test_that("bayesfactor_restricted df", { set.seed(444) prior <- data.frame( X = rnorm(100), X1 = rnorm(100), X3 = rnorm(100) ) posterior <- data.frame( X = rnorm(100, .4, .2), X1 = rnorm(100, -.2, .2), X3 = rnorm(100, .2) ) hyps <- c( "X > X1 & X1 > X3", "X > X1" ) bfr <- bayestestR::bayesfactor_restricted(posterior, hypothesis = hyps, prior = prior) testthat::expect_equal(bfr$Prior_prob, c(1 / 6, 1 / 2), tolerance = 0.1) testthat::expect_equal(bfr$Posterior_prob, c(0.32, 0.99), tolerance = 0.1) testthat::expect_equal(log(bfr$BF), c(0.52, 0.76), tolerance = 0.1) testthat::expect_equal(bfr$BF, bfr$Posterior_prob / bfr$Prior_prob, tolerance = 0.1) testthat::expect_error(bayestestR::bayesfactor_restricted(posterior, prior, hypothesis = "Y < 0")) }) test_that("bayesfactor_restricted RSTANARM", { set.seed(444) library(rstanarm) fit_stan <- stan_glm(mpg ~ wt + 
cyl + am, data = mtcars, refresh = 0) hyps <- c( "am > 0 & cyl < 0", "cyl < 0", "wt - cyl > 0" ) bfr <- bayestestR::bayesfactor_restricted(fit_stan, hypothesis = hyps) testthat::expect_equal(bfr$Prior_prob, c(1 / 4, 1 / 2, 1 / 2), tolerance = 0.1) testthat::expect_equal(bfr$Posterior_prob, c(.57, 1, .11), tolerance = 0.1) testthat::expect_equal(log(bfr$BF), c(.85, .68, -1.46), tolerance = 0.1) }) }bayestestR/tests/testthat/test-ci.R0000644000176200001440000000322713611741734017107 0ustar liggesusersif (requireNamespace("rstanarm", quietly = TRUE)) { context("ci") test_that("ci", { testthat::expect_equal(ci(distribution_normal(1000), ci = .90)$CI_low[1], -1.6361, tolerance = 0.02) testthat::expect_equal(nrow(ci(distribution_normal(1000), ci = c(.80, .90, .95))), 3, tolerance = 0.01) testthat::expect_equal(ci(distribution_normal(1000), ci = 1)$CI_low[1], -3.09, tolerance = 0.02) # testthat::expect_equal(length(capture.output(print(ci(distribution_normal(1000)))))) # testthat::expect_equal(length(capture.output(print(ci(distribution_normal(1000), ci = c(.80, .90)))))) testthat::expect_warning(ci(c(2, 3, NA))) testthat::expect_warning(ci(c(2, 3))) testthat::expect_warning(ci(distribution_normal(1000), ci = 950)) x <- data.frame(replicate(4, rnorm(100))) x <- ci(x, ci = c(0.68, 0.89, 0.95)) a <- reshape_ci(x) testthat::expect_equal(c(nrow(x), ncol(x)), c(12, 4)) testthat::expect_true(all(reshape_ci(a) == x)) }) if (require("insight")) { m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") test_that("ci", { testthat::expect_equal( ci(m, ci = c(.5, .8), effects = "all")$CI_low, ci(p, ci = c(.5, .8))$CI_low, tolerance = 1e-3 ) }) m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") test_that("rope", { testthat::expect_equal( ci(m, ci = c(.5, .8), effects = "all", component = "all")$CI_low, ci(p, ci = c(.5, .8))$CI_low, tolerance = 1e-3 ) }) } 
}bayestestR/tests/testthat/test-emmGrid.R0000644000176200001440000001347513614722624020106 0ustar liggesusersif (require("rstanarm") && require("emmeans")) { context("emmGrid_*") library(rstanarm) library(emmeans) set.seed(300) model <- stan_glm(extra ~ group, data = sleep, refresh = 0) em_ <- emmeans(model, ~group) c_ <- pairs(em_) all_ <- rbind(em_, c_) all_summ <- summary(all_) test_that("emmGrid ci", { testthat::skip_on_travis() xci <- ci(all_, ci = 0.9) testthat::expect_equal(xci$CI_low, c(-0.236749774206338, 1.23103419307697, -2.99025704276072), tolerance = 0.2) testthat::expect_equal(xci$CI_high, c(1.83, 3.35, -0.02), tolerance = 0.2) }) test_that("emmGrid equivalence_test", { testthat::skip_on_travis() xeqtest <- equivalence_test(all_, ci = 0.9, range = c(-0.1, 0.1)) testthat::expect_equal(xeqtest$ROPE_Percentage, c(0.0553, 0, 0.0183), tolerance = 0.2) testthat::expect_equal(xeqtest$ROPE_Equivalence, c("Undecided", "Rejected", "Undecided")) }) test_that("emmGrid estimate_density", { testthat::skip_on_travis() xestden <- estimate_density(c_, method = "logspline", precision = 5) testthat::expect_equal(xestden$x, c(-4.67, -2.91, -1.16, 0.60, 2.35), tolerance = 0.2) testthat::expect_equal(log(xestden$y), c(-6.18, -2.12, -0.86, -3.62, -7.90), tolerance = 0.2) }) test_that("emmGrid hdi", { testthat::skip_on_travis() xhdi <- hdi(all_, ci = 0.95) testthat::expect_equal(xhdi$CI_low, c(-0.41, 0.99, -3.23), tolerance = 0.2) testthat::expect_equal(xhdi$CI_high, c(2.06, 3.56, 0.28), tolerance = 0.2) testthat::expect_equal(xhdi$CI_low, all_summ$lower.HPD, tolerance = 0.2) testthat::expect_equal(xhdi$CI_high, all_summ$upper.HPD, tolerance = 0.2) }) test_that("emmGrid p_direction", { testthat::skip_on_travis() xpd <- p_direction(all_, method = "direct") testthat::expect_equal(xpd$pd, c(0.9025, 0.999, 0.952), tolerance = 0.01) }) test_that("emmGrid p_map", { testthat::skip_on_travis() xpmap <- p_map(all_, precision = 2^9) testthat::expect_equal(xpmap$p_MAP, c(0.42, 0, 
0.26), tolerance = 0.1) }) test_that("emmGrid mhdior", { testthat::skip_on_travis() xprope <- mhdior(all_, range = c(-0.1, 0.1), precision = 0.5) testthat::expect_equal(xprope$mhdior, c(0.695, 1, 0.87), tolerance = 0.1) }) test_that("emmGrid point_estimate", { testthat::skip_on_travis() xpest <- point_estimate(all_, centrality = "median", dispersion = TRUE) testthat::expect_equal(xpest$Median, c(0.78, 2.29, -1.52), tolerance = 0.1) testthat::expect_equal(xpest$MAD, c(0.60, 0.61, 0.88), tolerance = 0.1) testthat::expect_equal(xpest$Median, all_summ$emmean, tolerance = 0.1) }) test_that("emmGrid rope", { testthat::skip_on_travis() xrope <- rope(all_, range = "default", ci = .9) testthat::expect_equal(xrope$ROPE_Percentage, c(0.0553, 0, 0.0183), tolerance = 0.1) }) test_that("emmGrid bayesfactor_parameters", { testthat::skip_on_travis() testthat::skip_on_cran() set.seed(4) xsdbf <- bayesfactor_parameters(all_, prior = model) testthat::expect_equal(log(xsdbf$BF), c(-2.5756125848835, 1.69713280431204, -0.212277519930343), tolerance = .1) testthat::expect_warning(bayesfactor_parameters(all_)) }) test_that("emmGrid bayesfactor_restricted", { testthat::skip_on_travis() testthat::skip_on_cran() set.seed(4) hyps <- c("`1` < `2`", "`1` < 0") xrbf <- bayesfactor_restricted(em_, prior = model, hypothesis = hyps) testthat::expect_equal(log(xrbf$BF), c(0.667225563308528, -1.62521030757486), tolerance = .1) testthat::expect_equal(xrbf$Prior_prob, c(0.49775, 0.504), tolerance = 1e-2) testthat::expect_equal(xrbf$Posterior_prob, c(0.952, 0.0975), tolerance = 1e-2) testthat::expect_warning(bayesfactor_restricted(em_, hypothesis = hyps)) }) test_that("emmGrid si", { testthat::skip_on_travis() testthat::skip_on_cran() set.seed(4) xrsi <- si(em_, prior = model) testthat::expect_equal(xrsi$CI_low, c(-0.8479125, 0.5738828), tolerance = .1) testthat::expect_equal(xrsi$CI_high, c(2.387275, 4.004303), tolerance = .1) }) test_that("emmGrid describe_posterior", { testthat::skip_on_travis() 
testthat::skip_on_cran() set.seed(4) xpost <- describe_posterior( all_, centrality = "median", dispersion = TRUE, ci = 0.95, ci_method = "hdi", test = c("pd", "rope", "bf"), rope_range = "default", rope_ci = 0.89, bf_prior = model ) testthat::expect_equal(log(xpost$BF), c(-2.58, 2.00, -0.25), tolerance = 0.1) testthat::expect_warning(describe_posterior(all_, test = "bf")) }) ## For non linear models set.seed(333) df <- data.frame( G = rep(letters[1:3], each = 2), Y = rexp(6) ) fit_bayes <- stan_glm(Y ~ G, data = df, family = Gamma(link = "identity"), refresh = 0 ) fit_bayes_prior <- update(fit_bayes, prior_PD = TRUE) bayes_sum <- emmeans(fit_bayes, ~G) bayes_sum_prior <- emmeans(fit_bayes_prior, ~G) test_that("emmGrid bayesfactor_restricted2", { testthat::skip_on_travis() testthat::skip_on_cran() hyps <- c("a < b", "b < c") xrbf1 <- bayesfactor_restricted(bayes_sum, fit_bayes, hypothesis = hyps) xrbf2 <- bayesfactor_restricted(bayes_sum, bayes_sum_prior, hypothesis = hyps) testthat::expect_equal(xrbf1, xrbf2, tolerance = 0.1) }) test_that("emmGrid bayesfactor_parameters", { set.seed(333) testthat::skip_on_travis() testthat::skip_on_cran() xsdbf1 <- bayesfactor_parameters(bayes_sum, prior = fit_bayes) xsdbf2 <- bayesfactor_parameters(bayes_sum, prior = bayes_sum_prior) # testthat::expect_equal(log(xsdbf1$BF), log(xsdbf2$BF), tolerance = 0.1) }) }bayestestR/tests/testthat.R0000644000176200001440000000010413474167311015526 0ustar liggesuserslibrary(testthat) library(bayestestR) test_check("bayestestR") bayestestR/vignettes/0000755000176200001440000000000013620150641014405 5ustar liggesusersbayestestR/vignettes/example1.Rmd0000644000176200001440000004747013620150172016600 0ustar liggesusers--- title: "1. 
Initiation to Bayesian models" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Example 1: Initiation to Bayesian models} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) library(insight) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) set.seed(333) if (!requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } format_percent <- function(x, digits = 0, ...) { paste0(format_value(x*100, digits = digits, ...), "%") } ``` Now that you've read the [**Get started**](https://easystats.github.io/bayestestR/articles/bayestestR.html) section, let's dive in the **subtleties of Bayesian modelling using R**. ## Loading the packages Once you've [installed](https://easystats.github.io/bayestestR/articles/bayestestR.html#bayestestr-installation) the necessary packages, we can load `rstanarm` (to fit the models), `bayestestR` (to compute useful indices) and `insight` (to access the parameters). 
```{r message=FALSE, warning=FALSE} library(rstanarm) library(bayestestR) library(insight) ``` ## Simple linear model (*aka* a regression) We will begin by conducting a simple linear regression to test the relationship between `Petal.Length` (our predictor, or *independent*, variable) and `Sepal.Length` (our response, or *dependent*, variable) from the [`iris`](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset which is included by default in R. ### Fitting the model Let's start by fitting the **frequentist** version of the model, just to have a reference point: ```{r message=FALSE, warning=FALSE} model <- lm(Sepal.Length ~ Petal.Length, data=iris) summary(model) ``` In this model, the linear relationship between `Petal.Length` and `Sepal.Length` is **positive and significant** (beta = 0.41, *t*(148) = 21.6, *p* < .001). This means that for each one-unit increase in `Petal.Length` (the predictor), you can expect `Sepal.Length` (the response) to increase by **0.41**. This effect can be visualized by plotting the predictor values on the `x` axis and the response values as `y` using the `ggplot2` package: ```{r message=FALSE, warning=FALSE} library(ggplot2) # Load the package # The ggplot function takes the data as argument, and then the variables # related to aesthetic features such as the x and y axes. ggplot(iris, aes(x=Petal.Length, y=Sepal.Length)) + geom_point() + # This adds the points geom_smooth(method="lm") # This adds a regression line ``` Now let's fit a **Bayesian version** of the model by using the `stan_glm` function in the `rstanarm` package: ```{r message=FALSE, warning=FALSE, eval=FALSE} model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA, results='hide'} library(rstanarm) set.seed(333) model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris) ``` You can see the sampling algorithm being run. 
### Extracting the posterior Once it is done, let us extract the parameters (*i.e.*, coefficients) of the model. ```{r message=FALSE, warning=FALSE, eval=FALSE} posteriors <- insight::get_parameters(model) head(posteriors) # Show the first 6 rows ``` ```{r message=FALSE, warning=FALSE, echo=FALSE} posteriors <- insight::get_parameters(model) head(posteriors) # Show the first 6 rows ``` As we can see, the parameters take the form of a lengthy dataframe with two columns, corresponding to the `intercept` and the effect of `Petal.Length`. These columns contain the **posterior distributions** of these two parameters. In simple terms, the posterior distribution is a set of different plausible values for each parameter. #### About posterior draws Let's look at the length of the posteriors. ```{r message=FALSE, warning=FALSE} nrow(posteriors) # Size (number of rows) ``` > **Why is the size 4000, and not more or less?** First of all, these observations (the rows) are usually referred to as **posterior draws**. The underlying idea is that the Bayesian sampling algorithm (*e.g.*, **Monte Carlo Markov Chains - MCMC**) will *draw* from the hidden true posterior distribution. Thus, it is through these posterior draws that we can estimate the underlying true posterior distribution. **Therefore, the more draws you have, the better your estimation of the posterior distribution**. However, increased draws also means longer computation time. If we look at the documentation (`?sampling`) for the rstanarm `"sampling"` algorithm used by default in the model above, we can see several parameters that influence the number of posterior draws. By default, there are **4** `chains` (you can see it as distinct sampling runs), that each create **2000** `iter` (draws). However, only half of these iterations are kept, as half are used for `warm-up` (the convergence of the algorithm). Thus, the total is **`4 chains * (2000 iterations - 1000 warm-up) = 4000`** posterior draws. 
We can change that, for instance: ```{r message=FALSE, warning=FALSE, eval=FALSE} model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris, chains = 2, iter = 1000, warmup = 250) nrow(insight::get_parameters(model)) # Size (number of rows) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA, echo=FALSE} model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris, chains = 2, iter = 1000, warmup = 250, refresh = 0) nrow(insight::get_parameters(model)) # Size (number of rows) ``` In this case, as would be expected, we have **`2 chains * (1000 iterations - 250 warm-up) = 1500`** posterior draws. But let's keep our first model with the default setup (as it has more draws). #### Visualizing the posterior distribution Now that we've understood where these values come from, let's look at them. We will start by visualizing the posterior distribution of our parameter of interest, the effect of `Petal.Length`. ```{r message=FALSE, warning=FALSE} ggplot(posteriors, aes(x = Petal.Length)) + geom_density(fill = "orange") ``` This distribution represents the [probability](https://en.wikipedia.org/wiki/Probability_density_function) (the y axis) of different effects (the x axis). The central values are more probable than the extreme values. As you can see, this distribution ranges from about **0.35 to 0.50**, with the bulk of it being at around **0.41**. > **Congrats! You've just described your posterior distribution.** And this is at the heart of Bayesian analysis. We don't need *p*-values, *t*-values or degrees of freedom: **everything is there**, within this posterior distribution. Our description above is consistent with the values obtained from the frequentist regression (which resulted in a beta of **0.41**). This is reassuring! Indeed, **in most cases a Bayesian analysis does not drastically change the results** or their interpretation. Rather, it makes the results more interpretable and intuitive, and easier to understand and describe. 
We can now go ahead and **precisely characterize** this posterior distribution. ### Describing the Posterior Unfortunately, it is often not practical to report the whole posterior distributions as graphs. We need to find a **concise way to summarize it**. We recommend describing the posterior distribution with **3 elements**: 1. A **point-estimate** which is a one-value summary (similar to the *beta* in frequentist regressions). 2. A **credible interval** representing the associated uncertainty. 3. Some **indices of significance**, giving information about the relative importance of this effect. #### Point-estimate **What single value can best represent my posterior distribution?** Centrality indices, such as the *mean*, the *median* or the *mode* are usually used as point-estimates - but what's the difference between them? Let's answer this by first inspecting the **mean**: ```{r message=FALSE, warning=FALSE} mean(posteriors$Petal.Length) ``` This is close to the frequentist beta. But as we know, the mean is quite sensitive to outliers or extreme values. Maybe the **median** could be more robust? ```{r message=FALSE, warning=FALSE} median(posteriors$Petal.Length) ``` Well, this is **very close to the mean** (and identical when rounding the values). Maybe we could take the **mode**, that is, the *peak* of the posterior distribution? In the Bayesian framework, this value is called the **Maximum A Posteriori (MAP)**. 
Let's see: ```{r message=FALSE, warning=FALSE} map_estimate(posteriors$Petal.Length) ``` **They are all very close!** Let's visualize these values on the posterior distribution: ```{r message=FALSE, warning=FALSE} ggplot(posteriors, aes(x = Petal.Length)) + geom_density(fill = "orange") + # The mean in blue geom_vline(xintercept=mean(posteriors$Petal.Length), color="blue", size=1) + # The median in red geom_vline(xintercept=median(posteriors$Petal.Length), color="red", size=1) + # The MAP in purple geom_vline(xintercept=map_estimate(posteriors$Petal.Length), color="purple", size=1) ``` Well, all these values give very similar results. Thus, **we will choose the median**, as this value has a direct meaning from a probabilistic perspective: **there is 50\% chance that the true effect is higher and 50\% chance that the effect is lower** (as it divides the distribution in two equal parts). #### Uncertainty Now that we have a point-estimate, we have to **describe the uncertainty**. We could compute the range: ```{r message=FALSE, warning=FALSE} range(posteriors$Petal.Length) ``` But does it make sense to include all these extreme values? Probably not. Thus, we will compute a [**credible interval**](https://easystats.github.io/bayestestR/articles/credible_interval.html). Long story short, it's kind of similar to a frequentist **confidence interval**, but easier to interpret and easier to compute — *and it makes more sense*. We will compute this **credible interval** based on the [Highest Density Interval (HDI)](https://easystats.github.io/bayestestR/articles/credible_interval.html#different-types-of-cis). It will give us the range containing the 89\% most probable effect values. 
**Note that we will use 89\% CIs instead of 95\%** CIs (as in the frequentist framework), as the 89\% level gives more [stable results](https://easystats.github.io/bayestestR/articles/credible_interval.html#why-is-the-default-89) [@kruschke2014doing] and reminds us about the arbitrariness of such conventions [@mcelreath2018statistical]. ```{r message=FALSE, warning=FALSE} hdi(posteriors$Petal.Length, ci=0.89) ``` Nice, so we can conclude that **the effect has 89\% chance of falling within the `[0.38, 0.44]` range**. We have just computed the two most important pieces of information for describing our effects. #### Effect significance However, in many scientific fields it is not sufficient to simply describe the effects. Scientists also want to know if this effect has significance in practical or statistical terms, or in other words, whether the effect is important. For instance, is the effect different from 0? So how do we **assess the *significance* of an effect**? How can we do this? Well, in this particular case, it is very eloquent: **all possible effect values (*i.e.*, the whole posterior distribution) are positive and over 0.35, which is already substantial evidence the effect is not zero**. But still, we want some objective decision criterion, to say if **yes or no the effect is 'significant'**. One approach, similar to the frequentist framework, would be to see if the **Credible Interval** contains 0. If it is not the case, that would mean that our **effect is 'significant'**. But this index is not very fine-grained, is it? **Can we do better? Yes.** ## A linear model with a categorical predictor Imagine for a moment you are interested in how the weight of chickens varies depending on two different **feed types**. For this example, we will start by selecting from the `chickwts` dataset (available in base R) two feed types of interest for us (*we do have peculiar interests*): **meat meals** and **sunflowers**. 
### Data preparation and model fitting ```{r message=FALSE, warning=FALSE} library(dplyr) # We keep only rows for which feed is meatmeal or sunflower data <- chickwts %>% filter(feed %in% c("meatmeal", "sunflower")) ``` Let's run another Bayesian regression to predict the **weight** with the **two types of feed type**. ```{r message=FALSE, warning=FALSE, eval=FALSE} model <- stan_glm(weight ~ feed, data=data) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA, results='hide'} model <- stan_glm(weight ~ feed, data=data) ``` ### Posterior description ```{r message=FALSE, warning=FALSE} posteriors <- insight::get_parameters(model) ggplot(posteriors, aes(x=feedsunflower)) + geom_density(fill = "red") ``` This represents the **posterior distribution of the difference between `meatmeal` and `sunflowers`**. Seems that the difference is rather **positive** (the values seem concentrated on the right side of 0)... Eating sunflowers makes you more fat (*at least, if you're a chicken*). But, **by how much?** Let us compute the **median** and the **CI**: ```{r message=FALSE, warning=FALSE} median(posteriors$feedsunflower) hdi(posteriors$feedsunflower) ``` It makes you fat by around 51 grams (the median). However, the uncertainty is quite high: **there is 89\% chance that the difference between the two feed types is between 14 and 91.** > **Is this effect different from 0?** ### ROPE Percentage Testing whether this distribution is different from 0 doesn't make sense, as 0 is a single value (*and the probability that any distribution is different from a single value is infinite*). However, one way to assess **significance** could be to define an area around 0, which we will consider as *practically equivalent* to zero (*i.e.*, absence of, or negligible, effect). This is called the [**Region of Practical Equivalence (ROPE)**](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html), and is one way of testing the significance of parameters. 
**How can we define this region?** > ***Driing driiiing*** -- ***The easystats team speaking. How can we help?*** -- ***I am Prof. Sanders. An expert in chicks... I mean chickens. Just calling to let you know that based on my expert knowledge, an effect between -20 and 20 is negligible. Bye.*** Well, that's convenient. Now we know that we can define the ROPE as the `[-20, 20]` range. All effects within this range are considered as *null* (negligible). We can now compute the **proportion of the 89\% most probable values (the 89\% CI) which are not null**, *i.e.*, which are outside this range. ```{r message=FALSE, warning=FALSE} rope(posteriors$feedsunflower, range = c(-20, 20), ci=0.89) ``` **5\% of the 89\% CI can be considered as null**. Is that a lot? Based on our [**guidelines**](https://easystats.github.io/bayestestR/articles/guidelines.html), yes, it is too much. **Based on this particular definition of ROPE**, we conclude that this effect is not significant (the probability of being negligible is too high). Although, to be honest, I have **some doubts about this Prof. Sanders**. I don't really trust **his definition of ROPE**. Is there a more **objective** way of defining it? ```{r echo=FALSE, fig.cap="Prof. Sanders giving default values to define the Region of Practical Equivalence (ROPE).", fig.align='center', out.width="75%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/profsanders.png") ``` **Yes.** One common practice is, for instance, to use the **tenth (`1/10 = 0.1`) of the standard deviation (SD)** of the response variable, which can be considered as a "negligible" effect size [@cohen1988statistical]. ```{r message=FALSE, warning=FALSE} rope_value <- 0.1 * sd(data$weight) rope_range <- c(-rope_value, rope_value) rope_range ``` Let's redefine our ROPE as the region within the `[-6.2, 6.2]` range. 
**Note that this can be directly obtained by the `rope_range` function :)** ```{r message=FALSE, warning=FALSE} rope_range <- rope_range(model) rope_range ``` Let's recompute the **percentage in ROPE**: ```{r message=FALSE, warning=FALSE} rope(posteriors$feedsunflower, range = rope_range, ci=0.89) ``` With this reasonable definition of ROPE, we observe that the 89\% CI of the effect does **not** overlap with the ROPE. Thus, we can conclude that **the effect is significant** (in the sense of *important* enough to be noted). ### Probability of Direction (pd) Maybe we are not interested in whether the effect is non-negligible. Maybe **we just want to know if this effect is positive or negative**. In this case, we can simply compute the proportion of the posterior that is positive, no matter the "size" of the effect. ```{r message=FALSE, warning=FALSE} n_positive <- posteriors %>% filter(feedsunflower > 0) %>% # select only positive values nrow() # Get length n_positive / nrow(posteriors) * 100 ``` We can conclude that **the effect is positive with a probability of 98\%**. We call this index the [**Probability of Direction (pd)**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html). It can, in fact, be computed more easily with the following: ```{r message=FALSE, warning=FALSE} p_direction(posteriors$feedsunflower) ``` Interestingly, it so happens that **this index is usually highly correlated with the frequentist *p*-value**. We could almost roughly infer the corresponding *p*-value with a simple transformation: ```{r message=FALSE, warning=FALSE, eval=TRUE} pd <- 97.82 onesided_p <- 1 - pd / 100 twosided_p <- onesided_p * 2 twosided_p ``` If we ran our model in the frequentist framework, we should approximately observe an effect with a *p*-value of `r round(twosided_p, digits=3)`. 
**Is that true?** #### Comparison to frequentist ```{r message=FALSE, warning=FALSE} lm(weight ~ feed, data=data) %>% summary() ``` The frequentist model tells us that the difference is **positive and significant** (beta = 52, p = 0.04). **Although we arrived at a similar conclusion, the Bayesian framework allowed us to develop a more profound and intuitive understanding of our effect, and of the uncertainty of its estimation.** ## All with one function And yet, I agree, it was a bit **tedious** to extract and compute all the indices. **But what if I told you that we can do all of this, and more, with only one function?** > **Behold, `describe_posterior`!** This function computes all of the aforementioned indices, and can be run directly on the model: ```{r message=FALSE, warning=FALSE} describe_posterior(model, test = c("p_direction","rope","bayesfactor")) ``` **Tada!** There we have it! The **median**, the **CI**, the **pd** and the **ROPE percentage**! Understanding and describing posterior distributions is just one aspect of Bayesian modelling... **Are you ready for more?** [**Click here**](https://easystats.github.io/bayestestR/articles/example2_GLM.html) to see the next example. ## References bayestestR/vignettes/guidelines.Rmd0000644000176200001440000002032513605504705017212 0ustar liggesusers--- title: "Reporting Guidelines" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > %\VignetteIndexEntry{Reporting Guidelines} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) ``` This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. 
(2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) --- # Reporting Guidelines ## How to describe and report the parameters of a model A Bayesian analysis returns a posterior distribution for each parameter (or *effect*). To minimally describe these distributions, we recommend reporting a point-estimate of [centrality](https://en.wikipedia.org/wiki/Central_tendency) as well as information characterizing the estimation uncertainty (the [dispersion](https://en.wikipedia.org/wiki/Statistical_dispersion)). Additionally, one can also report indices of effect existence and/or significance. Based on the previous [**comparison of point-estimates**](https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html) and [**indices of effect existence**](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full), we can draw the following recommendations. #### **Centrality** We suggest reporting the [**median**](https://easystats.github.io/bayestestR/reference/point_estimate.html) as an index of centrality, as it is more robust compared to the [mean](https://easystats.github.io/bayestestR/reference/point_estimate.html) or the [MAP estimate](https://easystats.github.io/bayestestR/reference/map_estimate.html). However, in case of severely skewed posterior distributions, the MAP estimate could be a good alternative. 
#### **Uncertainty** The [**89\% Credible Interval (CI)**](https://easystats.github.io/bayestestR/articles/credible_interval.html) appears as a reasonable range to characterize the uncertainty related to the estimation, being more stable than higher thresholds (such as 90\% and 95\%). We also recommend computing the CI based on the [HDI](https://easystats.github.io/bayestestR/reference/hdi.html) rather than [quantiles](https://easystats.github.io/bayestestR/reference/ci.html), favouring probable, - over central - values. Note that a CI based on the quantile (equal-tailed interval) might be more appropriate in case of transformations (for instance when transforming log-odds to probabilities). Otherwise, intervals that originally do not cover the null might cover it after transformation (see [here](https://easystats.github.io/bayestestR/articles/credible_interval.html#different-types-of-cis)). #### **Existence** ```{r echo=FALSE, fig.cap="Reviewer 2 (circa a long time ago in a galaxy far away).", fig.align='center', out.width="60%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/watto.jpg") ``` The Bayesian framework can neatly delineate and quantify different aspects of hypothesis testing, such as effect *existence* and *significance*. The most straightforward index to describe effect existence is the [**Probability of Direction (pd)**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html), representing the certainty associated with the most probable direction (positive or negative) of the effect. This index is easy to understand, simple to interpret, straightforward to compute, robust to model characteristics and independent from the scale of the data. Moreover, it is strongly correlated with the frequentist ***p*-value**, and can thus be used to draw parallels and give some reference to readers non-familiar with Bayesian statistics. 
A **two-sided *p*-value** of respectively `.1`, `.05`, `.01` and `.001` would correspond approximately to a ***pd*** of 95\%, 97.5\%, 99.5\% and 99.95\%. Thus, for convenience, we suggest the following reference values as interpretation helpers: - *pd* **\<= 95\%** ~ *p* \> .1: uncertain - *pd* **\> 95\%** ~ *p* \< .1: possibly existing - *pd* **\> 97\%**: likely existing - *pd* **\> 99\%**: probably existing - *pd* **\> 99.9\%**: certainly existing #### **Significance** The percentage in **ROPE** is an index of **significance** (in its primary meaning), informing us whether a parameter is related - or not - to a non-negligible change (in terms of magnitude) in the outcome. We suggest reporting the **percentage of the full posterior distribution** (the *full* ROPE) instead of a given proportion of CI, in the ROPE, which appears more sensitive (especially to delineate highly significant effects). Rather than using it as a binary, all-or-nothing decision criterion, such as suggested by the original [equivalence test](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html#equivalence-test), we recommend using the percentage as a continuous index of significance. However, based on [simulation data](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full), we suggest the following reference values as interpretation helpers: - **\> 99\%** in ROPE: negligible (we can accept the null hypothesis) - **\> 97.5\%** in ROPE: probably negligible - **\<= 97.5\%** \& **\>= 2.5\%** in ROPE: undecided significance - **\< 2.5\%** in ROPE: probably significant - **\< 1\%** in ROPE: significant (we can reject the null hypothesis) *Note that extra caution is required as its interpretation highly depends on other parameters such as sample size and ROPE range (see [here](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html#sensitivity-to-parameters-scale))*. 
#### **Template Sentence** Based on these suggestions, a template sentence for minimal reporting of a parameter based on its posterior distribution could be: - "the effect of *X* has a probability of ***pd*** of being *negative* (Median = ***median***, 89\% CI [ ***HDIlow*** , ***HDIhigh*** ] and can be considered as *significant* (***ROPE***\% in ROPE)." ## How to compare different models Although it can also be used to assess effect existence and significance, the **Bayes factor (BF)** is a versatile index that can be used to directly compare different models (or data generation processes). The [Bayes factor](https://easystats.github.io/bayestestR/articles/bayes_factors.html) is a ratio, informing us by how much more (or less) likely the observed data are under two compared models - usually a model with an effect vs. a model *without* the effect. Depending on the specifications of the null model (whether it is a point-estimate (e.g., **0**) or an interval), the Bayes factor could be used both in the context of effect existence and significance. In general, a Bayes factor greater than 1 gives evidence in favour of one of the models, and a Bayes factor smaller than 1 gives evidence in favour of the other model. Several rules of thumb exist to help the interpretation (see [here](https://easystats.github.io/report/articles/interpret_metrics.html#bayes-factor-bf)), with **\> 3** being one common threshold to categorize non-anecdotal evidence. #### **Template Sentence** When reporting Bayes factors (BF), one can use the following sentence: - "There is *moderate evidence* in favour of an *absence* of effect of *x* (BF = *BF*)." 
*Note: If you have any advice, opinion or such, we encourage you to let us know by opening an [discussion thread](https://github.com/easystats/bayestestR/issues) or making a pull request.* bayestestR/vignettes/indicesEstimationComparison.Rmd0000644000176200001440000002717513620150172022572 0ustar liggesusers--- title: "In-Depth 1: Comparison of Point-Estimates" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{In-Depth 1: Comparison of Point-Estimates} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- ```{r message=FALSE, warning=FALSE, include=FALSE} if (!requireNamespace("see", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("tidyr", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) ``` This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- # Effect Point-Estimates in the Bayesian Framework ## Introduction One of the main difference between the Bayesian and the frequentist frameworks is that the former returns a probability distribution of each effect (*i.e.*, parameter of interest of a model, such as a regression slope) instead of a single value. However, there is still a need and demand, for reporting or use in further analysis, for a single value (**point-estimate**) that best characterise the underlying posterior distribution. 
There are three main indices used in the literature for effect estimation: the **mean**, the **median** or the **MAP** (Maximum A Posteriori) estimate (roughly corresponding to the mode (the "peak") of the distribution). Unfortunately, there is no consensus about which one to use, as no systematic comparison has ever been done. In the present work, we will compare these three point-estimates of effect between themselves, as well as with the widely known **beta**, extracted from a comparable frequentist model. With this comparison, we expect to draw bridges and relationships between the two frameworks, helping and easing the transition for the public. ## Experiment 1: Relationship with Error (Noise) and Sample Size ### Methods The simulation aimed at modulating the following characteristics: - **Model type**: linear or logistic. - **"True" effect** (original regression coefficient from which data is drawn): Can be 1 or 0 (no effect). - **Sample size**: From 20 to 100 by steps of 10. - **Error**: Gaussian noise applied to the predictor with SD uniformly spread between 0.33 and 6.66 (with 1000 different values). We generated a dataset for each combination of these characteristics, resulting in a total of `2 * 2 * 9 * 1000 = 36000` Bayesian and frequentist models. The code used for generation is avaible [here](https://easystats.github.io/circus/articles/bayesian_indices.html) (please note that it takes usually several days/weeks to complete). 
```{r message=FALSE, warning=FALSE} library(ggplot2) library(dplyr) library(tidyr) library(see) df <- read.csv("https://raw.github.com/easystats/circus/master/data/bayesSim_study1.csv") ``` ### Results #### Sensitivity to Noise ```{r, message=FALSE, warning=FALSE} df %>% select(error, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>% gather(estimate, value, -error, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(error, 10, labels = FALSE))) %>% group_by(temp) %>% mutate(error_group = round(mean(error), 1)) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = error_group, y = value, fill = estimate, group = interaction(estimate, error_group))) + # geom_hline(yintercept = 0) + # geom_point(alpha=0.05, size=2, stroke = 0, shape=16) + # geom_smooth(method="loess") + geom_boxplot(outlier.shape=NA) + theme_modern() + scale_fill_manual(values = c("Coefficient" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + ylab("Point-estimate") + xlab("Noise") + facet_wrap(~ outcome_type * true_effect, scales="free") ``` #### Sensitivity to Sample Size ```{r, message=FALSE, warning=FALSE} df %>% select(sample_size, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>% gather(estimate, value, -sample_size, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(sample_size, 10, labels = FALSE))) %>% group_by(temp) %>% mutate(size_group = round(mean(sample_size))) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = size_group, y = value, fill = estimate, group = interaction(estimate, size_group))) + # geom_hline(yintercept = 0) + # geom_point(alpha=0.05, size=2, stroke = 0, shape=16) + # geom_smooth(method="loess") + geom_boxplot(outlier.shape=NA) + theme_modern() + scale_fill_manual(values = c("Coefficient" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + ylab("Point-estimate") + xlab("Sample size") + facet_wrap(~ outcome_type * true_effect, scales="free") 
``` #### Statistical Modelling We fitted a (frequentist) multiple linear regression to statistically test the prediction of the presence or absence of an effect by the estimates, as well as by their interaction with noise and sample size. ```{r, message=FALSE, warning=FALSE} df %>% select(sample_size, error, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>% gather(estimate, value, -sample_size, -error, -true_effect, -outcome_type) %>% glm(true_effect ~ outcome_type / estimate / value, data=., family="binomial") %>% broom::tidy() %>% select(term, estimate, p=p.value) %>% filter(stringr::str_detect(term, 'outcome_type'), stringr::str_detect(term, ':value')) %>% arrange(desc(estimate)) %>% knitr::kable(digits=2) ``` This suggests that, in order to delineate between the presence and the absence of an effect, compared to the frequentist's beta: - For linear models, the **Mean** was the better predictor, closely followed by the **Median**, the **MAP** and the frequentist **Coefficient**. - For logistic models, the **MAP** was the better predictor, followed by the **Median**, the **Mean** and, behind, the frequentist **Coefficient**. Overall, the **median** appears to be a safe and appropriate choice, maintaining a high performance across different types of models. ## Experiment 2: Relationship with Sampling Characteristics ### Methods The simulation aimed at modulating the following characteristics: - **Model type**: linear or logistic. - **"True" effect** (original regression coefficient from which data is drawn): Can be 1 or 0 (no effect). - **draws**: from 10 to 5000 by step of 5 (1000 iterations). - **warmup**: Ratio of warmup iterations. from 1/10 to 9/10 by step of 0.1 (9 iterations). We generated 3 datasets for each combination of these characteristics, resulting in a total of `2 * 2 * 8 * 40 * 9 * 3 = 34560` Bayesian and frequentist models. 
The code used for generation is avaible [here](https://easystats.github.io/circus/articles/bayesian_indices.html) (please note that it takes usually several days/weeks to complete). ```{r message=FALSE, warning=FALSE} df <- read.csv("https://raw.github.com/easystats/circus/master/data/bayesSim_study2.csv") ``` ### Results #### Sensitivity to number of iterations ```{r, message=FALSE, warning=FALSE} df %>% select(iterations, true_effect, outcome_type, beta, Median, Mean, MAP) %>% gather(estimate, value, -iterations, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(iterations, 5, labels = FALSE))) %>% group_by(temp) %>% mutate(iterations_group = round(mean(iterations), 1)) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = iterations_group, y = value, fill = estimate, group = interaction(estimate, iterations_group))) + geom_boxplot(outlier.shape=NA) + theme_classic() + scale_fill_manual(values = c("beta" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + ylab("Point-estimate of the true value 0\n") + xlab("\nNumber of Iterations") + facet_wrap(~ outcome_type * true_effect, scales="free") ``` #### Sensitivity to warmup ratio ```{r, message=FALSE, warning=FALSE} df %>% mutate(warmup = warmup / iterations) %>% select(warmup, true_effect, outcome_type, beta, Median, Mean, MAP) %>% gather(estimate, value, -warmup, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(warmup, 3, labels = FALSE))) %>% group_by(temp) %>% mutate(warmup_group = round(mean(warmup), 1)) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = warmup_group, y = value, fill = estimate, group = interaction(estimate, warmup_group))) + geom_boxplot(outlier.shape=NA) + theme_classic() + scale_fill_manual(values = c("beta" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + ylab("Point-estimate of the true value 0\n") + xlab("\nNumber of Iterations") + facet_wrap(~ outcome_type * true_effect, 
scales="free") ``` ## Experiment 3: Relationship with Priors Specification ## Discussion Conclusions can be found in the [guidelines section](https://easystats.github.io/bayestestR/articles/guidelines.html). bayestestR/vignettes/credible_interval.Rmd0000644000176200001440000002066313620150172020534 0ustar liggesusers--- title: "Credible Intervals (CI)" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test, ci, credible interval] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Credible Intervals (CI)} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) if (!requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } set.seed(333) ``` # What is a *Credible* Interval? Credible intervals are an important concept in Bayesian statistics. Its core purpose is to describe and summarise **the uncertainty** related to your parameters. In this regard, it could appear as quite similar to the frequentist **Confidence Intervals**. However, while their goal is similar, **their statistical definition and meaning is very different**. Indeed, while the latter is obtained through a complex algorithm full of rarely-tested assumptions and approximations, the credible intervals are fairly straightforward to compute. 
As the Bayesian inference returns a distribution of possible effect values (the posterior), the credible interval is just the range containing a particular percentage of probable values. For instance, the 95\% credible interval is simply the central portion of the posterior distribution that contains 95\% of the values. Note that this drastically improves the interpretability of the Bayesian interval compared to the frequentist one. Indeed, the Bayesian framework allows us to say *"given the observed data, the effect has 95% probability of falling within this range"*, while the frequentist less straightforward alternative (the 95\% ***Confidence* Interval**) would be "*there is a 95\% probability that when computing a confidence interval from data of this sort, the effect falls within this range*". # Why is the default 89\%? Naturally, when it came about choosing the CI level to report by default, **people started using 95\%**, the arbitrary convention used in the **frequentist** world. However, some authors suggested that 95\% might not be the most appropriate for Bayesian posterior distributions, potentially lacking stability if not enough posterior samples are drawn [@kruschke2014doing]. The proposition was to use 90\% instead of 95\%. However, recently, McElreath (2014, 2018) suggested that if we were to use arbitrary thresholds in the first place, why not use 89\% as this value has the additional argument of being a prime number. Thus, by default, the CIs are computed with 89\% intervals (`ci = 0.89`), deemed to be more stable than, for instance, 95\% intervals [@kruschke2014doing]. An effective sample size (ESS; see [here](https://easystats.github.io/bayestestR/reference/diagnostic_posterior.html)) of at least 10.000 is recommended if 95\% intervals should be computed (Kruschke, 2014, p. 183ff). Moreover, 89 is the highest **prime number** that does not exceed the already unstable 95\% threshold. What does it have to do with anything? 
*Nothing*, but it reminds us of the total arbitrariness of any of these conventions [@mcelreath2018statistical]. # Different types of CIs The reader might notice that `bayestestR` provides **two methods** to compute credible intervals, the **Highest Density Interval (HDI)** (`hdi()`) and the **Equal-tailed Interval (ETI)** (`eti()`). These methods can also be changed via the `method` argument of the `ci()` function. What is the difference? Let's see: ```{r warning=FALSE, message=FALSE} library(bayestestR) library(dplyr) library(ggplot2) # Generate a normal distribution posterior <- distribution_normal(1000) # Compute HDI and ETI ci_hdi <- ci(posterior, method = "HDI") ci_eti <- ci(posterior, method = "ETI") # Plot the distribution and add the limits of the two CIs posterior %>% estimate_density(extend=TRUE) %>% ggplot(aes(x=x, y=y)) + geom_area(fill="orange") + theme_classic() + # HDI in blue geom_vline(xintercept=ci_hdi$CI_low, color="royalblue", size=3) + geom_vline(xintercept=ci_hdi$CI_high, color="royalblue", size=3) + # Quantile in red geom_vline(xintercept=ci_eti$CI_low, color="red", size=1) + geom_vline(xintercept=ci_eti$CI_high, color="red", size=1) ``` > **These are exactly the same...** But is it also the case for other types of distributions? 
```{r warning=FALSE, message=FALSE} library(bayestestR) library(dplyr) library(ggplot2) # Generate a beta distribution posterior <- distribution_beta(1000, 6, 2) # Compute HDI and Quantile CI ci_hdi <- ci(posterior, method = "HDI") ci_eti <- ci(posterior, method = "ETI") # Plot the distribution and add the limits of the two CIs posterior %>% estimate_density(extend=TRUE) %>% ggplot(aes(x=x, y=y)) + geom_area(fill="orange") + theme_classic() + # HDI in blue geom_vline(xintercept=ci_hdi$CI_low, color="royalblue", size=3) + geom_vline(xintercept=ci_hdi$CI_high, color="royalblue", size=3) + # Quantile in red geom_vline(xintercept=ci_eti$CI_low, color="red", size=1) + geom_vline(xintercept=ci_eti$CI_high, color="red", size=1) ``` > **The difference is strong with this one.** Contrary to the **HDI**, for which all points within the interval have a higher probability density than points outside the interval, the **ETI** is **equal-tailed**. This means that a 90\% interval has 5\% of the distribution on either side of its limits. It indicates the 5th percentile and the 95th percentile. In symmetric distributions, the two methods of computing credible intervals, the ETI and the HDI, return similar results. This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution. On the other hand, the ETI range does change when transformations are applied to the distribution (for instance, for a log odds scale to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI. 
# The Support Interval Unlike the HDI and the ETI, which look at the posterior distribution, the **Support Interval (SI)** provides information regarding the change in the credibility of values from the prior to the posterior - in other words, it indicates which values of a parameter have gained support by the observed data by some factor greater than or equal to *k* [@wagenmakers2018SI]. ```{r warning=FALSE, message=FALSE} prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) si_1 <- si(posterior, prior, BF = 1) si_3 <- si(posterior, prior, BF = 3) ggplot(mapping = aes(x=x, y=y)) + theme_classic() + # The posterior geom_area(fill = "orange", data = estimate_density(posterior, extend = TRUE)) + # The prior geom_area(color = "black", fill = NA, size = 1, linetype = "dashed", data = estimate_density(prior, extend = TRUE)) + # BF = 1 SI in blue geom_vline(xintercept=si_1$CI_low, color="royalblue", size=1) + geom_vline(xintercept=si_1$CI_high, color="royalblue", size=1) + # BF = 3 SI in red geom_vline(xintercept=si_3$CI_low, color="red", size=1) + geom_vline(xintercept=si_3$CI_high, color="red", size=1) ``` Between the blue lines are values that received *some* support from the data (this is a BF = 1 SI), and between the red lines are values that received at least "moderate" support from the data (this is a BF = 3 SI). From the perspective of the Savage-Dickey Bayes factor, testing against a point null hypothesis for any value within the support interval will yield a Bayes factor smaller than 1/`BF`. # References bayestestR/vignettes/example2.Rmd0000644000176200001440000002654413620150172016600 0ustar liggesusers--- title: "2. 
Confirmation of Bayesian skills" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Example 2: Confirmation of Bayesian skills} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} if (!requireNamespace("see", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("performance", quietly = TRUE) || !requireNamespace("BayesFactor", quietly = TRUE) || !requireNamespace("rstanarm", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } data(iris) library(knitr) library(bayestestR) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") knitr::opts_chunk$set(dpi=150) options(digits=2) set.seed(333) ``` Now that [**describing and understanding posterior distributions**](https://easystats.github.io/bayestestR/articles/example1.html) of linear regressions has no secrets for you, we will take one step back and study some simpler models: **correlations** and ***t*-tests**. But before we do that, let us take a moment to remind ourselves and appreciate the fact that **all basic statistical procedures** such as correlations, *t*-tests, ANOVAs or Chi-square tests ***are* linear regressions** (we strongly recommend [this excellent demonstration](https://lindeloev.github.io/tests-as-linear/)). 
Nevertheless, these simple models will be the occasion to introduce a more complex index, such as the **Bayes factor**. ## Correlations ### Frequentist version Let us start, again, with a **frequentist correlation** between two continuous variables, the **width** and the **length** of the sepals of some flowers. The data is available in R as the `iris` dataset (the same that was used in the [previous tutorial](https://easystats.github.io/bayestestR/articles/example1.html)). We will compute a Pearson's correlation test, store the results in an object called `result`, then display it: ```{r message=FALSE, warning=FALSE} result <- cor.test(iris$Sepal.Width, iris$Sepal.Length) result ``` As you can see in the output, the test that we did actually compared two hypotheses: the **null hypothesis** (*h0*; no correlation) with the **alternative hypothesis** (*h1*; a non-null correlation). Based on the *p*-value, the null hypothesis cannot be rejected: the correlation between the two variables is **negative but not significant** (r = -.12, p > .05). ### Bayesian correlation To compute a Bayesian correlation test, we will need the [`BayesFactor`](https://richarddmorey.github.io/BayesFactor/) package (you can install it by running `install.packages("BayesFactor")`). We can then load this package, compute the correlation using the `correlationBF()` function and store the results in a similar fashion. 
```{r message=FALSE, warning=FALSE, results='hide'} library(BayesFactor) result <- correlationBF(iris$Sepal.Width, iris$Sepal.Length) ``` Now, let us run our `describe_posterior()` function on that: ```{r message=FALSE, warning=FALSE, eval=FALSE} describe_posterior(result) ``` ```{r echo=FALSE} structure(list(Parameter = "rho", Median = -0.114149129692488, CI = 89, CI_low = -0.240766308855643, CI_high = 0.00794997655649642, pd = 91.6, ROPE_CI = 89, ROPE_low = -0.1, ROPE_high = 0.1, ROPE_Percentage = 42.0949171581017, BF = 0.509017511647702, Prior_Distribution = "cauchy", Prior_Location = 0, Prior_Scale = 0.333333333333333), row.names = 1L, class = "data.frame") ``` We see again many things here, but the important indices for now are the **median** of the posterior distribution, `-.11`. This is (again) quite close to the frequentist correlation. We could, as previously, describe the [**credible interval**](https://easystats.github.io/bayestestR/articles/credible_interval.html), the [**pd**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html) or the [**ROPE percentage**](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html), but we will focus here on another index provided by the Bayesian framework, the **Bayes factor (BF)**. ### Bayes factor (BF) We said previously that a correlation test actually compares two hypotheses, a null (absence of effect) with an alternative one (presence of an effect). The [**Bayes factor (BF)**](https://easystats.github.io/bayestestR/articles/bayes_factors.html) allows the same comparison and determines **under which of two models the observed data are more probable**: a model with the effect of interest, and a null model without the effect of interest. We can use `bayesfactor()` to specifically compute the Bayes factor comparing those models: ```{r message=FALSE, warning=FALSE} bayesfactor(result) ``` We got a *BF* of `0.51`. What does it mean? 
Bayes factors are **continuous measures of relative evidence**, with a Bayes factor greater than 1 giving evidence in favour of one of the models (often referred to as *the numerator*), and a Bayes factor smaller than 1 giving evidence in favour of the other model (*the denominator*). > **Yes, you heard things right, evidence in favour of the null!** That's one of the reasons why the Bayesian framework is sometimes considered as superior to the frequentist framework. Remember from your stats lessons that the ***p*-value can only be used to reject *h0***, but not *accept* it. With the **Bayes factor**, you can measure **evidence against - and in favour of - the null**. BFs representing evidence for the alternative against the null can be reversed using $BF_{01}=1/BF_{10}$ (the *01* and *10* correspond to *h0* against *h1* and *h1* against *h0*, respectively) to provide evidence of the null against the alternative. This improves human readability in cases where the BF of the alternative against the null is smaller than 1 (i.e., in support of the null). In our case, `BF = 1/0.51 = 2` indicates that the data are **2 times more probable under the null compared to the alternative hypothesis**, which, though favouring the null, is considered only [anecdotal evidence against the null](https://easystats.github.io/report/articles/interpret_metrics.html#bayes-factor-bf). We can thus conclude that there is **anecdotal evidence in favour of an absence of correlation between the two variables (rmedian = -0.11, BF = 0.51)**, which is a much more informative statement than what we can do with frequentist statistics. **And that's not all!** ### Visualise the Bayes factor In general, **pie charts are an absolute no-go in data visualisation**, as our brain's perceptive system heavily distorts the information presented in such a way. Nevertheless, there is one exception: pizza charts. 
It is an intuitive way of interpreting the strength of evidence provided by BFs as an amount of surprise. ```{r echo=FALSE, fig.cap="Wagenmakers' pizza poking analogy. From the great 'www.bayesianspectacles.org' blog.", fig.align='center', out.width="80%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/LetsPokeAPizza.jpg") ``` Such "pizza plots" can be directly created through the [`see`](https://github.com/easystats/see) visualisation companion package for easystats (you can install it by running `install.packages("see")`): ```{r message=FALSE, warning=FALSE} library(see) plot(bayesfactor(result)) + scale_fill_pizza() ``` So, after seeing this pizza, how much would you be surprised by the outcome of a blinded poke? ## *t*-tests ***"I know that I know nothing, and especially not if *versicolor* and *virginica* differ in terms of Sepal.Width"*, famously said Socrates**. Time to finally answer this crucial question! ### Versicolor *vs.* virginica Bayesian *t*-tests can be performed in a very similar way to correlations. As we are particularly interested in two levels of the `Species` factor, *versicolor* and *virginica*, we will start by filtering out from `iris` the non-relevant observations corresponding to the *setosa* species, and we will then visualise the observations and the distribution of the `Sepal.Width` variable. ```{r message=FALSE, warning=FALSE} library(dplyr) library(ggplot2) # Select only two relevant species data <- iris %>% filter(Species != "setosa") %>% droplevels() # Visualise distributions and observations data %>% ggplot(aes(x = Species, y = Sepal.Width, fill = Species)) + geom_violindot(fill_dots = "black", size_dots = 1) + scale_fill_material() + theme_modern() ``` It *seems* (visually) that *virginica* flowers have, on average, a slightly higher width of sepals. Let's assess this difference statistically by using the `ttestBF` in the `BayesFactor` package. 
### Compute the Bayesian *t*-test ```{r message=FALSE, warning=FALSE} result <- BayesFactor::ttestBF(formula = Sepal.Width ~ Species, data = data) describe_posterior(result) ``` From the indices, we can say that the difference of `Sepal.Width` between *virginica* and *versicolor* has a probability of **100% of being negative** [*from the pd and the sign of the median*] (median = -0.19, 89% CI [-0.29, -0.092]). The data provides **strong evidence against the null hypothesis** (BF = 18). Keep that in mind as we will see another way of investigating this question. ## Logistic Model A hypothesis for which one uses a *t*-test can also be tested using a binomial model (*e.g.*, a **logistic model**). Indeed, it is possible to reformulate the following hypothesis, "*there is an important difference in this variable between the two groups*" by "*this variable is able to discriminate between (or classify) the two groups*". However, these models are much more powerful than a regular *t*-test. In the case of the difference of `Sepal.Width` between *virginica* and *versicolor*, the question becomes, *how well can we classify the two species using only* `Sepal.Width`. ### Fit the model ```{r message=FALSE, warning=FALSE, eval=FALSE} library(rstanarm) model <- stan_glm(Species ~ Sepal.Width, data = data, family = "binomial") ``` ```{r message=FALSE, warning=FALSE, echo=FALSE} library(rstanarm) model <- stan_glm(Species ~ Sepal.Width, data = data, family = "binomial", refresh = 0) ``` ### Visualise the model Using the [`estimate`](https://github.com/easystats/estimate) package. **Wait until estimate is on CRAN**. ### Performance and Parameters TO DO. ```{r message=FALSE, warning=FALSE} library(performance) model_performance(model) ``` ```{r message=FALSE, warning=FALSE} describe_posterior(model, test = c("pd", "ROPE", "BF")) ``` ### Visualise the indices TO DO. 
```{r message=FALSE, warning=FALSE} # plot(rope(result)) ``` ### Diagnostic Indices About diagnostic indices such as Rhat and ESS. bayestestR/vignettes/bibliography.bib0000644000176200001440000003340213610210350017531 0ustar liggesusers@book{mcelreath2018statistical, title={Statistical rethinking: A Bayesian course with examples in R and Stan}, author={McElreath, Richard}, year={2018}, publisher={Chapman and Hall/CRC} } @article{wagenmakers2018bayesian, title={Bayesian inference for psychology. Part I: Theoretical advantages and practical ramifications}, author={Wagenmakers, Eric-Jan and Marsman, Maarten and Jamil, Tahira and Ly, Alexander and Verhagen, Josine and Love, Jonathon and Selker, Ravi and Gronau, Quentin F and {\v{S}}m{\'\i}ra, Martin and Epskamp, Sacha and others}, journal={Psychonomic bulletin \& review}, volume={25}, number={1}, pages={35--57}, year={2018}, publisher={Springer} } @article{morey2014simple, title={Simple relation between Bayesian order-restricted and point-null hypothesis tests}, author={Morey, Richard D and Wagenmakers, Eric-Jan}, journal={Statistics \& Probability Letters}, volume={92}, pages={121--124}, year={2014}, publisher={Elsevier} } @misc{morey_2015_blog, title={Multiple Comparisons with BayesFactor, Part 2 – order restrictions}, url={http://bayesfactor.blogspot.com/2015/01/multiple-comparisons-with-bayesfactor-2.html}, author={Morey, Richard D}, year={2015}, month={Jan} } @article{benjamin2018redefine, title={Redefine statistical significance}, author={Benjamin, Daniel J and Berger, James O and Johannesson, Magnus and Nosek, Brian A and Wagenmakers, E-J and Berk, Richard and Bollen, Kenneth A and Brembs, Bj{\"o}rn and Brown, Lawrence and Camerer, Colin and others}, journal={Nature Human Behaviour}, volume={2}, number={1}, pages={6}, year={2018}, publisher={Nature Publishing Group} } @article{dienes2018four, title={Four reasons to prefer Bayesian analyses over significance testing}, author={Dienes, Zoltan and Mclatchie, Neil}, 
journal={Psychonomic bulletin \& review}, volume={25}, number={1}, pages={207--218}, year={2018}, publisher={Springer} } @article{lakens2018equivalence, title={Equivalence testing for psychological research: A tutorial}, author={Lakens, Dani{\"e}l and Scheel, Anne M and Isager, Peder M}, journal={Advances in Methods and Practices in Psychological Science}, pages={2515245918770963}, year={2018}, publisher={SAGE Publications Sage CA: Los Angeles, CA} } @article{etz2018bayesian, title={Bayesian inference and testing any hypothesis you can specify}, author={Etz, Alexander and Haaf, Julia M and Rouder, Jeffrey N and Vandekerckhove, Joachim}, journal={Advances in Methods and Practices in Psychological Science}, pages={2515245918773087}, year={2018}, publisher={SAGE Publications Sage CA: Los Angeles, CA} } @article{kruschke2018bayesian, title={The Bayesian New Statistics: Hypothesis testing, estimation, meta-analysis, and power analysis from a Bayesian perspective}, author={Kruschke, John K and Liddell, Torrin M}, journal={Psychonomic Bulletin \& Review}, volume={25}, number={1}, pages={178--206}, year={2018}, publisher={Springer} } @article{wagenmakers2017need, title={The need for Bayesian hypothesis testing in psychological science}, author={Wagenmakers, Eric-Jan and Verhagen, Josine and Ly, Alexander and Matzke, Dora and Steingroever, Helen and Rouder, Jeffrey N and Morey, Richard D}, journal={Psychological science under scrutiny: Recent challenges and proposed solutions}, pages={123--138}, year={2017}, publisher={Wiley New York, NY} } @article{gronau2017bayesian, title={A Bayesian model-averaged meta-analysis of the power pose effect with informed and default priors: The case of felt power}, author={Gronau, Quentin F and Van Erp, Sara and Heck, Daniel W and Cesario, Joseph and Jonas, Kai J and Wagenmakers, Eric-Jan}, journal={Comprehensive Results in Social Psychology}, volume={2}, number={1}, pages={123--138}, year={2017}, publisher={Taylor \& Francis} } 
@article{gronau2017simple, title={A simple method for comparing complex models: Bayesian model comparison for hierarchical multinomial processing tree models using warp-III bridge sampling}, author={Gronau, Quentin F and Wagenmakers, Eric-Jan and Heck, Daniel W and Matzke, Dora}, journal={Psychometrika}, pages={1--24}, year={2017}, publisher={Springer} } @article{piironen2017comparison, title={Comparison of Bayesian predictive methods for model selection}, author={Piironen, Juho and Vehtari, Aki}, journal={Statistics and Computing}, volume={27}, number={3}, pages={711--735}, year={2017}, publisher={Springer} } @article{mills2017objective, title={Objective Bayesian Precise Hypothesis Testing}, author={Mills, Jeffrey A}, journal={University of Cincinnati [original version: 2007]}, year={2017} } @article{szucs2016empirical, title={Empirical assessment of published effect sizes and power in the recent cognitive neuroscience and psychology literature}, author={Szucs, Denes and Ioannidis, John PA}, journal={BioRxiv}, pages={071530}, year={2016}, publisher={Cold Spring Harbor Laboratory} } @article{wagenmakers2016bayesian, title={Bayesian benefits for the pragmatic researcher}, author={Wagenmakers, Eric-Jan and Morey, Richard D and Lee, Michael D}, journal={Current Directions in Psychological Science}, volume={25}, number={3}, pages={169--176}, year={2016}, publisher={Sage Publications Sage CA: Los Angeles, CA} } @article{ly2016harold, title={Harold Jeffreys’s default Bayes factor hypothesis tests: Explanation, extension, and application in psychology}, author={Ly, Alexander and Verhagen, Josine and Wagenmakers, Eric-Jan}, journal={Journal of Mathematical Psychology}, volume={72}, pages={19--32}, year={2016}, publisher={Elsevier} } @article{wasserstein2016asa, title={The ASA’s statement on p-values: context, process, and purpose}, author={Wasserstein, Ronald L and Lazar, Nicole A and others}, journal={The American Statistician}, volume={70}, number={2}, pages={129--133}, 
year={2016} } @article{etz2016bayesian, title={A Bayesian perspective on the reproducibility project: Psychology}, author={Etz, Alexander and Vandekerckhove, Joachim}, journal={PloS one}, volume={11}, number={2}, pages={e0149794}, year={2016}, publisher={Public Library of Science} } @article{burrell2016machine, title={How the machine ‘thinks’: Understanding opacity in machine learning algorithms}, author={Burrell, Jenna}, journal={Big Data \& Society}, volume={3}, number={1}, pages={2053951715622512}, year={2016}, publisher={SAGE Publications Sage UK: London, England} } @article{castelvecchi2016can, title={Can we open the black box of AI?}, author={Castelvecchi, Davide}, journal={Nature News}, volume={538}, number={7623}, pages={20}, year={2016} } @incollection{cohen2016earth, title={The earth is round (p<. 05)}, author={Cohen, Jacob}, booktitle={What if there were no significance tests?}, pages={69--82}, year={2016}, publisher={Routledge} } @article{maxwell2015psychology, title={Is psychology suffering from a replication crisis? What does “failure to replicate” really mean?}, author={Maxwell, Scott E and Lau, Michael Y and Howard, George S}, journal={American Psychologist}, volume={70}, number={6}, pages={487}, year={2015}, publisher={American Psychological Association} } @article{lilienfeld2015fifty, doi = {10.3389/fpsyg.2015.01100}, title={Fifty psychological and psychiatric terms to avoid: a list of inaccurate, misleading, misused, ambiguous, and logically confused words and phrases}, author={Lilienfeld, Scott O and Sauvign{\'e}, Katheryn C and Lynn, Steven Jay and Cautin, Robin L and Latzman, Robert D and Waldman, Irwin D}, journal={Frontiers in Psychology}, volume={6}, pages={1100}, year={2015}, publisher={Frontiers} } @misc{mcelreath2014rethinking, title={rethinking: Statistical Rethinking book package. 
R package version 1.391}, author={McElreath, R}, year={2014} } @book{kruschke2014doing, title={Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan}, author={Kruschke, John}, year={2014}, publisher={Academic Press} } @article{chambers2014instead, title={Instead of 'playing the game' it is time to change the rules: Registered Reports at AIMS Neuroscience and beyond}, author={Chambers, Christopher D and Feredoes, Eva and Muthukumaraswamy, Suresh Daniel and Etchells, Peter}, journal={AIMS Neuroscience}, volume={1}, number={1}, pages={4--17}, year={2014}, publisher={Aims Press} } @article{dienes2014using, title={Using Bayes to get the most out of non-significant results}, author={Dienes, Zoltan}, journal={Frontiers in psychology}, volume={5}, pages={781}, year={2014}, publisher={Frontiers} } @article{jarosz2014odds, title={What are the odds? A practical guide to computing and reporting Bayes factors}, author={Jarosz, Andrew F and Wiley, Jennifer}, journal={The Journal of Problem Solving}, volume={7}, number={1}, pages={2}, year={2014}, publisher={Purdue University Press} } @incollection{mills2014bayesian, title={Bayesian MCMC estimation}, author={Mills, Jeffrey A and Parent, Olivier}, booktitle={Handbook of Regional Science}, pages={1571--1595}, year={2014}, publisher={Springer} } @article{andrews2013prior, title={Prior approval: The growth of Bayesian methods in psychology}, author={Andrews, Mark and Baguley, Thom}, journal={British Journal of Mathematical and Statistical Psychology}, volume={66}, number={1}, pages={1--7}, year={2013}, publisher={Wiley Online Library} } @article{kruschke2012time, title={The time has come: Bayesian methods for data analysis in the organizational sciences}, author={Kruschke, John K and Aguinis, Herman and Joo, Harry}, journal={Organizational Research Methods}, volume={15}, number={4}, pages={722--752}, year={2012}, publisher={Sage Publications Sage CA: Los Angeles, CA} } @inproceedings{snoek2012practical, title={Practical 
bayesian optimization of machine learning algorithms}, author={Snoek, Jasper and Larochelle, Hugo and Adams, Ryan P}, booktitle={Advances in neural information processing systems}, pages={2951--2959}, year={2012} } @article{wagenmakers2010bayesian, title={Bayesian hypothesis testing for psychologists: A tutorial on the Savage--Dickey method}, author={Wagenmakers, Eric-Jan and Lodewyckx, Tom and Kuriyal, Himanshu and Grasman, Raoul}, journal={Cognitive psychology}, volume={60}, number={3}, pages={158--189}, year={2010}, publisher={Elsevier} } @article{morey2011bayesinterval, title={Bayes factor approaches for testing interval null hypotheses}, author={Morey, Richard D and Rouder, Jeffrey N}, journal={Psychological methods}, volume={16}, number={4}, pages={406}, year={2011}, publisher={American Psychological Association} } @article{clyde2011bayesian, title={Bayesian adaptive sampling for variable selection and model averaging}, author={Clyde, Merlise A and Ghosh, Joyee and Littman, Michael L}, journal={Journal of Computational and Graphical Statistics}, volume={20}, number={1}, pages={80--101}, year={2011}, publisher={Taylor \& Francis} } @article{kruschke2010believe, title={What to believe: Bayesian methods for data analysis}, author={Kruschke, John K}, journal={Trends in cognitive sciences}, volume={14}, number={7}, pages={293--300}, year={2010}, publisher={Elsevier} } @article{wagenmakers2007practical, title={A practical solution to the pervasive problems ofp values}, author={Wagenmakers, Eric-Jan}, journal={Psychonomic bulletin \& review}, volume={14}, number={5}, pages={779--804}, year={2007}, publisher={Springer} } @book{jeffreys1998theory, title={The theory of probability}, author={Jeffreys, Harold}, year={1998}, publisher={OUP Oxford} } @article{kirk1996practical, title={Practical significance: A concept whose time has come}, author={Kirk, Roger E}, journal={Educational and psychological measurement}, volume={56}, number={5}, pages={746--759}, year={1996}, 
publisher={Sage Publications Sage CA: Thousand Oaks, CA} } @article{cohen1988statistical, title={Statistical power analysis for the social sciences}, author={Cohen, Jacob}, year={1988}, publisher={Hillsdale, NJ: Erlbaum} } @article{rouder2012default, title={Default Bayes factors for ANOVA designs}, author={Rouder, Jeffrey N and Morey, Richard D and Speckman, Paul L and Province, Jordan M}, journal={Journal of Mathematical Psychology}, volume={56}, number={5}, pages={356--374}, year={2012}, publisher={Elsevier} } @article{wagenmakers2018SI, title={The Support Interval}, author={Wagenmakers, Eric-Jan and Gronau, Quentin Frederik and Dablander, Fabian and Etz, Alexander}, year={2018}, publisher={PsyArXiv}, url = {https://psyarxiv.com/zwnxb/}, doi = {10.31234/osf.io/zwnxb} } @article{rouder2018bayesian, title={Bayesian inference for psychology, part IV: Parameter estimation and Bayes factors}, author={Rouder, Jeffrey N and Haaf, Julia M and Vandekerckhove, Joachim}, journal={Psychonomic bulletin \& review}, volume={25}, number={1}, pages={102--113}, year={2018}, publisher={Springer} } @article{van2019cautionary, title={A cautionary note on estimating effect size}, author={van den Bergh, Don and Haaf, Julia M and Ly, Alexander and Rouder, Jeffrey N and Wagenmakers, Eric-Jan}, year={2019}, publisher={PsyArXiv} }bayestestR/vignettes/bayes_factors.Rmd0000644000176200001440000012420413620150172017677 0ustar liggesusers--- title: "Bayes Factors" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true toc_depth: 2 fig_width: 10.08 fig_height: 6 tags: [r, bayesian, bayes factors] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Bayes Factors} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the following: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). 
*bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Retrieved from [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) --- ```{r setup, include=FALSE} if (!requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("BayesFactor", quietly = TRUE) || !requireNamespace("emmeans", quietly = TRUE) || !requireNamespace("logspline", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("see", quietly = TRUE) ) { knitr::opts_chunk$set(eval = FALSE) } else { library(knitr) library(insight) library(bayestestR) library(rstanarm) library(BayesFactor) library(emmeans) library(ggplot2) library(see) options(knitr.kable.NA = '', digits = 2) opts_chunk$set(echo = TRUE, comment = ">", message = FALSE, warning = FALSE, dpi = 150) theme_set(theme_modern()) set.seed(4) } ``` The adoption of the Bayesian framework for applied statistics, especially in the social and psychological sciences, seems to be developing in two distinct directions. One of the key topics marking their separation is their opinion about **the Bayes factor**. In short, some authors (e.g., the "Amsterdam school", led by [Wagenmakers](https://www.bayesianspectacles.org/)) advocate its use and emphasize its qualities as a statistical index, while others point to its limits and prefer, instead, the precise description of posterior distributions (using [CIs](https://easystats.github.io/bayestestR/reference/hdi.html), [ROPEs](https://easystats.github.io/bayestestR/reference/rope.html), etc.). **bayestestR** does not take a side in this debate, rather offering tools to help you in whatever analysis you want to achieve. 
Instead, it strongly supports the notion of an *informed choice:* **discover the methods, try them, understand them, learn about them, and decide for yourself**. Having said that, here's an introduction to Bayes factors :) # The Bayes Factor **Bayes factors (BFs) are indices of *relative* evidence of one "model" over another**, which can be used in the Bayesian framework as alternatives to classical (frequentist) hypothesis testing indices (such as $p-values$). According to Bayes' theorem, we can update prior probabilities of some model $M$ ($P(M)$) to posterior probabilities ($P(M|D)$) after observing some datum $D$ by accounting for the probability of observing that datum given the model ($P(D|M)$, also known as the *likelihood*): $$ P(M|D) = \frac{P(D|M)\times P(M)}{P(D)} $$ Using this equation, we can compare the probability-odds of two models: $$ \frac{P(M_1|D)}{P(M_2|D)} = \frac{P(D|M_1)}{P(D|M_2)} \times \frac{P(M_1)}{P(M_2)} $$ Where the left-most term is the *posterior odds*, the right-most term is the *prior odds*, and the middle term is the *Bayes factor*: $$ BF_{12}=\frac{P(D|M_1)}{P(D|M_2)} $$ Thus, Bayes factors can be seen either as a ratio quantifying ***the relative probability of some observed data by two models*** as they can be computed by comparing the marginal likelihoods of the two models, or as ***the degree by which some prior beliefs about the relative credibility of two models are to be updated*** as they can be computed by dividing posterior odds by prior odds, as we will soon demonstrate. Here we provide functions for computing Bayes factors in two different applications: **testing single parameters (coefficients) within a model** and **comparing statistical models themselves**. 
## Testing Models' Parameters with Bayes Factors {#bayesfactor_parameters} A ***Bayes factor for a single parameter*** can be used to answer the question: > **Given the observed data, has the null hypothesis of an absence of an effect become more, or less credible?** ```{r deathsticks_fig, echo=FALSE, fig.cap="Bayesian analysis of the Students' (1908) Sleep data set.", fig.align='center', out.width="80%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/deathsticks.jpg") ``` Let's use the Students' (1908) Sleep data set (`data("sleep")`), in which **people took some drug** and where the researchers measured the **extra hours of sleep** that they slept afterwards. We will try answering the following question: *given the observed data, has the hypothesis that the drug (the effect of `group`) **has no effect** on the number of hours of **extra sleep** (variable `extra`) become more or less credible?* ```{r sleep_boxplot, echo=FALSE} ggplot(sleep, aes(x = group, y = extra, fill= group)) + geom_boxplot() + theme_classic() ``` The **boxplot** suggests that the second group has a higher number of hours of extra sleep. *By how much?* Let's fit a simple [Bayesian linear model](https://easystats.github.io/bayestestR/articles/example1_GLM.html), with a prior of $b_{group} \sim N(0, 3)$: ```{r rstanarm_model, eval = FALSE} library(rstanarm) model <- stan_glm(extra ~ group, data = sleep, prior = normal(0, 3, autoscale = FALSE)) ``` ```{r, echo=FALSE} model <- stan_glm(extra ~ group, data = sleep, prior = normal(0, 3, autoscale = FALSE), refresh = 0) ``` ### Testing against a null-*region* One way of operationalizing the null-hypothesis is by setting a null region, such that an effect that falls within this interval would be practically equivalent to the null [@kruschke2010believe]. In our case, that means defining a range of effects we would consider equal to the drug having no effect at all. 
We can then compute the prior probability of the drug's effect falling *within this null-region*, and the prior probability of the drug's effect falling *outside the null-region* to get our *prior odds*. Say any effect smaller than an hour of extra sleep is practically equivalent to being no effect at all, we would define our prior odds as: $$ \frac {P(b_{drug} \in [-1, 1])} {P(b_{drug} \notin [-1, 1])} $$ Given our prior has a normal distribution centered at 0 hours with a scale (an SD) of 2.5 hours, our priors would look like this: ```{r, echo=FALSE} null <- c(-1,1) xrange <- c(-10,10) x_vals <- seq(xrange[1], xrange[2], length.out = 400) d_vals <- dnorm(x_vals, sd = 3) in_null <- null[1] < x_vals & x_vals < null[2] range_groups <- rep(0, length(x_vals)) range_groups[!in_null & x_vals < 0] <- -1 range_groups[!in_null & x_vals > 0] <- 1 ggplot(mapping = aes(x_vals, d_vals, fill = in_null, group = range_groups)) + geom_area(color = "black", size = 1) + scale_fill_flat(name = "", labels = c("Alternative", "Null")) + labs(x = "Drug effect", y = "Density") + theme_modern() + theme(legend.position = c(0.2, 0.8)) pnull <- diff(pnorm(null, sd = 2.5)) prior_odds <- (1 - pnull) / pnull ``` and the prior odds would be 2.2. 
By looking at the posterior distribution, we can now compute the posterior probability of the drug's effect falling *within the null-region*, and the posterior probability of the drug's effect falling *outside the null-region* to get our *posterior odds*: $$ \frac {P(b_{drug} \in [-1,1] | Data)} {P(b_{drug} \notin [-1,1] | Data)} $$ ```{r rstanarm_fit, echo=FALSE} model_prior <- bayestestR:::.update_to_priors.stanreg(model) posterior <- insight::get_parameters(model)$group2 prior <- insight::get_parameters(model_prior)$group2 f_post <- logspline::logspline(posterior) d_vals_post <- logspline::dlogspline(x_vals,f_post) ggplot(mapping = aes(x_vals, d_vals_post, fill = in_null, group = range_groups)) + geom_area(color = "black", size = 1) + scale_fill_flat(name = "", labels = c("Alternative", "Null")) + labs(x = "Drug effect", y = "Density") + theme_modern() + theme(legend.position = c(0.2, 0.8)) My_first_BF <- bayesfactor_parameters(model, null = c(-1,1), prior = model_prior) BF <- My_first_BF$BF[2] post_odds <- prior_odds * BF med_post <- point_estimate(posterior)$Median ``` We can see that the center of the posterior distribution has shifted away from 0 (to ~1.5). Likewise, the posterior odds are 2 - which seems to favor **the effect being non-null**, but... *does this mean the data support the alternative over the null?* Hard to say, since even before the data were observed, the priors already favored the alternative - so we need to take our priors into account here! Let's compute the Bayes factor as the change from the prior odds to the posterior odds: $BF_{10} = Odds_{posterior} / Odds_{prior} = 0.9$! This BF indicates that the data provide 1/0.9 = 1.1 times more evidence for the effect of the drug being practically nothing than it does for the drug having some clinically significant effect. 
Thus, although the center of the distribution has shifted away from 0, and the posterior distribution seems to favor a non-null effect of the drug, it seems that given the observed data, the probability mass has *overall* shifted closer to the null interval, making the values in the null interval more probable! [see *Non-overlapping Hypotheses* in @morey2011bayesinterval] Note that **interpretation guides** for Bayes factors can be found [**here**](https://easystats.github.io/report/articles/interpret_metrics.html#bayes-factor-bf). All of this can be achieved with the function `bayesfactor_parameters()`, which computes a Bayes factor for each of the model's parameters: ```{r, eval=FALSE} My_first_BF <- bayesfactor_parameters(model, null = c(-1, 1)) My_first_BF ``` ```{r, echo=FALSE} print(My_first_BF) ``` We can also plot using the `see` package: ```{r} library(see) plot(My_first_BF) ``` ### Testing against the *point*-null (0) **What if we don't know what region would be practically equivalent to 0?** Or if we just want the null to be exactly zero? Not a problem - as the width of null region shrinks to a point, the change from the prior probability to the posterior probability of the null can be estimated by comparing the density of the null value between the two distributions.^[Note that as the width of null interval shrinks to zero, the prior probability and posterior probability of the alternative tends towards 1.00.] This ratio is called the **Savage-Dickey ratio**, and has the added benefit of also being an approximation of a Bayes factor comparing the estimated model against a model in which the parameter of interest has been restricted to a point-null: > "[...] the Bayes factor for $H_0$ versus $H_1$ could be obtained by analytically integrating out the model parameter $\theta$. 
However, the Bayes factor may likewise be obtained by only considering $H_1$, and dividing the height of the posterior for $\theta$ by the height of the prior for $\theta$, at the point of interest." [@wagenmakers2010bayesian] ```{r, eval=FALSE} My_second_BF <- bayesfactor_parameters(model, null = 0) My_second_BF ``` ```{r, echo=FALSE} My_second_BF <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = 0) print(My_second_BF) ``` ```{r} plot(My_second_BF) ``` ### One-sided tests We can also conduct a directional test (a "one sided" or "one tailed" test) if we have a prior hypothesis about the direction of the effect. This can be done by setting an order restriction on the prior distribution (and thus also on the posterior distribution) of the alternative [@morey2014simple]. For example, if we have a prior hypothesis that the effect of the drug is an *increase* in the number of sleep hours, the alternative will be restricted to the region to the right of the null (point or interval): ```{r savagedickey_one_sided, eval=FALSE} test_group2_right <- bayesfactor_parameters(model, direction = ">") test_group2_right ``` ```{r prior_n_post_plot_one_sided, echo=FALSE} test_group2_right <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = 0, direction = ">" ) BF <- test_group2_right$BF print(test_group2_right) ``` ```{r} plot(test_group2_right) ``` As we can see, given that we have an *a priori* assumption about the direction of the effect (*that the effect is positive*), **the presence of an effect is 2.8 times more likely than the absence of an effect**, given the observed data (or that the data are 2.8 times more probable under $H_1$ than $H_0$). 
This indicates that, given the observed data, and a priori hypothesis, the posterior mass has shifted away from the null value, giving some evidence against the null (note that a Bayes factor of 2.8 is still considered quite [weak evidence](https://easystats.github.io/report/articles/interpret_metrics.html#bayes-factor-bf)). **NOTE**: See the *Testing Contrasts* appendix below. ### Support intervals {#si} So far we've seen that Bayes factors quantify relative support between competing hypotheses. However, we can also ask: > **Upon observing the data, the credibility of which of the parameter’s values has increased (or decreased)?** For example, we've seen that the point null has become somewhat less credible after observing the data, but we might also ask *which values have gained some credibility given the observed data?*. The resulting range of values is called **the support interval** as it indicates which values are supported by the data [@wagenmakers2018SI]. We can do this by once again comparing the prior and posterior distributions and checking where the posterior densities are higher than the prior densities. This can be achieved with the `si()` function: ```{r, eval=FALSE} my_first_si <- si(model, BF = 1) my_first_si ``` ```{r, echo=FALSE} my_first_si <- si(data.frame(group2 = posterior), data.frame(group2 = prior), BF = 1) print(my_first_si) ``` The argument `BF = 1` indicates that we want the interval to contain values that have gained support by a factor of at least 1 (that is, any support at all). Visually, we can see that the credibility of all the values within this interval has increased (and likewise the credibility of all the values outside this interval has decreased): ```{r} plot(my_first_si) ``` We can also see the this support interval (just barely) excludes the point null (0) - whose credibility we've already seen has decreased by the observed data. 
This emphasizes the relationship between the support interval and the Bayes factor: > "The interpretation of such intervals would be analogous to how a frequentist confidence interval contains all the parameter values that would not have been rejected if tested at level $\alpha$. For instance, a BF = 1/3 support interval encloses all values of theta for which the updating factor is not stronger than 3 against." [@wagenmakers2018SI] Thus, the choice of BF (the level of support the interval should indicate) depends on what we want our interval to represent: - A $BF = 1$ contains values whose credibility has merely not decreased by observing the data. - A $BF > 1$ contains values that received more impressive support from the data. - A $BF < 1$ contains values whose credibility has *not* been impressively decreased by observing the data. Testing against values outside this interval will produce a Bayes factor larger than $1/BF$ in support of the alternative. ## Comparing Models using Bayes Factors {#bayesfactor_models} Bayes factors can also be used to compare statistical models, for which they answer the question: > **Under which model are the observed data more probable?** In other words, which model is more likely to have produced the observed data? This is usually done by comparing the marginal likelihoods of two models. In such a case, the Bayes factor is a measure of the *relative* evidence of one of the compared models over the other. Let's use Bayes factors for model comparison to find a model that best describes the length of an iris' sepal using the `iris` data set. ### For Bayesian models (`brms` and `rstanarm`) **Note: In order to compute Bayes factors for Bayesian models, non-default arguments must be added upon fitting:** - `brmsfit` models **must** have been fitted with `save_all_pars = TRUE` - `stanreg` models **must** have been fitted with a defined `diagnostic_file`. 
Let's first fit 5 Bayesian regressions with `brms` to predict `Sepal.Length`: ```{r brms_disp, eval=FALSE} library(brms) m0 <- brm(Sepal.Length ~ 1, # intercept only model data = iris, save_all_pars = TRUE) m1 <- brm(Sepal.Length ~ Petal.Length, data = iris, save_all_pars = TRUE) m2 <- brm(Sepal.Length ~ Species, data = iris, save_all_pars = TRUE) m3 <- brm(Sepal.Length ~ Species + Petal.Length, data = iris, save_all_pars = TRUE) m4 <- brm(Sepal.Length ~ Species * Petal.Length, data = iris, save_all_pars = TRUE) ``` We can now compare these models with the `bayesfactor_models()` function, using the `denominator` argument to specify which model all models will be compared against (in this case, the intercept-only model): ```{r brms_models_disp, eval=FALSE} library(bayestestR) comparison <- bayesfactor_models(m1, m2, m3, m4, denominator = m0) comparison ``` ```{r brms_models_print, echo=FALSE} comparison <- structure( list( Model = c( "Petal.Length", "Species", "Species + Petal.Length", "Species * Petal.Length", "1" ), BF = c(3.44736e+44, 5.628679e+29, 7.121386e+55, 9.149948e+55, 1) ), class = c("bayesfactor_models", "see_bayesfactor_models", "data.frame"), row.names = c(NA, -5L), denominator = 5L, BF_method = "marginal likelihoods (bridgesampling)", unsupported_models = FALSE ) print(comparison) ``` We can see that the full model is the best model - with $BF_{\text{m0}}=9\times 10^{55}$ compared to the null (intercept only). Due to the transitive property of Bayes factors, we can easily change the reference model to the main effects model: ```{r update_models1} update(comparison, reference = 3) ``` As we can see, though the full model is the best, there is hardly any evidence that it is preferable to the main effects model. 
We can also change the reference model to the `Species` model: ```{r update_models2} update(comparison, reference = 2) ``` Notice that in the Bayesian framework the compared models *do not* need to be nested models, as happened here when we compared the `Petal.Length`-only model to the `Species`-only model (something that cannot be done in the frequentists framework, where compared models must be nested in one another). **NOTE:** In order to correctly and precisely estimate Bayes Factors, you always need the 4 P's: **P**roper **P**riors ^[[Robert, 2016](https://doi.org/10.1016/j.jmp.2015.08.002); [Kass & Raftery, 1993](https://doi.org/10.1080/01621459.1995.10476572); [Fernández, Ley, & Steel, 2001](https://doi.org/10.1016/S0304-4076(00)00076-2)], and a **P**lentiful **P**osterior ^[[Gronau, Wagenmakers, Heck, & Matzke, 2019](https://doi.org/10.1007/s11336-018-9648-3)]. ### For Frequentist models via the BIC approximation It is also possible to compute Bayes factors for the comparison of frequentist models. This is done by comparing BIC measures, allowing a Bayesian comparison of non-nested frequentist models [@wagenmakers2007practical]. 
Let's try it out on some **linear mixed models**: ```{r lme4_models, eval=FALSE} library(lme4) m0 <- lmer(Sepal.Length ~ (1 | Species), data = iris) m1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) m2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) m3 <- lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris) m4 <- lmer(Sepal.Length ~ Petal.Length * Petal.Width + (Petal.Length | Species), data = iris) bayesfactor_models(m1, m2, m3, m4, denominator = m0) ``` ```{r, echo=FALSE} structure(list(Model = c( "Petal.Length + (1 | Species)", "Petal.Length + (Petal.Length | Species)", "Petal.Length + Petal.Width + (Petal.Length | Species)", "Petal.Length * Petal.Width + (Petal.Length | Species)", "1 + (1 | Species)"), BF = c(8.24027869011648e+24, 4.7677519818206e+23, 1.52492156042604e+22, 5.93045520305254e+20, 1)), class = c("bayesfactor_models", "see_bayesfactor_models", "data.frame"), row.names = c(NA, -5L), denominator = 5L, BF_method = "BIC approximation", unsupported_models = FALSE) ``` ### Order restricted models {#bayesfactor_restricted} As stated above when discussing one-sided hypothesis tests, we can create new models by imposing order restrictions on a given model. 
For example, consider the following model, in which we predict the length of an iris' sepal from the length of its petal, as well as from its species, with a prior of $b_{petal} \sim N(0,2)$ $b_{versicolors}\ \&\ b_{virginica} \sim N(0,1.2)$: ```{r, eval=FALSE} iris_model <- stan_glm(Sepal.Length ~ Species + Petal.Length, data = iris, prior = normal(0, c(2, 1.2, 1.2), autoscale = FALSE)) ``` ```{r, echo=FALSE} iris_model <- stan_glm(Sepal.Length ~ Species + Petal.Length, data = iris, prior = normal(0, c(2, 1.2, 1.2), autoscale = FALSE), refresh = 0) ``` These priors are **unrestricted** - that is, all values between $-\infty$ and $\infty$ of all parameters in the model have some non-zero credibility (no matter how small; this is true for both the prior and posterior distribution). Subsequently, *a priori* the ordering of the parameters relating to the iris species can have any ordering, such that (a priori) setosa can have larger sepals than virginica, but it is also possible for virginica to have larger sepals than setosa! Does it make sense to let our priors cover all of these possibilities? That depends on our *prior* knowledge or hypotheses. For example, even a novice botanist will assume that it is unlikely that petal length will be *negatively* associated with sepal length - an iris with longer petals is likely larger, and thus will also have a longer sepal. And an expert botanist will perhaps assume that setosas have smaller sepals than both versicolors and virginica. These priors can be formulated as **restricted** priors [@morey_2015_blog; @morey2011bayesinterval]: 1. The novice botanist: $b_{petal} > 0$ 2. The expert botanist: $b_{versicolors} > 0\ \&\ b_{virginica} > 0$ By testing these restrictions on prior and posterior samples, we can see how the probabilities of the restricted distributions change after observing the data. 
This can be achieved with `bayesfactor_restricted()`, which computes a Bayes factor for these restricted models vs the unrestricted model. Let's first specify these restrictions as logical conditions: ```{r} botanist_hypotheses <- c( "Petal.Length > 0", "(Speciesversicolor > 0) & (Speciesvirginica > 0)" ) ``` Let's test these hypotheses: ```{r, eval=FALSE} botanist_BFs <- bayesfactor_restricted(iris_model, hypothesis = botanist_hypotheses) botanist_BFs ``` ```{r, echo=FALSE} model_prior <- bayestestR:::.update_to_priors.stanreg(iris_model) botanist_BFs <- bayesfactor_restricted(iris_model, prior = model_prior, hypothesis = botanist_hypotheses) print(botanist_BFs) ``` We can see that the novice botanist's hypothesis gets a Bayes factor of ~2, indicating the data provide twice as much evidence for a model in which petal length is restricted to be positively associated with sepal length than for a model with no such restriction. What about our expert botanist? He seems to have failed miserably, with a BF favoring the *unrestricted* model many, many times over ($BF\gg1,000$). How is this possible? It seems that when *controlling for petal length*, versicolor and virginica actually have shorter sepals! ```{r plot_iris, echo=FALSE} ggplot(iris, aes(Petal.Length, Sepal.Length, color = Species)) + geom_point() + scale_color_flat() + theme(legend.position = c(0.2, 0.8)) ``` Note that these Bayes factors compare the restricted model to the unrestricted model. 
If we wanted to compare the restricted model to the null model, we could use the transitive property of Bayes factors like so: $$ BF_{restricted / NULL} = \frac {BF_{restricted / un-restricted}} {BF_{un-restricted / NULL}} $$ **Because these restrictions are on the prior distribution, they are only appropriate for testing pre-planned (*a priori*) hypotheses, and should not be used for any post hoc comparisons [@morey_2015_blog].** **NOTE**: See the *Specifying Correct Priors for Factors with More Than 2 Levels* appendix below. ## Bayesian Model Averaging In the previous section we discussed the direct comparison of two models to determine if an effect is supported by the data. However, in many cases there are too many models to consider or perhaps it is not straightforward which models we should compare to determine if an effect is supported by the data. For such cases we can use Bayesian model averaging (BMA) to determine the support provided by the data for a parameter or term across many models. ### Inclusion Bayes factors {#bayesfactor_inclusion} Inclusion Bayes factors answer the question: > **Are the observed data more probable under models with a particular predictor, than they are under models without that particular predictor?** In other words, on average - are models with predictor $X$ more likely to have produced the observed data than models without predictor $X$?^[A model without predictor $X$ can be thought of as a model in which the parameter(s) of the predictor have been restricted to a null-point of 0.] Since each model has a prior probability, it is possible to sum the prior probability of all models that include a predictor of interest (the *prior inclusion probability*), and of all models that do not include that predictor (the *prior exclusion probability*). 
After the data are observed, and each model is assigned a posterior probability, we can similarly consider the sums of the posterior models' probabilities to obtain the *posterior inclusion probability* and the *posterior exclusion probability*. Once again, the change from prior inclusion odds to the posterior inclusion odds is the Inclusion Bayes factor ["$BF_{Inclusion}$"; @clyde2011bayesian]. Let's use the `brms` example from above: ```{r inclusion_brms} bayesfactor_inclusion(comparison) ``` If we examine the interaction term's inclusion Bayes factor, we can see that across all 5 models, a model with the interaction term (`Species:Petal.Length`) is *on average* 5 times more likely than a model without the interaction term. **Note** that `Species`, a factor represented in the model with several parameters, gets a single Bayes factor - inclusion Bayes factors are given per predictor! We can also compare only matched models - such that averaging is done only across models that (1) do not include any interactions with the predictor of interest; (2) for interaction predictors, averaging is done only across models that contain the main effect of which the interaction predictor is comprised (see explanation for why you might want to do this [here](https://www.cogsci.nl/blog/interpreting-bayesian-repeated-measures-in-jasp)). ```{r inclusion_brms2} bayesfactor_inclusion(comparison, match_models = TRUE) ``` #### Comparison with JASP `bayesfactor_inclusion()` is meant to provide Bayes Factors per predictor, similar to JASP's *Effects* option. Let's compare the two: 1. Across all models: ```{r JASP_all} library(BayesFactor) data(ToothGrowth) ToothGrowth$dose <- as.factor(ToothGrowth$dose) BF_ToothGrowth <- anovaBF(len ~ dose*supp, ToothGrowth, progress = FALSE) bayesfactor_inclusion(BF_ToothGrowth) ``` ```{r JASP_all_fig, echo=FALSE} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP1.PNG") ``` 2.
Across matched models: ```{r JASP_matched} bayesfactor_inclusion(BF_ToothGrowth, match_models = TRUE) ``` ```{r JASP_matched_fig, echo=FALSE} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP2.PNG") ``` 3. With Nuisance Effects: We'll add `dose` to the null model in JASP, and do the same in `R`: ```{r JASP_Nuisance} BF_ToothGrowth_against_dose <- BF_ToothGrowth[3:4]/BF_ToothGrowth[2] # OR: # update(bayesfactor_models(BF_ToothGrowth), # subset = c(4, 5), # reference = 3) BF_ToothGrowth_against_dose bayesfactor_inclusion(BF_ToothGrowth_against_dose) ``` ```{r JASP_Nuisance_fig, echo=FALSE} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP3.PNG") ``` ### Averaging posteriors {#weighted_posteriors} Similar to how we can average evidence for a predictor across models, we can also average the posterior estimate across models. One situation in which this is useful is in **situations where Bayes factors seem to support a null effect, yet the *HDI* of the alternative excludes the null value** (also see `si()` described above). For example, looking at the *Motor Trend Car Road Tests* data (`data(mtcars)`), we would naturally predict miles/gallon (`mpg`) from transmission type (`am`) and weight (`wt`), but what about the number of carburetors (`carb`)? Is this a good predictor?
We can determine this by comparing the following models: ```{r, eval=FALSE} mod <- stan_glm(mpg ~ wt + am, data = mtcars, prior = normal(0, c(10,10), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df1.csv")) mod_carb <- stan_glm(mpg ~ wt + am + carb, data = mtcars, prior = normal(0, c(10,10,20), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df0.csv")) bayesfactor_models(mod_carb, denominator = mod) ``` ```{r, echo=FALSE} mod <- stan_glm(mpg ~ wt + am, data = mtcars, prior = normal(0, c(10,10), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df1.csv"), refresh = 0) mod_carb <- stan_glm(mpg ~ wt + am + carb, data = mtcars, prior = normal(0, c(10,10,20), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df0.csv"), refresh = 0) BF_carb <- bayesfactor_models(mod_carb, denominator = mod, verbose = FALSE) BF <- BF_carb$BF[1] print(BF_carb) ``` It seems that the model without `carb` as a predictor is $1/BF=1.2$ times more likely than the model *with* `carb` as a predictor. We might then assume that in the latter model, the HDI will include the point-null value of 0 effect, to also indicate the credibility of the null in the posterior. However, this is not the case: ```{r} hdi(mod_carb, ci = .95) ``` How can this be? By estimating the HDI of the effect for `carb` in the full model, we are acting under the assumption that that model is correct. However, as we've just seen, both models are practically tied, and in fact it was the no-`carb` model, in which the effect for `carb` is fixed at 0, that was slightly more supported by the data. If this is the case **why limit our estimation of the effect just to one model?** [@van2019cautionary]. Using Bayesian model averaging, we can combine the posteriors samples from several models, weighted by the models' marginal likelihood (done via the `bayesfactor_models()` function). 
If some parameter is part of some of the models but is missing from others, it is assumed to be fixed at 0 (which can also be seen as a method of applying shrinkage to our estimates). This results in a posterior distribution across several models, which we can now treat like any posterior distribution, and estimate the HDI. We can do this with the `weighted_posteriors()` function: ```{r} BMA_draws <- weighted_posteriors(mod, mod_carb) BMA_hdi <- hdi(BMA_draws, ci = .95) BMA_hdi plot(BMA_hdi) ``` We can see that across both models under consideration, the posterior of the `carb` effect is almost equally weighted between the alternative model and the null model - as represented by about half of the posterior mass concentrated at 0 - which makes sense as both models were almost equally supported by the data. We can also see that across both models, **the HDI does now contain 0**. Thus we have resolved the conflict between the Bayes factor and the HDI [@rouder2018bayesian]! **Note** that parameters might play different roles across different models; for example, the parameter `A` plays a different role in the model `Y ~ A + B` (where it is a main effect) than it does in the model `Y ~ A + B + A:B` (where it is a simple effect). In many cases, centering of predictors (mean subtracting for continuous variables, and effects coding via `contr.sum` or orthonormal coding via [`contr.bayes`](https://easystats.github.io/bayestestR/reference/contr.bayes.html) for factors) can reduce this issue. ## Appendices ### Testing contrasts (with `emmeans` / `modelbased`) Besides testing parameters, `bayesfactor_parameters()` can be used to test any estimate based on the prior and posterior distribution of the estimate. One way to achieve this is with a mix of `bayesfactor_parameters()` + [**`emmeans`**](https://cran.r-project.org/package=emmeans) to [test Bayesian contrasts](https://easystats.github.io/blog/posts/bayestestr_emmeans/).
For example, in the `sleep` example from above, we can estimate the group means and the difference between them: ```{r, echo=FALSE} set.seed(1) ``` ```{r} library(emmeans) groups <- emmeans(model, ~ group) group_diff <- pairs(groups) (groups_all <- rbind(groups, group_diff)) # pass the original model via prior bayesfactor_parameters(groups_all, prior = model) ``` That is strong evidence for the mean of group 1 being 0, and for group 2 not being 0, but hardly any evidence for the difference between them being not 0. Conflict? Uncertainty? That is the Bayesian way! We can also use the `easystats`' [**`modelbased`**](https://cran.r-project.org/package=modelbased) package to compute Bayes factors for contrasts: ```{r, echo=FALSE} set.seed(1) ``` ```{r} library(modelbased) estimate_contrasts(model, test = "bf") ``` **NOTE**: See the *Specifying Correct Priors for Factors with More Than 2 Levels* section below. ### Specifying correct priors for factors {#contr_bayes} This section introduces the biased priors obtained when using the common *effects* factor coding (`contr.sum`) or dummy factor coding (`contr.treatment`), and the solution of using orthonormal factor coding (`contr.bayes`) [as outlined in @rouder2012default, section 7.2]. Specifically, ***special care should be taken when working with factors which have 3 or more levels***. #### Contrasts (and marginal means) The *effects* factor coding commonly used in factorial analysis carries a hidden bias when it is applied to Bayesian priors. For example, if we want to test all pairwise differences between 3 levels of the same factor, we would expect all *a priori* differences to have the same distribution, but... For our example, we will test all ***prior*** pairwise differences between the 3 species in the `iris` data-set.
```{r, eval=FALSE} contrasts(iris$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian()) pairs_sum <- pairs(emmeans(fit_sum, ~ Species)) pairs_sum ``` ```{r, echo=FALSE} contrasts(iris$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian(), refresh = 0) pairs_sum <- pairs(emmeans(fit_sum, ~ Species)) em_pairs_samples <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(pairs_sum, names = FALSE))) print(pairs_sum) ggplot(stack(em_pairs_samples), aes(x = values, fill = ind)) + geom_density(size = 1) + facet_grid(ind ~ .) + labs(x = "prior difference values") + theme(legend.position = "none") ``` We can see that though the prior estimate for all 3 pairwise contrasts is ~0, the scale / HDI is much narrower for the prior of the `setosa - versicolor` contrast! ***What happened???*** This is caused by an inherent bias in the priors introduced by the *effects* coding (it's even worse with the default treatment coding, because the prior for the intercept is usually drastically different from the effect's parameters).
**And since it affects the priors, this bias will also bias the Bayes factors, over- / under-stating evidence for some contrasts over others!** The solution is to use *orthonormal* factor coding, a-la `contr.bayes`. You can either specify this factor coding per-factor: ```{r} contrasts(iris$Species) <- contr.bayes ``` Or you can set it globally: ```{r, eval=FALSE} options(contrasts = c('contr.bayes', 'contr.poly')) ``` Let's again estimate the ***prior*** differences: ```{r, eval=FALSE} contrasts(iris$Species) <- contr.bayes fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian()) pairs_bayes <- pairs(emmeans(fit_bayes, ~ Species)) pairs_bayes ``` ```{r, echo=FALSE} contrasts(iris$Species) <- contr.bayes fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian(), refresh = 0) pairs_bayes <- pairs(emmeans(fit_bayes, ~ Species)) em_pairs_samples <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(pairs_bayes, names = FALSE))) print(pairs_bayes) ggplot(stack(em_pairs_samples), aes(x = values, fill = ind)) + geom_density(size = 1) + facet_grid(ind ~ .) + labs(x = "prior difference values") + theme(legend.position = "none") ``` We can see that using this coding scheme, we have equal priors on all pairwise contrasts. #### Order restrictions This bias also affects order restrictions involving 3 or more levels. For example, if we want to test an order restriction among A, B, and C, the *a priori* probability of obtaining the order A > C > B is 1/6 (reach back to *intro to stats* year 1), but...
For our example, we will be interested in the following order restrictions in the `iris` data-set (each line is a separate restriction): ```{r} hyp <- c( # comparing 2 levels "setosa < versicolor", "setosa < virginica", "versicolor < virginica", # comparing 3 (or more) levels "setosa < virginica & virginica < versicolor", "virginica < setosa & setosa < versicolor", "setosa < versicolor & versicolor < virginica" ) ``` With the default factor coding, this looks like this: ```{r, eval=FALSE} contrasts(iris$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian()) em_sum <- emmeans(fit_sum, ~ Species) # the posterior marginal means bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp) ``` ```{r, echo=FALSE} contrasts(iris$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian(), refresh = 0) em_sum <- emmeans(fit_sum, ~ Species) # the posterior marginal means bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp) ``` ***What happened???*** 1. The comparison of 2 levels all have a prior of ~0.5, as expected. 2. The comparison of 3 levels has different priors, depending on the order restriction - i.e. **some orders are *a priori* more likely than others!!!** Again, this is solved by using the *orthonormal* factor coding (from above). 
```{r, eval=FALSE} contrasts(iris$Species) <- contr.bayes fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian()) em_bayes <- emmeans(fit_bayes, ~ Species) # the posterior marginal means bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp) ``` ```{r, echo=FALSE} contrasts(iris$Species) <- contr.bayes fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian(), refresh = 0) em_bayes <- emmeans(fit_bayes, ~ Species) # the posterior marginal means bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp) ``` #### Conclusion When comparing the results from the two factor coding schemes, we find: 1. In both cases, the estimated (posterior) means are quite similar (if not identical). 2. The priors and Bayes factors differ between the two schemes. 3. Only with `contr.bayes`, the prior distribution of the difference or the order of 3 (or more) means is balanced. # ReferencesbayestestR/vignettes/apa.csl0000644000176200001440000016075013506417057015674 0ustar liggesusers bayestestR/vignettes/example3.Rmd0000644000176200001440000000355213610210142016570 0ustar liggesusers--- title: "3. Become a Bayesian master" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Example 3: Become a Bayesian master} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541.
https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") knitr::opts_chunk$set(dpi=150) options(digits=2) set.seed(333) ``` ```{r echo=FALSE, fig.cap="Yoda Bayes (896 BBY - 4 ABY).", fig.align='center', out.width="80%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/YodaBayes.jpg") ``` ## Mixed Models TO BE CONTINUED. ### Priors TO BE CONTINUED. ## What's next? The journey to become a true Bayesian master is not over. It is merely the beginning. It is now time to leave the `bayestestR` universe and apply the Bayesian framework in a variety of other statistical contexts: - [**Marginal means**](https://easystats.github.io/modelbased/articles/marginal_means.html) - [**Contrast analysis**](https://easystats.github.io/modelbased/articles/contrast_analysis.html) - [**Testing Contrasts from Bayesian Models with 'emmeans' and 'bayestestR'**](https://easystats.github.io/blog/posts/bayestestr_emmeans/) bayestestR/vignettes/probability_of_direction.Rmd0000644000176200001440000002676113620150172022130 0ustar liggesusers--- title: "Probability of Direction (pd)" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Probability of Direction (pd)} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. 
(2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) --- ```{r message=FALSE, warning=FALSE, include=FALSE} if (!requireNamespace("see", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("tidyr", quietly = TRUE) || !requireNamespace("logspline", quietly = TRUE) || !requireNamespace("KernSmooth", quietly = TRUE) || !requireNamespace("tidyr", quietly = TRUE) || !requireNamespace("GGally", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) set.seed(333) ``` # What is the *pd?* The **Probability of Direction (pd)** is an index of **effect existence**, ranging from 50\% to 100\%, representing the certainty with which an effect goes in a particular direction (*i.e.*, is positive or negative). Beyond its **simplicity of interpretation, understanding and computation**, this index also presents other interesting properties: - It is **independent from the model**: It is solely based on the posterior distributions and does not require any additional information from the data or the model. - It is **robust** to the scale of both the response variable and the predictors. - It is strongly correlated with the frequentist ***p*-value**, and can thus be used to draw parallels and give some reference to readers non-familiar with Bayesian statistics. However, this index is not relevant to assess the magnitude and importance of an effect (the meaning of "significance"), which is better achieved through other indices such as the [ROPE percentage](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html). In fact, indices of significance and existence are totally independent. 
You can have an effect with a *pd* of **99.99\%**, for which the whole posterior distribution is concentrated within the `[0.0001, 0.0002]` range. In this case, the effect is **positive with a high certainty**, but also **not significant** (*i.e.*, very small). Indices of effect existence, such as the *pd*, are particularly useful in exploratory research or clinical studies, for which the focus is to make sure that the effect of interest is not in the opposite direction (for clinical studies, that a treatment is not harmful). However, once the effect's direction is confirmed, the focus should shift toward its significance, including a precise estimation of its magnitude, relevance and importance. # Relationship with the *p*-value In most cases, it seems that the *pd* has a direct correspondence with the frequentist **one-sided *p*-value** through the formula: $$p_{one-sided} = 1-p_d$$ Similarly, the **two-sided *p*-value** (the most commonly reported one) is equivalent through the formula: $$p_{two-sided} = 2*(1-p_d)$$ Thus, the two-sided *p*-value of respectively **.1**, **.05**, **.01** and **.001** would correspond approximately to a *pd* of **95\%**, **97.5\%**, **99.5\%** and **99.95\%** . 
```{r message=FALSE, warning=FALSE, echo=FALSE, fig.cap="Correlation between the frequentist p-value and the probability of direction (pd)", fig.align='center'} library(dplyr) library(tidyr) library(ggplot2) library(see) read.csv("https://raw.github.com/easystats/easystats/master/publications/makowski_2019_bayesian/data/data.csv") %>% mutate(effect_existence = ifelse(true_effect == 1, "Presence of true effect", "Absence of true effect"), p_direction = p_direction * 100) %>% ggplot(aes(x=p_direction, y=p_value, color=effect_existence)) + geom_point2(alpha=0.1) + geom_segment(aes(x=95, y=Inf, xend=95, yend=0.1), color="black", linetype="longdash") + geom_segment(aes(x=-Inf, y=0.1, xend=95, yend=0.1), color="black", linetype="longdash") + geom_segment(aes(x=97.5, y=Inf, xend=97.5, yend=0.05), color="black", linetype="dashed") + geom_segment(aes(x=-Inf, y=0.05, xend=97.5, yend=0.05), color="black", linetype="dashed") + theme_modern() + scale_y_reverse(breaks = c(0.05, round(seq(0, 1, length.out = 11), digits=2))) + scale_x_continuous(breaks = c(95, 97.5, round(seq(50, 100, length.out = 6)))) + scale_color_manual(values=c("Presence of true effect"="green", "Absence of true effect"="red")) + theme(legend.title = element_blank()) + guides(colour = guide_legend(override.aes = list(alpha = 1))) + xlab("Probability of Direction (pd)") + ylab("Frequentist p-value") ``` > **But if it's like the *p*-value, it must be bad because the *p*-value is bad [*insert reference to the reproducibility crisis*].** In fact, this aspect of the reproducibility crisis might have been misunderstood. Indeed, it is not that the *p*-value is an intrinsically bad or wrong. Instead, it is its **misuse**, **misunderstanding** and **misinterpretation** that fuels the decay of the situation. For instance, the fact that the **pd** is highly correlated with the *p*-value suggests that the latter is more an index of effect *existence* than *significance* (*i.e.*, "worth of interest"). 
The Bayesian version, the **pd**, has an intuitive meaning and makes obvious the fact that **all thresholds are arbitrary**. Additionally, the **mathematical and interpretative transparency** of the **pd**, and its reconceptualisation as an index of effect existence, offers a valuable insight into the characterization of Bayesian results. Moreover, its concomitant proximity with the frequentist *p*-value makes it a perfect metric to ease the transition of psychological research into the adoption of the Bayesian framework. # Methods of computation The most **simple and direct** way to compute the **pd** is to 1) look at the median's sign, 2) select the portion of the posterior of the same sign and 3) compute the percentage that this portion represents. This "simple" method is the most straightforward, but its precision is directly tied to the number of posterior draws. The second approach relies on [**density estimation**](https://easystats.github.io/bayestestR/reference/estimate_density.html). It starts by estimating the density function (for which many methods are available), and then computing the [**area under the curve**](https://easystats.github.io/bayestestR/reference/area_under_curve.html) (AUC) of the density curve on the other side of 0. The density-based method could hypothetically be considered as more precise, but strongly depends on the method used to estimate the density function. # Methods comparison Let's compare the 4 available methods, the **direct** method and 3 **density-based** methods differing by their density estimation algorithm (see [`estimate_density`](https://easystats.github.io/bayestestR/reference/estimate_density.html)). ## Correlation Let's start by testing the proximity and similarity of the results obtained by different methods. 
```{r message=FALSE, warning=FALSE, fig.align='center'} library(bayestestR) library(logspline) library(KernSmooth) # Compute the correlations data <- data.frame() for(the_mean in runif(25, 0, 4)){ for(the_sd in runif(25, 0.5, 4)){ x <- rnorm(100, the_mean, abs(the_sd)) data <- rbind(data, data.frame("direct" = pd(x), "kernel" = pd(x, method="kernel"), "logspline" = pd(x, method="logspline"), "KernSmooth" = pd(x, method="KernSmooth") )) } } data <- as.data.frame(sapply(data, as.numeric)) # Visualize the correlations library(ggplot2) library(GGally) GGally::ggpairs(data) + theme_classic() ``` All methods are highly correlated and give very similar results. That means that the method choice is not a drastic game changer and cannot be used to tweak the results too much. ## Accuracy To test the accuracy of each method, we will start by computing the **direct *pd*** from a very dense distribution (with a large amount of observations). This will be our baseline, or "true" *pd*. Then, we will iteratively draw smaller samples from this parent distribution, and we will compute the *pd* with different methods. The closer this estimate is to the reference one, the better.
```{r message=FALSE, warning=FALSE} data <- data.frame() for(i in 1:25){ the_mean <- runif(1, 0, 4) the_sd <- abs(runif(1, 0.5, 4)) parent_distribution <- rnorm(100000, the_mean, the_sd) true_pd <- pd(parent_distribution) for(j in 1:25){ sample_size <- round(runif(1, 25, 5000)) subsample <- sample(parent_distribution, sample_size) data <- rbind(data, data.frame("sample_size" = sample_size, "true" = true_pd, "direct" = pd(subsample) - true_pd, "kernel" = pd(subsample, method="kernel")- true_pd, "logspline" = pd(subsample, method="logspline") - true_pd, "KernSmooth" = pd(subsample, method="KernSmooth") - true_pd )) } } data <- as.data.frame(sapply(data, as.numeric)) ``` ```{r message=FALSE, warning=FALSE, fig.align='center'} library(tidyr) library(dplyr) data %>% tidyr::gather(Method, Distance, -sample_size, -true) %>% ggplot(aes(x=sample_size, y = Distance, color = Method, fill= Method)) + geom_point(alpha=0.3, stroke=0, shape=16) + geom_smooth(alpha=0.2) + geom_hline(yintercept=0) + theme_classic() + xlab("\nDistribution Size") ``` The "Kernel" based density methods seem to consistently underestimate the *pd*. Interestingly, the "direct" method appears to be the most reliable, even in the case of a small number of posterior draws. ## Can the pd be 100\%? `p = 0.000` is coined as one of the terms to avoid when reporting results [@lilienfeld2015fifty], even if often displayed by statistical software. The rationale is that for every probability distribution, there is no value with a probability of exactly 0. There is always some infinitesimal probability associated with each data point, and the `p = 0.000` returned by software is due to approximations related, among others, to finite memory hardware. One could apply this rationale for the *pd*: since all data points have a non-null probability density, then the *pd* (a particular portion of the probability density) can *never* be 100\%.
While this is an entirely valid point, people using the *direct* method might argue that their *pd* is based on the posterior draws, rather than on the theoretical, hidden, true posterior distribution (which is only approximated by the posterior draws). These posterior draws represent a finite sample for which `pd = 100%` is a valid statement. bayestestR/vignettes/region_of_practical_equivalence.Rmd0000644000176200001440000002404213620150172023424 0ustar liggesusers--- title: "Region of Practical Equivalence (ROPE)" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test, rope, equivalence test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Region of Practical Equivalence (ROPE)} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} if (!requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("see", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) set.seed(333) ``` # What is the *ROPE?* Unlike a frequentist approach, Bayesian inference is not based on statistical significance, where effects are tested against "zero". Indeed, the Bayesian framework offers a probabilistic view of the parameters, allowing assessment of the uncertainty related to them. 
Thus, rather than concluding that an effect is present when it simply differs from zero, we would conclude that the probability of being outside a specific range that can be considered as **"practically no effect"** (*i.e.*, a negligible magnitude) is sufficient. This range is called the **region of practical equivalence (ROPE)**. Indeed, statistically, the probability of a posterior distribution being different from 0 does not make much sense (the probability of it being different from a single point being infinite). Therefore, the idea underlying the ROPE is to let the user define an area around the null value enclosing values that are **equivalent to the null** value for practical purposes [@kruschke2010believe; @kruschke2012time; @kruschke2014doing]. # Equivalence Test The ROPE, being a region corresponding to a "null" hypothesis, is used for the **equivalence test**, to test whether a parameter is **significant** (in the sense of *important* enough to be cared about). This test is usually based on the **"HDI+ROPE decision rule"** [@kruschke2014doing; @kruschke2018bayesian] to check whether parameter values should be accepted or rejected against an explicitly formulated "null hypothesis" (*i.e.*, a ROPE). In other words, it checks the percentage of the Credible Interval (CI) that falls within the null region (the ROPE). If this percentage is sufficiently low, the null hypothesis is rejected. If this percentage is sufficiently high, the null hypothesis is accepted. # Credible interval in ROPE *vs* full posterior in ROPE Using the ROPE and the HDI as Credible Interval, Kruschke (2018) suggests using the percentage of the 95\% HDI that falls within the ROPE as a decision rule. However, as the 89\% HDI [is considered a better choice](https://easystats.github.io/bayestestR/articles/credible_interval.html) [@kruschke2014doing; @mcelreath2014rethinking; @mcelreath2018statistical], `bayestestR` provides by default the percentage of the 89\% HDI that falls within the ROPE.
However, [*simulation studies data*](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full) suggest that using the percentage of the full posterior distribution, instead of a CI, might be more sensitive (especially to delineate highly significant effects). Thus, we recommend that the user considers using the ***full* ROPE** percentage (by setting `ci = 1`), which will return the portion of the entire posterior distribution in the ROPE. # What percentage in ROPE to accept or to reject? If the HDI is completely outside the ROPE, the "null hypothesis" for this parameter is "rejected". If the ROPE completely covers the HDI, *i.e.*, all most credible values of a parameter are inside the region of practical equivalence, the null hypothesis is accepted. Else, it’s unclear whether the null hypothesis should be accepted or rejected. If the **full ROPE** is used (*i.e.*, 100\% of the HDI), then the null hypothesis is rejected or accepted if the percentage of the posterior within the ROPE is smaller than 2.5\% or greater than 97.5\%. Desirable results are low proportions inside the ROPE (the closer to zero the better). # How to define the ROPE range? Kruschke (2018) suggests that the ROPE could be set, by default, to a range from `-0.1` to `0.1` of a standardized parameter (negligible effect size according to Cohen, 1988). - For **linear models (lm)**, this can be generalised to: $$[-0.1*SD_{y}, 0.1*SD_{y}]$$. - For **logistic models**, the parameters expressed in log odds ratio can be converted to standardized difference through the formula: $$\sqrt{3}/\pi$$, resulting in a range of `-0.055` to `0.055`. For other models with binary outcome, it is strongly recommended to manually specify the rope argument. Currently, the same default as for logistic models is applied. - For **t-tests**, the standard deviation of the response is used, similarly to linear models (see above). 
- For **correlations**, `-0.05, 0.05` is used, *i.e.*, half the value of a negligible correlation as suggested by Cohen's (1988) rules of thumb. - For all other models, `-0.1, 0.1` is used to determine the ROPE limits, but it is strongly advised to specify it manually. # Sensitivity to parameter's scale It is important to consider **the unit (*i.e.*, the scale) of the predictors** when using an index based on the ROPE, as the correct interpretation of the ROPE as representing a region of practical equivalence to zero is dependent on the scale of the predictors. Indeed, unlike other indices (such as the [`pd`](https://easystats.github.io/bayestestR/articles/probability_of_direction.html)), the percentage in **ROPE** depends on the unit of its parameter. In other words, as the ROPE represents a fixed portion of the response's scale, its proximity with a coefficient depends on the scale of the coefficient itself. For instance, if we consider a simple regression `growth ~ time`, modelling the development of **Wookies babies**, a negligible change (the ROPE) is less than **54 cm**. If our `time` variable is **expressed in days**, we will find that the coefficient (representing the growth **by day**) is of about **10 cm** (*the median of the posterior of the coefficient is 10*), which we would consider as **negligible**. However, if we decide to express the `time` variable **in years**, the coefficient will be scaled by this transformation (as it will now represent the growth **by year**). The coefficient will now be around **3550** cm (`10 * 355`), which we would now consider as **significant**. 
```{r message=FALSE, warning=FALSE, eval=FALSE} library(rstanarm) library(bayestestR) library(see) data <- iris # Use the iris data model <- stan_glm(Sepal.Length ~ Sepal.Width, data=data) # Fit model ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=">"} library(rstanarm) library(bayestestR) library(see) set.seed(333) data <- iris # Use the iris data model <- stan_glm(Sepal.Length ~ Sepal.Width, data=data, refresh = 0) ``` ```{r echo=TRUE, message=FALSE, warning=FALSE, comment=">"} # Compute indices pd <- p_direction(model) percentage_in_rope <- rope(model, ci=1) # Visualise the pd plot(pd) pd # Visualise the percentage in ROPE plot(percentage_in_rope) percentage_in_rope ``` We can see that the *pd* and the percentage in ROPE of the linear relationship between **Sepal.Length** and **Sepal.Width** are respectively of about `92.95%` and `15.95%`, corresponding to an **uncertain** and **not significant** effect. What happen if we scale our predictor? ```{r message=FALSE, warning=FALSE, eval=FALSE} data$Sepal.Width_scaled <- data$Sepal.Width / 100 # Divide predictor by 100 model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data=data) # Fit model ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=">"} set.seed(333) data$Sepal.Width_scaled <- data$Sepal.Width / 100 model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data=data, refresh = 0) ``` ```{r echo=TRUE, message=FALSE, warning=FALSE, comment=">"} # Compute indices pd <- p_direction(model) percentage_in_rope <- rope(model, ci=1) # Visualise the pd plot(pd) pd # Visualise the percentage in ROPE plot(percentage_in_rope) percentage_in_rope ``` As you can see, by simply dividing the predictor by 100, we **drastically** changed the conclusion related to the **percentage in ROPE** (which became very close to `0`): the effect could now be **interpreted as being significant**. 
Thus, we recommend paying close attention to the unit of the predictors when selecting the ROPE range (*e.g.*, what coefficient would correspond to a small effect?), and when reporting or reading ROPE results. # Multicollinearity: Non-independent covariates When **parameters show strong correlations**, *i.e.*, when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are parameters that only have partial overlap with the ROPE region. In case of collinearity, the (joint) distributions of these parameters may either get an increased or decreased ROPE, which means that inferences based on ROPE are inappropriate [@kruschke2014doing]. The `equivalence_test()` and `rope()` functions perform a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. An even more sophisticated check is the projection predictive variable selection [@piironen2017comparison]. bayestestR/vignettes/bayestestR.Rmd0000644000176200001440000002401213613227664017211 0ustar liggesusers--- title: "Get Started with Bayesian Analysis" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Get Started with Bayesian Analysis} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). 
*bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} if (!requireNamespace("rstanarm", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) ``` ## Why use the Bayesian Framework? The Bayesian framework for statistics is quickly gaining in popularity among scientists, associated with the general shift towards **open and honest science**. Reasons to prefer this approach are **reliability**, **accuracy** (in noisy data and small samples), the possibility of introducing **prior knowledge** into the analysis and, critically, **results intuitiveness** and their **straightforward interpretation** [@andrews2013prior; @etz2016bayesian; @kruschke2010believe; @kruschke2012time; @wagenmakers2018bayesian]. In general, the frequentist approach has been associated with the focus on null hypothesis testing, and the misuse of *p*-values has been shown to critically contribute to the reproducibility crisis of psychological science [@chambers2014instead; @szucs2016empirical]. There is a general agreement that the generalization of the Bayesian approach is one way of overcoming these issues [@benjamin2018redefine; @etz2016bayesian]. Once we agreed that the Bayesian framework is the right way to go, you might wonder *what* is the Bayesian framework. **What's all the fuss about?** ## What is the Bayesian Framework? Adopting the Bayesian framework is more of a shift in the paradigm than a change in the methodology. Indeed, all the common statistical procedures (t-tests, correlations, ANOVAs, regressions, ...) can be achieved using the Bayesian framework. 
One of the core differences is that in the **frequentist view** (the "classic" statistics, with *p* and *t* values, as well as some weird *degrees of freedom*), **the effects are fixed** (but unknown) and **data are random**. On the other hand, in the Bayesian inference process, instead of having estimates of the "true effect", the probability of different effects *given the observed data* is computed, resulting in a distribution of possible values for the parameters, called the ***posterior distribution***. The uncertainty in Bayesian inference can be summarized, for instance, by the **median** of the distribution, as well as a range of values of the posterior distribution that includes the 95\% most probable values (the 95\% ***credible* interval**). *Cum grano salis*, these are considered the counterparts to the point-estimate and confidence interval in a frequentist framework. To illustrate the difference of interpretation, the Bayesian framework allows us to say *"given the observed data, the effect has 95\% probability of falling within this range"*, while the frequentist less straightforward alternative would be *"when repeatedly computing confidence intervals from data of this sort, there is a 95\% probability that the effect falls within a given range"*. In essence, the Bayesian sampling algorithms (such as MCMC sampling) return a probability distribution (*the posterior*) of an effect that is compatible with the observed data. Thus, an effect can be described by [characterizing its posterior distribution](https://easystats.github.io/bayestestR/articles/guidelines.html) in relation to its centrality (point-estimates), uncertainty, as well as existence and significance. In other words, omitting the maths behind it, we can say that: - The frequentist bloke tries to estimate "the **real effect**". For instance, the "real" value of the correlation between *x* and *y*. 
Hence, frequentist models return a "**point-estimate**" (*i.e.*, a single value) of the "real" correlation (*e.g.*, r = 0.42) estimated under a number of obscure assumptions (at a minimum, considering that the data is sampled at random from a "parent", usually normal distribution). - **The Bayesian master assumes no such thing**. The data are what they are. Based on this observed data (and a **prior** belief about the result), the Bayesian sampling algorithm (sometimes referred to for example as **MCMC** sampling) returns a probability distribution (called **the posterior**) of the effect that is compatible with the observed data. For the correlation between *x* and *y*, it will return a distribution that says, for example, "the most probable effect is 0.42, but this data is also compatible with correlations of 0.12 and 0.74". - To characterize our effects, **no need of *p* values** or other cryptic indices. We simply describe the posterior distribution of the effect. For example, we can report the median, the [89% *Credible* Interval](https://easystats.github.io/bayestestR/articles/credible_interval.html) or [other indices](https://easystats.github.io/bayestestR/articles/guidelines.html). ```{r echo=FALSE, fig.cap="Accurate depiction of a regular Bayesian user estimating a credible interval.", fig.align='center', out.width="50%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/bayesianMaster.jpg") ``` *Note: Although the very purpose of this package is to advocate for the use of Bayesian statistics, please note that there are serious arguments supporting frequentist indices (see for instance [this thread](https://discourse.datamethods.org/t/language-for-communicating-frequentist-results-about-treatment-effects/934/16)). As always, the world is not black and white (p \< .001).* **So... 
how does it work?** ## A simple example ### BayestestR Installation You can install `bayestestR` along with the whole [**easystats**](https://github.com/easystats/easystats) suite by running the following: ```{r eval=FALSE, message=FALSE, warning=FALSE} install.packages("devtools") devtools::install_github("easystats/easystats") ``` Let's also install and load the [`rstanarm`](https://mc-stan.org/rstanarm/), that allows fitting Bayesian models, as well as [`bayestestR`](https://github.com/easystats/bayestestR), to describe them. ```{r message=FALSE, warning=FALSE, eval=FALSE} install.packages("rstanarm") library(rstanarm) ``` ### Traditional linear regression Let's start by fitting a simple frequentist linear regression (the `lm()` function stands for *linear model*) between two numeric variables, `Sepal.Length` and `Petal.Length` from the famous [`iris`](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset, included by default in R. ```{r message=FALSE, warning=FALSE, eval=FALSE} model <- lm(Sepal.Length ~ Petal.Length, data=iris) summary(model) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA} library(dplyr) lm(Sepal.Length ~ Petal.Length, data=iris) %>% summary() ``` This analysis suggests that there is a **significant** (*whatever that means*) and **positive** (with a coefficient of `0.41`) linear relationship between the two variables. *Fitting and interpreting **frequentist models is so easy** that it is obvious that people use it instead of the Bayesian framework... 
right?* **Not anymore.** ### Bayesian linear regression ```{r message=FALSE, warning=FALSE, eval=FALSE} model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris) describe_posterior(model) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA} library(rstanarm) library(bayestestR) set.seed(333) model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris, refresh = 0) knitr::kable(describe_posterior(model), digits=2) ``` **That's it!** You fitted a Bayesian version of the model by simply using [`stan_glm()`](https://mc-stan.org/rstanarm/reference/stan_glm.html) instead of `lm()` and described the posterior distributions of the parameters. The conclusions that we can draw, for this example, are very similar. The effect (*the median of the effect's posterior distribution*) is about `0.41`, and it can also be considered as *significant* in the Bayesian sense (more on that later). **So, ready to learn more?** Check out the [**next tutorial**](https://easystats.github.io/bayestestR/articles/example1.html)! 
## References bayestestR/vignettes/indicesExistenceComparison.Rmd0000644000176200001440000000333713603574330022406 0ustar liggesusers--- title: "In-Depth 2: Comparison of Indices of Effect Existence and Significance" output: github_document: toc: true toc_depth: 3 fig_width: 10.08 fig_height: 6 word_document: toc: true toc_depth: 3 fig_width: 10.08 fig_height: 6 df_print: "kable" highlight: "pygments" reference_docx: templates/Template_Frontiers.docx rmarkdown::html_vignette: toc: true toc_depth: 3 fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{In-Depth 2: Comparison of Indices of Effect Existence} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">", dpi=75) options(digits=2) ``` This vignette can be referred to by citing the following: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) --- # Indices of Effect *Existence* and *Significance* in the Bayesian Framework A comparison of different Bayesian indices (*pd*, *BFs*, ROPE etc.) 
is accessible [**here**](https://doi.org/10.3389/fpsyg.2019.02767).bayestestR/R/0000755000176200001440000000000013620150641012576 5ustar liggesusersbayestestR/R/p_significance.R0000644000176200001440000001572513613227664015710 0ustar liggesusers#' Practical Significance (ps) #' #' Compute the probability of \strong{Practical Significance} (\strong{\emph{ps}}), which can be conceptualized as a unidirectional equivalence test. It returns the probability that effect is above a given threshold corresponding to a negligible effect in the median's direction. Mathematically, it is defined as the proportion of the posterior distribution of the median sign above the threshold. #' #' @inheritParams rope #' @param threshold The threshold value that separates significant from negligible effect. If \code{"default"}, the range is set to \code{0.1} if input is a vector, and based on \code{\link[=rope_range]{rope_range()}} if a Bayesian model is provided. #' #' @return Values between 0 and 1 corresponding to the probability of practical significance (ps). #' #' @details \code{p_significance()} returns the proportion of a probability #' distribution (\code{x}) that is outside a certain range (the negligible #' effect, or ROPE, see argument \code{threshold}). If there are values of the #' distribution both below and above the ROPE, \code{p_significance()} returns #' the higher probability of a value being outside the ROPE. Typically, this #' value should be larger than 0.5 to indicate practical significance. However, #' if the range of the negligible effect is rather large compared to the #' range of the probability distribution \code{x}, \code{p_significance()} #' will be less than 0.5, which indicates no clear practical significance. 
#' #' @examples #' library(bayestestR) #' #' # Simulate a posterior distribution of mean 1 and SD 1 #' # ---------------------------------------------------- #' posterior <- rnorm(1000, mean = 1, sd = 1) #' p_significance(posterior) #' #' # Simulate a dataframe of posterior distributions #' # ----------------------------------------------- #' df <- data.frame(replicate(4, rnorm(100))) #' p_significance(df) #' #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm")) { #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, #' data = mtcars, #' chains = 2, refresh = 0 #' ) #' p_significance(model) #' } #' } #' @export p_significance <- function(x, ...) { UseMethod("p_significance") } #' @rdname p_significance #' @export p_significance.numeric <- function(x, threshold = "default", ...) { threshold <- .select_threshold_ps(x = x, threshold = threshold) psig <- max( c( length(x[x > abs(threshold)]) / length(x), # ps positive length(x[x < -abs(threshold)]) / length(x) # ps negative ) ) attr(psig, "threshold") <- threshold attr(psig, "data") <- x class(psig) <- unique(c("p_significance", "see_p_significance", class(psig))) psig } #' @export p_significance.data.frame <- function(x, threshold = "default", ...) { threshold <- .select_threshold_ps(x = x, threshold = threshold) x <- .select_nums(x) if (ncol(x) == 1) { ps <- p_significance(x[, 1], threshold = threshold, ...) } else { ps <- sapply(x, p_significance, threshold = threshold, simplify = TRUE, ...) } out <- data.frame( "Parameter" = names(x), "ps" = as.numeric(ps), row.names = NULL, stringsAsFactors = FALSE ) attr(out, "threshold") <- threshold attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- unique(c("p_significance", "see_p_significance", class(out))) out } #' @export p_significance.MCMCglmm <- function(x, threshold = "default", ...) { nF <- x$Fixed$nfl out <- p_significance(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), threshold = threshold, ...) 
attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_significance.BFBayesFactor <- function(x, threshold = "default", ...) { out <- p_significance(insight::get_parameters(x), threshold = threshold, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_significance.mcmc <- function(x, threshold = "default", ...) { p_significance(as.data.frame(x), threshold = threshold, ...) } #' @rdname p_significance #' @export p_significance.emmGrid <- function(x, threshold = "default", ...) { if (!requireNamespace("emmeans")) { stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.") } xdf <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(x, names = FALSE))) out <- p_significance(xdf, threshold = threshold, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @rdname p_significance #' @export p_significance.stanreg <- function(x, threshold = "default", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) threshold <- .select_threshold_ps(model = x, threshold = threshold) data <- p_significance( insight::get_parameters(x, effects = effects, parameters = parameters), threshold = threshold ) out <- .prepare_output(data, insight::clean_parameters(x)) attr(out, "threshold") <- threshold attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- class(data) out } #' @rdname p_significance #' @export p_significance.brmsfit <- function(x, threshold = "default", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) 
{ effects <- match.arg(effects) component <- match.arg(component) threshold <- .select_threshold_ps(model = x, threshold = threshold) data <- p_significance( insight::get_parameters(x, effects = effects, component = component, parameters = parameters), threshold = threshold ) out <- .prepare_output(data, insight::clean_parameters(x)) attr(out, "threshold") <- threshold attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- class(data) out } #' @rdname as.numeric.p_direction #' @export as.numeric.p_significance <- function(x, ...) { if ("data.frame" %in% class(x)) { return(as.numeric(as.vector(x$ps))) } else { return(as.vector(x)) } } #' @method as.double p_significance #' @export as.double.p_significance <- as.numeric.p_significance #' @keywords internal .select_threshold_ps <- function(x = NULL, model = NULL, threshold = "default") { # If a range is passed if (length(threshold) > 1) { if(length(unique(abs(threshold))) == 1) { # If symetric range threshold <- abs(threshold[2]) } else{ stop("`threshold` should be 'default' or a numeric value (e.g., 0.1).") } } # If default if (all(threshold == "default")) { if (!is.null(model)) { threshold <- rope_range(model)[2] } else { threshold <- 0.1 } } else if (!all(is.numeric(threshold))) { stop("`threshold` should be 'default' or a numeric value (e.g., 0.1).") } threshold } bayestestR/R/print.rope.R0000644000176200001440000000400713536442413015031 0ustar liggesusers#' @importFrom insight print_color #' @export print.rope <- function(x, digits = 2, ...) { insight::print_color(sprintf( "# Proportion%s of samples inside the ROPE [%.*f, %.*f]:\n\n", ifelse(all(x$CI[1] == x$CI), "", "s"), digits, x$ROPE_low[1], digits, x$ROPE_high[1] ), "blue") # I think this is something nobody will understand and we'll probably forget # why we did this, so I'll comment a bit... 
# These are the base columns we want to print cols <- c("Parameter", "ROPE_Percentage", "Effects", "Component") # In case we have ropes for different CIs, we also want this information # So we first check if values in the CI column differ, and if so, we also # keep this column for printing if (!all(x$CI[1] == x$CI)) { cols <- c("CI", cols) } # Either way, we need to know the different CI-values, so we can # split the data frame for printing later... ci <- unique(x$CI) # now we check which of the requested columns are actually in our data frame "x" # "x" may differ, depending on if "rope()" was called with a model-object, # or with a simple vector. So we can't hard-code this x <- subset(x, select = intersect(cols, colnames(x))) # This is just cosmetics, to have nicer column names and values x$ROPE_Percentage <- sprintf("%.*f %%", digits, x$ROPE_Percentage * 100) colnames(x)[which(colnames(x) == "ROPE_Percentage")] <- "inside ROPE" # In case we have multiple CI values, we create a subset for each CI value. # Else, parameter-rows would be mixed up with both CIs, which is a bit # more difficult to read... if (length(ci) == 1) { # print complete data frame, because we have no different CI values here print_data_frame(x, digits = digits) } else { for (i in ci) { xsub <- x[x$CI == i, -which(colnames(x) == "CI"), drop = FALSE] insight::print_color(sprintf("ROPE for the %s%% HDI:\n\n", i), "cyan") print_data_frame(xsub, digits = digits) cat("\n") } } } bayestestR/R/diagnostic_posterior.R0000644000176200001440000001343413613227664017174 0ustar liggesusers#' Posteriors Sampling Diagnostic #' #' Extract diagnostic metrics (Effective Sample Size (\code{ESS}), \code{Rhat} and Monte Carlo Standard Error \code{MCSE}). #' #' @param posteriors A stanreg or brms model. #' @param diagnostic Diagnostic metrics to compute. Character (vector) or list with one or more of these options: \code{"ESS"}, \code{"Rhat"}, \code{"MCSE"} or \code{"all"}. 
#' #' @details #' \strong{Effective Sample (ESS)} should be as large as possible, altough for most applications, an effective sample size greater than 1000 is sufficient for stable estimates (Bürkner, 2017). The ESS corresponds to the number of independent samples with the same estimation power as the N autocorrelated samples. It is is a measure of \dQuote{how much independent information there is in autocorrelated chains} (\emph{Kruschke 2015, p182-3}). #' \cr \cr #' \strong{Rhat} should be the closest to 1. It should not be larger than 1.1 (Gelman and Rubin, 1992) or 1.01 (Vehtari et al., 2019). The split R-hat statistic quantifies the consistency of an ensemble of Markov chains. #' \cr \cr #' \strong{Monte Carlo Standard Error (MCSE)} is another measure of accuracy of the chains. It is defined as standard deviation of the chains divided by their effective sample size (the formula for \code{mcse()} is from Kruschke 2015, p. 187). The MCSE \dQuote{provides a quantitative suggestion of how big the estimation noise is}. #' #' #' @examples #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' library(rstanarm) #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' diagnostic_posterior(model) #' #' # brms models #' # ----------------------------------------------- #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' diagnostic_posterior(model) #' } #' #' @references #' \itemize{ #' \item Gelman, A., & Rubin, D. B. (1992). Inference from iterative simulation using multiple sequences. Statistical science, 7(4), 457-472. #' \item Vehtari, A., Gelman, A., Simpson, D., Carpenter, B., \& Bürkner, P. C. (2019). Rank-normalization, folding, and localization: An improved Rhat for assessing convergence of MCMC. arXiv preprint arXiv:1903.08008. #' \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. 
#' } #' @export diagnostic_posterior <- function(posteriors, diagnostic = c("ESS", "Rhat"), ...) { UseMethod("diagnostic_posterior") } #' @export diagnostic_posterior.numeric <- function(posteriors, diagnostic = c("ESS", "Rhat"), ...) { stop("`diagnostic_posterior` only works with rstanarm or brms models.") } #' @export diagnostic_posterior.data.frame <- diagnostic_posterior.numeric #' @export diagnostic_posterior.BFBayesFactor <- diagnostic_posterior.numeric #' @inheritParams insight::get_parameters #' @rdname diagnostic_posterior #' @export diagnostic_posterior.stanreg <- function(posteriors, diagnostic = "all", effects = c("fixed", "random", "all"), parameters = NULL, ...) { diagnostic <- match.arg(diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE") } else { diagnostic <- c(diagnostic) } # Get indices and rename diagnostic_df <- as.data.frame(posteriors$stan_summary) diagnostic_df$Parameter <- row.names(diagnostic_df) diagnostic_df$ESS <- round(diagnostic_df$n_eff) # special handling for MCSE, due to some parameters (like lp__) missing in rows MCSE <- mcse(posteriors, effects = "all") diagnostic_df <- merge(diagnostic_df, MCSE, by = "Parameter", all = FALSE) # Select columns available_columns <- intersect(colnames(diagnostic_df), c("Parameter", diagnostic)) diagnostic_df <- diagnostic_df[available_columns] row.names(diagnostic_df) <- NULL # Select rows effects <- match.arg(effects) params <- colnames( insight::get_parameters(posteriors, effects = effects, parameters = parameters) ) if (inherits(posteriors, "stanmvreg")) { diagnostic_df$Response <- gsub("^(.*)\\|(.*)", "\\1", diagnostic_df$Parameter) } diagnostic_df[diagnostic_df$Parameter %in% params, ] } #' @inheritParams insight::get_parameters #' @rdname diagnostic_posterior #' @export diagnostic_posterior.brmsfit <- function(posteriors, diagnostic = "all", effects = c("fixed", "random", "all"), component = c("conditional", "zi", 
"zero_inflated", "all"), parameters = NULL, ...) { diagnostic <- match.arg(diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE") # Add MCSE } else { diagnostic <- c(diagnostic) } if (!requireNamespace("rstan", quietly = TRUE)) { stop("Package 'rstan' required for this function to work. Please install it by running `install.packages('rstan')`.") } # Get indices and rename diagnostic_df <- as.data.frame(rstan::summary(posteriors$fit)$summary) diagnostic_df$Parameter <- make.names(row.names(diagnostic_df)) diagnostic_df$ESS <- round(diagnostic_df$n_eff) # special handling for MCSE, due to some parameters (like lp__) missing in rows MCSE <- mcse(posteriors, effects = "all", component = "all") diagnostic_df <- merge(diagnostic_df, MCSE, by = "Parameter", all = FALSE) # Select columns diagnostic_df <- diagnostic_df[, c("Parameter", diagnostic)] row.names(diagnostic_df) <- NULL # Select rows effects <- match.arg(effects) component <- match.arg(component) params <- colnames( insight::get_parameters( posteriors, effects = effects, component = component, parameters = parameters ) ) diagnostic_df[diagnostic_df$Parameter %in% params, ] } bayestestR/R/simulate_priors.R0000644000176200001440000000407313613227664016162 0ustar liggesusers#' Returns Priors of a Model as Empirical Distributions #' #' Transforms priors information to actual distributions. #' #' @inheritParams effective_sample #' @param n Size of the simulated prior distributions. #' #' @examples #' \dontrun{ #' library(bayestestR) #' if (require("rstanarm")) { #' model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) #' simulate_prior(model) #' } #' } #' @export simulate_prior <- function(model, n = 1000, ...) { UseMethod("simulate_prior") } #' @export simulate_prior.stanreg <- function(model, n = 1000, effects = c("fixed", "random", "all"), parameters = NULL, ...) 
{ # check arguments effects <- match.arg(effects) priors <- insight::get_priors( model, effects = effects, parameters = parameters ) .simulate_prior(priors, n = n) } #' @export simulate_prior.brmsfit <- function(model, n = 1000, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { # check arguments effects <- match.arg(effects) component <- match.arg(component) priors <- insight::get_priors( model, effects = effects, component = component, parameters = parameters ) .simulate_prior(priors, n = n) } #' @keywords internal .simulate_prior <- function(priors, n = 1000) { simulated <- data.frame(.bamboozled = 1:n) # iterate over parameters for (param in priors$Parameter) { prior <- priors[priors$Parameter == param, ] # Get actual scale if ("Adjusted_Scale" %in% names(prior)) { scale <- prior$Adjusted_Scale # is autoscale = FALSE, scale contains NA values - replace # with non-adjusted then. if (anyNA(scale)) scale[is.na(scale)] <- prior$Scale[is.na(scale)] } else { scale <- prior$Scale } # Simulate prior prior <- distribution(prior$Distribution, n, prior$Location, scale) simulated[param] <- prior } simulated$.bamboozled <- NULL simulated } bayestestR/R/area_under_curve.R0000644000176200001440000000402113546267673016254 0ustar liggesusers#' Area under the Curve (AUC) #' #' Based on the DescTools \code{AUC} function. It can calculate the area under the curve with a naive algorithm or a more elaborated spline approach. The curve must be given by vectors of xy-coordinates. This function can handle unsorted x values (by sorting x) and ties for the x values (by ignoring duplicates). #' #' @param x Vector of x values. #' @param y Vector of y values. #' @param method Method to compute the Area Under the Curve (AUC). Can be \code{"trapezoid"} (default), \code{"step"} or \code{"spline"}. If "trapezoid", the curve is formed by connecting all points by a direct line (composite trapezoid rule). 
#' If "step" is chosen then a stepwise connection of two points is used. For calculating the area under a spline interpolation the splinefun function is used in combination with integrate.
#' @param ... Arguments passed to or from other methods.
#'
#'
#' @examples
#' library(bayestestR)
#' posterior <- distribution_normal(1000)
#'
#' dens <- estimate_density(posterior)
#' dens <- dens[dens$x > 0, ]
#' x <- dens$x
#' y <- dens$y
#'
#' area_under_curve(x, y, method = "trapezoid")
#' area_under_curve(x, y, method = "step")
#' area_under_curve(x, y, method = "spline")
#' @importFrom stats integrate splinefun
#' @seealso DescTools
#' @export
area_under_curve <- function(x, y, method = c("trapezoid", "step", "spline"), ...) {
  # Adapted from DescTools: https://github.com/cran/DescTools/blob/master/R/StatsAndCIs.r
  if (length(x) != length(y)) {
    stop("length x must equal length y")
  }

  # Sort coordinates by x so unsorted input is handled correctly.
  ord <- order(x)
  xs <- x[ord]
  ys <- y[ord]

  chosen <- match.arg(arg = method, choices = c("trapezoid", "step", "spline"))

  if (chosen == "trapezoid") {
    # Composite trapezoid rule: mean of adjacent y-values times interval width.
    widths <- diff(xs)
    heights <- (ys[-length(ys)] + ys[-1]) / 2
    sum(heights * widths)
  } else if (chosen == "step") {
    # Step rule: left-hand y-value times interval width.
    sum(ys[-length(ys)] * diff(xs))
  } else {
    # Natural-spline interpolation, integrated over the full x-range.
    stats::integrate(
      stats::splinefun(xs, ys, method = "natural"),
      lower = min(xs),
      upper = max(xs)
    )$value
  }
}

#' @rdname area_under_curve
#' @export
auc <- area_under_curve

# ---- archive member header (tar residue, kept as a comment) ----
# bayestestR/R/map_estimate.R0000644000176200001440000001016013613227664015403 0ustar liggesusers

#' Maximum A Posteriori probability estimate (MAP)
#'
#' Find the \strong{Highest Maximum A Posteriori probability estimate (MAP)} of a posterior, i.e., the value associated with the highest probability density (the "peak" of the posterior distribution). In other words, it is an estimation of the \emph{mode} for continuous parameters. Note that this function relies on \link{estimate_density}, which by default uses a different smoothing bandwidth (\code{"SJ"}) compared to the legacy default implemented the base R \link{density} function (\code{"nrd0"}).
#' #' @inheritParams hdi #' @inheritParams estimate_density #' #' @return A numeric value if \code{posterior} is a vector. If \code{posterior} #' is a model-object, returns a data frame with following columns: #' \itemize{ #' \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. #' \item \code{MAP_Estimate} The MAP estimate for the posterior or each model parameter. #' } #' #' @examples #' \dontrun{ #' library(bayestestR) #' #' posterior <- rnorm(10000) #' map_estimate(posterior) #' #' plot(density(posterior)) #' abline(v = map_estimate(posterior), col = "red") #' #' library(rstanarm) #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' map_estimate(model) #' #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' map_estimate(model) #' } #' #' @importFrom stats density #' @export map_estimate <- function(x, precision = 2^10, method = "kernel", ...) { UseMethod("map_estimate") } #' @export print.map_estimate <- function(x, ...) { if (inherits(x, "data.frame")) { print.data.frame(x) } else { cat(sprintf("MAP = %.2f", x)) } } #' @rdname map_estimate #' @export map_estimate.numeric <- function(x, precision = 2^10, method = "kernel", ...) { d <- estimate_density(x, precision = precision, method = method, ...) hdp_x <- d$x[which.max(d$y)] hdp_y <- max(d$y) out <- hdp_x attr(out, "MAP_density") <- hdp_y attr(out, "data") <- x attr(out, "centrality") <- "map" class(out) <- unique(c("map_estimate", "see_point_estimate", class(out))) out } #' @importFrom insight get_parameters #' @keywords internal .map_estimate_models <- function(x, precision, method, ...) { l <- sapply(x, map_estimate, precision = precision, method = method, simplify = FALSE, ...) 
out <- data.frame(
    Parameter = colnames(x),
    MAP_Estimate = unlist(l),
    stringsAsFactors = FALSE,
    row.names = NULL
  )

  # Carry the per-parameter density heights and plotting metadata as attributes.
  attr(out, "MAP_density") <- sapply(l, attr, "MAP_density")
  attr(out, "object_name") <- deparse(substitute(x), width.cutoff = 500)
  attr(out, "centrality") <- "map"

  # FIX: the class vector was assigned twice in a row; a single assignment
  # with 'unique()' produces the identical result.
  class(out) <- unique(c("map_estimate", "see_point_estimate", class(out)))

  out
}

#' @rdname map_estimate
#' @export
map_estimate.stanreg <- function(x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), parameters = NULL, ...) {
  effects <- match.arg(effects)

  .map_estimate_models(
    x = insight::get_parameters(x, effects = effects, parameters = parameters),
    precision = precision,
    method = method
  )
}

#' @rdname map_estimate
#' @export
map_estimate.brmsfit <- function(x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) {
  effects <- match.arg(effects)
  component <- match.arg(component)

  .map_estimate_models(
    x = insight::get_parameters(x, effects = effects, component = component, parameters = parameters),
    precision = precision,
    method = method
  )
}

#' @rdname as.numeric.p_direction
#' @method as.numeric map_estimate
#' @export
as.numeric.map_estimate <- function(x, ...) {
  if (inherits(x, "data.frame")) {
    # Data-frame result: return a named numeric vector of MAP estimates.
    me <- as.numeric(as.vector(x$MAP_Estimate))
    names(me) <- x$Parameter
    me
  } else {
    # Scalar result: strip attributes and return the plain value.
    as.vector(x)
  }
}

#' @method as.double map_estimate
#' @export
as.double.map_estimate <- as.numeric.map_estimate

# ---- archive member header (tar residue, kept as a comment) ----
# bayestestR/R/utils_check_collinearity.R0000644000176200001440000000421113467130124017775 0ustar liggesusers

#' @importFrom stats cor cor.test
#' @importFrom insight find_parameters
#' @keywords internal
.check_multicollinearity <- function(model, method = "equivalence_test", threshold = 0.7, ...)
{
  # Candidate parameters: drop random effects, priors, correlations etc.
  valid_parameters <- insight::find_parameters(model, parameters = "^(?!(r_|sd_|prior_|cor_|b\\[))", flatten = TRUE)
  dat <- as.data.frame(model)[, valid_parameters]
  # Drop the first column (the intercept) - only predictor collinearity matters.
  dat <- dat[, -1, drop = FALSE]

  if (ncol(dat) > 1) {
    parameter_correlation <- stats::cor(dat)
    parameter <- expand.grid(colnames(dat), colnames(dat), stringsAsFactors = FALSE)

    results <- cbind(
      parameter,
      corr = abs(as.vector(expand.grid(parameter_correlation)[[1]])),
      pvalue = apply(parameter, 1, function(r) stats::cor.test(dat[[r[1]]], dat[[r[2]]])$p.value)
    )

    # Keep significant correlations between two *different* parameters.
    results <- results[results$pvalue < 0.05 & results$Var1 != results$Var2, ]

    if (nrow(results) > 0) {
      # Remove duplicates: drop "(B and A)" when "(A and B)" was already seen.
      results$where <- paste0(results$Var1, " and ", results$Var2)
      results$where2 <- paste0(results$Var2, " and ", results$Var1)
      to_remove <- c()
      for (i in seq_len(nrow(results))) {
        if (results$where2[i] %in% results$where[1:i]) {
          to_remove <- c(to_remove, i)
        }
      }
      # FIX: 'results[-to_remove, ]' errors ('-NULL') when 'to_remove' is
      # empty - only subset when there is something to remove.
      if (length(to_remove) > 0) {
        results <- results[-to_remove, ]
      }

      # FIX: use separate subsets for the two severity bands. Previously the
      # second filter ('corr > .9') was applied to a subset already restricted
      # to 'corr <= .9', so the "Probable multicollinearity" warning could
      # never fire.
      threshold <- ifelse(threshold >= .9, .9, threshold)

      # "Possible" multicollinearity: threshold < r <= .9
      possible <- results[results$corr > threshold & results$corr <= .9, ]
      if (nrow(possible) > 0) {
        where <- paste0("between ", paste0(paste0(possible$where, " (r = ", round(possible$corr, 2), ")"), collapse = ", "), "")
        message("Possible multicollinearity ", where, ". This might lead to inappropriate results. See 'Details' in '?", method, "'.")
      }

      # "Probable" multicollinearity: r > .9
      probable <- results[results$corr > .9, ]
      if (nrow(probable) > 0) {
        where <- paste0("between ", paste0(paste0(probable$where, " (r = ", round(probable$corr, 2), ")"), collapse = ", "), "")
        warning("Probable multicollinearity ", where, ". This might lead to inappropriate results. See 'Details' in '?", method, "'.", call. = FALSE)
      }
    }
  }
}

# ---- archive member header (tar residue, kept as a comment) ----
# bayestestR/R/mcse.R0000644000176200001440000000440613613227664013670 0ustar liggesusers

#' Monte-Carlo Standard Error (MCSE)
#'
#' This function returns the Monte Carlo Standard Error (MCSE).
#' #' @inheritParams effective_sample #' #' #' @details \strong{Monte Carlo Standard Error (MCSE)} is another measure of #' accuracy of the chains. It is defined as standard deviation of the chains #' divided by their effective sample size (the formula for \code{mcse()} is #' from Kruschke 2015, p. 187). The MCSE \dQuote{provides a quantitative #' suggestion of how big the estimation noise is}. #' #' @references Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. #' #' @examples #' \dontrun{ #' library(bayestestR) #' library(rstanarm) #' #' model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) #' mcse(model) #' } #' @importFrom insight get_parameters #' @export mcse <- function(model, ...) { UseMethod("mcse") } #' @export mcse.brmsfit <- function(model, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { # check arguments effects <- match.arg(effects) component <- match.arg(component) params <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) ess <- effective_sample( model, effects = effects, component = component, parameters = parameters ) .mcse(params, ess$ESS) } #' @rdname mcse #' @export mcse.stanreg <- function(model, effects = c("fixed", "random", "all"), parameters = NULL, ...) 
{ # check arguments effects <- match.arg(effects) params <- insight::get_parameters( model, effects = effects, parameters = parameters ) ess <- effective_sample( model, effects = effects, parameters = parameters ) .mcse(params, ess$ESS) } #' @importFrom stats sd #' @keywords internal .mcse <- function(params, ess) { # get standard deviations from posterior samples stddev <- sapply(params, stats::sd) # compute mcse data.frame( Parameter = colnames(params), MCSE = stddev / sqrt(ess), stringsAsFactors = FALSE, row.names = NULL ) } bayestestR/R/bayesfactor_parameters.R0000644000176200001440000003776713616534204017500 0ustar liggesusers#' Bayes Factors (BF) for a Single Parameter #' #' This method computes Bayes factors against the null (either a point or an interval), #' based on prior and posterior samples of a single parameter. This Bayes factor indicates #' the degree by which the mass of the posterior distribution has shifted further away #' from or closer to the null value(s) (relative to the prior distribution), thus indicating #' if the null value has become less or more likely given the observed data. #' \cr \cr #' When the null is an interval, the Bayes factor is computed by comparing the prior #' and posterior odds of the parameter falling within or outside the null interval #' (Morey & Rouder, 2011; Liao et al., 2020); When the null is a point, a Savage-Dickey #' density ratio is computed, which is also an approximation of a Bayes factor comparing #' the marginal likelihoods of the model against a model in which the tested parameter #' has been restricted to the point null (Wagenmakers et al., 2010; Heck, 2019). #' \cr \cr #' Note that the \code{logspline} package is used for estimating densities and probabilies, #' and must be installed for the function to work. 
#' \cr \cr #' \code{bayesfactor_pointnull()} and \code{bayesfactor_rope()} are wrappers around #' \code{bayesfactor_parameters} with different defaults for the null to be tested against #' (a point and a range, respectively). Aliases of the main functions are prefixed #' with \code{bf_*}, like \code{bf_parameters()} or \code{bf_pointnull()} #' \cr \cr #' \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.} #' #' @param posterior A numerical vector, \code{stanreg} / \code{brmsfit} object, \code{emmGrid} #' or a data frame - representing a posterior distribution(s) from (see 'Details'). #' @param prior An object representing a prior distribution (see 'Details'). #' @param direction Test type (see 'Details'). One of \code{0}, \code{"two-sided"} (default, two tailed), #' \code{-1}, \code{"left"} (left tailed) or \code{1}, \code{"right"} (right tailed). #' @param null Value of the null, either a scaler (for point-null) or a a range #' (for a interval-null). #' @inheritParams hdi #' #' @return A data frame containing the Bayes factor representing evidence \emph{against} the null. #' #' @details This method is used to compute Bayes factors based on prior and posterior distributions. #' \cr\cr #' For the computation of Bayes factors, the model priors must be proper priors (at the very least #' they should be \emph{not flat}, and it is preferable that they be \emph{informative}); As the priors for #' the alternative get wider, the likelihood of the null value(s) increases, to the extreme that for completely #' flat priors the null is infinitely more favorable than the alternative (this is called \emph{the Jeffreys-Lindley-Bartlett #' paradox}). Thus, you should only ever try (or want) to compute a Bayes factor when you have an informed prior. 
#' \cr\cr #' (Note that by default, \code{brms::brm()} uses flat priors for fixed-effects; See example below.) #' #' \subsection{Setting the correct \code{prior}}{ #' It is important to provide the correct \code{prior} for meaningful results. #' \itemize{ #' \item When \code{posterior} is a numerical vector, \code{prior} should also be a numerical vector. #' \item When \code{posterior} is a \code{data.frame}, \code{prior} should also be a \code{data.frame}, with matching column order. #' \item When \code{posterior} is a \code{stanreg} or \code{brmsfit} model: \itemize{ #' \item \code{prior} can be set to \code{NULL}, in which case prior samples are drawn internally. #' \item \code{prior} can also be a model equvilant to \code{posterior} but with samples from the priors \emph{only}. #' } #' \item When \code{posterior} is an \code{emmGrid} object: \itemize{ #' \item \code{prior} should be the \code{stanreg} or \code{brmsfit} model used to create the \code{emmGrid} objects. #' \item \code{prior} can also be an \code{emmGrid} object equvilant to \code{posterior} but created with a model of priors samples \emph{only}. #' } #' }} #' \subsection{One-sided Tests (setting an order restriction)}{ #' One sided tests (controlled by \code{direction}) are conducted by restricting the prior and #' posterior of the non-null values (the "alternative") to one side of the null only #' (\cite{Morey & Wagenmakers, 2014}). For example, if we have a prior hypothesis that the #' parameter should be positive, the alternative will be restricted to the region to the right #' of the null (point or interval). 
#' }
#' \subsection{Interpreting Bayes Factors}{
#' A Bayes factor greater than 1 can be interpreted as evidence against the null,
#' where one convention is that a Bayes factor greater than 3 can be considered
#' as "substantial" evidence against the null (and vice versa, a Bayes factor
#' smaller than 1/3 indicates substantial evidence in favor of the null-model)
#' (\cite{Wetzels et al. 2011}).
#' }
#'
#' @examples
#' library(bayestestR)
#'
#' prior <- distribution_normal(1000, mean = 0, sd = 1)
#' posterior <- distribution_normal(1000, mean = .5, sd = .3)
#'
#' bayesfactor_parameters(posterior, prior)
#' \dontrun{
#' # rstanarm models
#' # ---------------
#' if (require("rstanarm") && require("emmeans")) {
#'   contrasts(sleep$group) <- contr.bayes # see vignette
#'   stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep)
#'   bayesfactor_parameters(stan_model)
#'   bayesfactor_parameters(stan_model, null = rope_range(stan_model))
#'
#'   # emmGrid objects
#'   # ---------------
#'   group_diff <- pairs(emmeans(stan_model, ~group))
#'   bayesfactor_parameters(group_diff, prior = stan_model)
#' }
#'
#' # brms models
#' # -----------
#' if (require("brms")) {
#'   contrasts(sleep$group) <- contr.bayes # see vignette
#'   my_custom_priors <-
#'     set_prior("student_t(3, 0, 1)", class = "b") +
#'     set_prior("student_t(3, 0, 1)", class = "sd", group = "ID")
#'
#'   brms_model <- brm(extra ~ group + (1 | ID),
#'     data = sleep,
#'     prior = my_custom_priors
#'   )
#'   bayesfactor_parameters(brms_model)
#' }
#' }
#' @references
#' \itemize{
#'   \item Wagenmakers, E. J., Lodewyckx, T., Kuriyal, H., and Grasman, R. (2010). Bayesian hypothesis testing for psychologists: A tutorial on the Savage-Dickey method. Cognitive psychology, 60(3), 158-189.
#'   \item Heck, D. W. (2019). A caveat on the Savage–Dickey density ratio: The case of computing Bayes factors for regression parameters. British Journal of Mathematical and Statistical Psychology, 72(2), 316-333.
#'   \item Morey, R.
D., & Wagenmakers, E. J. (2014). Simple relation between Bayesian order-restricted and point-null hypothesis tests. Statistics & Probability Letters, 92, 121-124. #' \item Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. Psychological methods, 16(4), 406. #' \item Liao, J. G., Midya, V., & Berg, A. (2020). Connecting and contrasting the Bayes factor and a modified ROPE procedure for testing interval null hypotheses. The American Statistician, 1-19. #' \item Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and Wagenmakers, E.-J. (2011). Statistical Evidence in Experimental Psychology: An Empirical Comparison Using 855 t Tests. Perspectives on Psychological Science, 6(3), 291–298. \doi{10.1177/1745691611406923} #' } #' #' @author Mattan S. Ben-Shachar #' #' @export bayesfactor_parameters <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ...) { UseMethod("bayesfactor_parameters") } #' @rdname bayesfactor_parameters #' @export bayesfactor_pointull <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ...) { if (length(null) > 1) { message("'null' is a range - computing a ROPE based Bayes factor.") } bayesfactor_parameters( posterior = posterior, prior = prior, direction = direction, null = null, verbose = verbose, ... ) } #' @rdname bayesfactor_parameters #' @export bayesfactor_rope <- function(posterior, prior = NULL, direction = "two-sided", null = rope_range(posterior), verbose = TRUE, ...) { if (length(null) < 2) { message("'null' is a point - computing a Savage-Dickey (point null) Bayes factor.") } bayesfactor_parameters( posterior = posterior, prior = prior, direction = direction, null = null, verbose = verbose, ... 
) } #' @rdname bayesfactor_parameters #' @export bf_parameters <- bayesfactor_parameters #' @rdname bayesfactor_parameters #' @export bf_pointull <- bayesfactor_pointull #' @rdname bayesfactor_parameters #' @export bf_rope <- bayesfactor_rope #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.numeric <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ...) { # nm <- .safe_deparse(substitute(posterior) if (is.null(prior)) { prior <- posterior if (verbose) { warning( "Prior not specified! ", "Please specify a prior (in the form 'prior = distribution_normal(1000, 0, 1)')", " to get meaningful results." ) } } prior <- data.frame(X = prior) posterior <- data.frame(X = posterior) # colnames(posterior) <- colnames(prior) <- nm # Get BFs sdbf <- bayesfactor_parameters.data.frame( posterior = posterior, prior = prior, direction = direction, null = null, ... ) sdbf$Parameter <- NULL sdbf } #' @importFrom insight clean_parameters #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.stanreg <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { cleaned_parameters <- insight::clean_parameters(posterior) effects <- match.arg(effects) component <- match.arg(component) samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose, effects = effects, component = component, parameters = parameters) # Get BFs temp <- bayesfactor_parameters.data.frame( posterior = samps$posterior, prior = samps$prior, direction = direction, null = null, ... 
) bf_val <- .prepare_output(temp, cleaned_parameters) class(bf_val) <- class(temp) attr(bf_val, "hypothesis") <- attr(temp, "hypothesis") # don't change the name of this attribute - it is used only internally for "see" and printing attr(bf_val, "direction") <- attr(temp, "direction") attr(bf_val, "plot_data") <- attr(temp, "plot_data") bf_val } #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.brmsfit <- bayesfactor_parameters.stanreg #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.emmGrid <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ...) { samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose) # Get BFs bayesfactor_parameters.data.frame( posterior = samps$posterior, prior = samps$prior, direction = direction, null = null, ... ) } #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.data.frame <- function(posterior, prior = NULL, direction = "two-sided", null = 0, verbose = TRUE, ...) { # find direction direction <- .get_direction(direction) if (is.null(prior)) { prior <- posterior warning( "Prior not specified! ", "Please specify priors (with column order matching 'posterior')", " to get meaningful results." 
) } sdbf <- numeric(ncol(posterior)) for (par in seq_along(posterior)) { sdbf[par] <- .bayesfactor_parameters( posterior[[par]], prior[[par]], direction = direction, null = null ) } bf_val <- data.frame( Parameter = colnames(posterior), BF = sdbf, stringsAsFactors = FALSE ) class(bf_val) <- unique(c( "bayesfactor_parameters", "see_bayesfactor_parameters", class(bf_val) )) attr(bf_val, "hypothesis") <- null # don't change the name of this attribute - it is used only internally for "see" and printing attr(bf_val, "direction") <- direction attr(bf_val, "plot_data") <- .make_BF_plot_data(posterior, prior, direction, null) bf_val } #' @keywords internal #' @importFrom insight print_color .bayesfactor_parameters <- function(posterior, prior, direction = 0, null = 0) { if (isTRUE(all.equal(posterior, prior))) { return(1) } if (!requireNamespace("logspline")) { stop("Package \"logspline\" needed for this function to work. Please install it.") } if (length(null) == 1) { relative_density <- function(samples) { f_samples <- suppressWarnings(logspline::logspline(samples)) d_samples <- logspline::dlogspline(null, f_samples) if (direction < 0) { norm_samples <- logspline::plogspline(null, f_samples) } else if (direction > 0) { norm_samples <- 1 - logspline::plogspline(null, f_samples) } else { norm_samples <- 1 } d_samples / norm_samples } return(relative_density(prior) / relative_density(posterior)) } else if (length(null) == 2) { null <- sort(null) null[is.infinite(null)] <- 1.797693e+308 * sign(null[is.infinite(null)]) f_prior <- logspline::logspline(prior) f_posterior <- logspline::logspline(posterior) h0_prior <- diff(logspline::plogspline(null, f_prior)) h0_post <- diff(logspline::plogspline(null, f_posterior)) BF_null_full <- h0_post / h0_prior if (direction < 0) { h1_prior <- logspline::plogspline(min(null), f_prior) h1_post <- logspline::plogspline(min(null), f_posterior) } else if (direction > 0) { h1_prior <- 1 - logspline::plogspline(max(null), f_prior) h1_post <- 1 
- logspline::plogspline(max(null), f_posterior) } else { h1_prior <- 1 - h0_prior h1_post <- 1 - h0_post } BF_alt_full <- h1_post / h1_prior return(BF_alt_full / BF_null_full) } else { stop("'null' must be of length 1 or 2") } } # Bad Methods ------------------------------------------------------------- #' @export bayesfactor_parameters.bayesfactor_models <- function(...) { stop( "Oh no, 'bayesfactor_parameters()' does not know how to deal with multiple models :(\n", "You might want to use 'bayesfactor_inclusion()' here to test specific terms across models." ) } #' @export bayesfactor_parameters.sim <- function(...) { stop( "Bayes factors are based on the shift from a prior to a posterior. ", "Since simulated draws are not based on any priors, computing Bayes factors does not make sense :(\n", "You might want to try `rope`, `ci`, `pd` or `pmap` for posterior-based inference." ) } #' @export bayesfactor_parameters.sim.merMod <- bayesfactor_parameters.sim bayestestR/R/bayesfactor_restricted.R0000644000176200001440000002077413615677374017512 0ustar liggesusers#' Bayes Factors (BF) for Order Restricted Models #' #' This method computes Bayes factors for comparing a model with an order restrictions on its parameters #' with the fully unrestricted model. \emph{Note that this method should only be used for confirmatory analyses}. #' \cr \cr #' The \code{bf_*} function is an alias of the main function. #' \cr \cr #' \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.} #' #' @param posterior A \code{stanreg} / \code{brmsfit} object, \code{emmGrid} or a data frame - representing a posterior distribution(s) from (see Details). #' @param hypothesis A character vector specifying the restrictions as logical conditions (see examples below). #' @param prior An object representing a prior distribution (see Details). 
#' @inheritParams hdi #' #' @details This method is used to compute Bayes factors for order-restricted models vs un-restricted #' models by setting an order restriction on the prior and posterior distributions #' (\cite{Morey & Wagenmakers, 2013}). #' \cr\cr #' (Though it is possible to use \code{bayesfactor_restricted()} to test interval restrictions, #' it is more suitable for testing order restrictions; see examples). #' \cr\cr #' For the computation of Bayes factors, the model priors must be proper priors (at the very least #' they should be \emph{not flat}, and it is preferable that they be \emph{informative}); As the priors for #' the alternative get wider, the likelihood of the null value(s) increases, to the extreme that for completely #' flat priors the null is infinitely more favorable than the alternative (this is called \emph{the Jeffreys-Lindley-Bartlett #' paradox}). Thus, you should only ever try (or want) to compute a Bayes factor when you have an informed prior. #' \cr\cr #' (Note that by default, \code{brms::brm()} uses flat priors for fixed-effects.) #' #' \subsection{Setting the correct \code{prior}}{ #' It is important to provide the correct \code{prior} for meaningful results. #' \itemize{ #' \item When \code{posterior} is a \code{data.frame}, \code{prior} should also be a \code{data.frame}, with matching column order. #' \item When \code{posterior} is a \code{stanreg} or \code{brmsfit} model: \itemize{ #' \item \code{prior} can be set to \code{NULL}, in which case prior samples are drawn internally. #' \item \code{prior} can also be a model equvilant to \code{posterior} but with samples from the priors \emph{only}. #' } #' \item When \code{posterior} is an \code{emmGrid} object: \itemize{ #' \item \code{prior} should be the \code{stanreg} or \code{brmsfit} model used to create the \code{emmGrid} objects. 
#' \item \code{prior} can also be an \code{emmGrid} object equvilant to \code{posterior} but created with a model of priors samples \emph{only}. #' } #' }} #' \subsection{Interpreting Bayes Factors}{ #' A Bayes factor greater than 1 can be interpereted as evidence against the null, #' at which one convention is that a Bayes factor greater than 3 can be considered #' as "substantial" evidence against the null (and vice versa, a Bayes factor #' smaller than 1/3 indicates substantial evidence in favor of the null-hypothesis) #' (\cite{Wetzels et al. 2011}). #' } #' #' @return A data frame containing the Bayes factor representing evidence \emph{against} the un-restricted model. #' #' @examples #' library(bayestestR) #' prior <- data.frame( #' X = rnorm(100), #' X1 = rnorm(100), #' X3 = rnorm(100) #' ) #' #' posterior <- data.frame( #' X = rnorm(100, .4), #' X1 = rnorm(100, -.2), #' X3 = rnorm(100) #' ) #' #' hyps <- c( #' "X > X1 & X1 > X3", #' "X > X1" #' ) #' #' bayesfactor_restricted(posterior, hypothesis = hyps, prior = prior) #' \dontrun{ #' # rstanarm models #' # --------------- #' if (require("rstanarm") && require("emmeans")) { #' fit_stan <- stan_glm(mpg ~ wt + cyl + am, #' data = mtcars #' ) #' hyps <- c( #' "am > 0 & cyl < 0", #' "cyl < 0", #' "wt - cyl > 0" #' ) #' bayesfactor_restricted(fit_stan, hypothesis = hyps) #' #' # emmGrid objects #' # --------------- #' # replicating http://bayesfactor.blogspot.com/2015/01/multiple-comparisons-with-bayesfactor-2.html #' disgust_data <- read.table(url("http://www.learnbayes.org/disgust_example.txt"), header = TRUE) #' #' contrasts(disgust_data$condition) <- contr.bayes # see vignette #' fit_model <- stan_glm(score ~ condition, data = disgust_data, family = gaussian()) #' #' em_condition <- emmeans(fit_model, ~condition) #' hyps <- c("lemon < control & control < sulfur") #' #' bayesfactor_restricted(em_condition, prior = fit_model, hypothesis = hyps) #' # > # Bayes Factor (Order-Restriction) #' # > #' # > Hypothesis 
P(Prior) P(Posterior) Bayes Factor #' # > lemon < control & control < sulfur 0.17 0.75 4.49 #' # > --- #' # > Bayes factors for the restricted model vs. the un-restricted model. #' } #' } #' @references #' \itemize{ #' \item Morey, R. D., & Wagenmakers, E. J. (2014). Simple relation between Bayesian order-restricted and point-null hypothesis tests. Statistics & Probability Letters, 92, 121-124. #' \item Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. Psychological methods, 16(4), 406. #' \item Morey, R. D. (Jan, 2015). Multiple Comparisons with BayesFactor, Part 2 – order restrictions. Retrived from https://richarddmorey.org/category/order-restrictions/. #' } #' #' @export bayesfactor_restricted <- function(posterior, hypothesis, prior = NULL, verbose = TRUE, ...) { UseMethod("bayesfactor_restricted") } #' @rdname bayesfactor_restricted #' @export bf_restricted <- bayesfactor_restricted #' @rdname bayesfactor_restricted #' @export bayesfactor_restricted.stanreg <- function(posterior, hypothesis, prior = NULL, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), ...) { effects <- match.arg(effects) component <- match.arg(component) samps <- .clean_priors_and_posteriors(posterior, prior, effects, component, verbose = verbose) # Get savage-dickey BFs bayesfactor_restricted.data.frame( posterior = samps$posterior, prior = samps$prior, hypothesis = hypothesis ) } #' @rdname bayesfactor_restricted #' @export bayesfactor_restricted.brmsfit <- bayesfactor_restricted.stanreg #' @rdname bayesfactor_restricted #' @export bayesfactor_restricted.emmGrid <- function(posterior, hypothesis, prior = NULL, verbose = TRUE, ...) 
{ samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose) bayesfactor_restricted.data.frame( posterior = samps$posterior, prior = samps$prior, hypothesis = hypothesis ) } #' @export bayesfactor_restricted.data.frame <- function(posterior, hypothesis, prior = NULL, ...) { p_hypothesis <- parse(text = hypothesis) if (is.null(prior)) { prior <- posterior warning( "Prior not specified! ", "Please specify priors (with column names matching 'posterior')", " to get meaningful results." ) } .get_prob <- function(x, data) { x_logical <- try(eval(x, envir = data), silent = TRUE) if (inherits(x_logical, "try-error")) { cnames <- colnames(data) is_name <- make.names(cnames) == cnames cnames[!is_name] <- paste0("`", cnames[!is_name], "`") cnames <- paste0(cnames, collapse = ", ") stop(x_logical, "Available parameters are: ", cnames) } else if (!all(is.logical(x_logical))) { stop("Hypotheses must be logical") } mean(x_logical) } posterior_p <- sapply(p_hypothesis, .get_prob, data = posterior) prior_p <- sapply(p_hypothesis, .get_prob, data = prior) BF <- posterior_p / prior_p res <- data.frame( Hypothesis = hypothesis, Prior_prob = prior_p, Posterior_prob = posterior_p, BF = BF ) class(res) <- unique(c( "bayesfactor_restricted", class(res) )) res } bayestestR/R/print.p_direction.R0000644000176200001440000000114013546320102016345 0ustar liggesusers#' @export print.p_direction <- function(x, digits = 2, ...) { if ("data_plot" %in% class(x)) { print(as.data.frame(x)) } else if ("data.frame" %in% class(x)) { .print_pd(x, digits, ...) } else { cat(sprintf("pd = %s%%", insight::format_value(x * 100, digits = digits))) } } #' @keywords internal .print_pd <- function(x, digits, ...) 
{ insight::print_color("# Probability of Direction (pd)\n\n", "blue") x$Parameter <- as.character(x$Parameter) x$pd <- sprintf("%s%%", insight::format_value(x$pd * 100, digits = digits)) print_data_frame(x, digits = digits) } bayestestR/R/reshape_ci.R0000644000176200001440000000616613536647164015055 0ustar liggesusers#' Reshape CI between wide/long formats #' #' Reshape CI between wide/long formats. #' #' @param x A data.frame containing \code{CI_low} and \code{CI_high}. #' #' @examples #' library(bayestestR) #' #' x <- data.frame(replicate(4, rnorm(100))) #' x <- ci(x, ci = c(0.68, 0.89, 0.95)) #' reshape_ci(x) #' reshape_ci(reshape_ci(x)) #' #' x <- data.frame(replicate(4, rnorm(100))) #' x <- describe_posterior(x, ci = c(0.68, 0.89, 0.95)) #' reshape_ci(x) #' reshape_ci(reshape_ci(x)) #' @importFrom stats reshape #' @export reshape_ci <- function(x) { # Long to wide ---------------- if ("CI_low" %in% names(x) & "CI_high" %in% names(x) & "CI" %in% names(x)) { ci_position <- which(names(x) == "CI") # Reshape if (length(unique(x$CI)) > 1) { if (!"Parameter" %in% names(x)) { x$Parameter <- x$CI remove_parameter <- TRUE } else { remove_parameter <- FALSE } x <- stats::reshape( x, idvar = "Parameter", timevar = "CI", direction = "wide", v.names = c("CI_low", "CI_high"), sep = "_" ) row.names(x) <- NULL if (remove_parameter) x$Parameter <- NULL } # Replace at the right place ci_colname <- names(x)[c(grepl("CI_low_*", names(x)) | grepl("CI_high_*", names(x)))] colnames_1 <- names(x)[0:(ci_position - 1)][!names(x)[0:(ci_position - 1)] %in% ci_colname] colnames_2 <- names(x)[!names(x) %in% c(ci_colname, colnames_1)] x <- x[c(colnames_1, ci_colname, colnames_2)] # Wide to long -------------- } else { if (!"Parameter" %in% names(x)) { x$Parameter <- 1:nrow(x) remove_parameter <- TRUE } else { remove_parameter <- FALSE } lows <- grepl("CI_low_*", names(x)) highs <- grepl("CI_high_*", names(x)) ci <- as.numeric(gsub("CI_low_", "", names(x)[lows])) if (paste0(ci, collapse = 
"-") != paste0(gsub("CI_high_", "", names(x)[highs]), collapse = "-")) {
      # The CI levels encoded in the CI_low_* and CI_high_* suffixes must match.
      # NOTE(review): the original had an unreachable `return(x)` after this
      # stop(); it has been removed as dead code.
      stop("Something went wrong in the CIs reshaping.")
    }
    if (sum(lows) > 1 & sum(highs) > 1) {
      # Reshape lows and highs separately, then merge on the shared id columns
      low <- stats::reshape(
        x[!highs],
        direction = "long",
        varying = list(names(x)[lows]),
        sep = "_",
        timevar = "CI",
        v.names = "CI_low",
        times = ci
      )
      high <- stats::reshape(
        x[!lows],
        direction = "long",
        varying = list(names(x)[highs]),
        sep = "_",
        timevar = "CI",
        v.names = "CI_high",
        times = ci
      )
      x <- merge(low, high)
      x$id <- NULL
      x <- x[order(x$Parameter), ]
      row.names(x) <- NULL
      if (remove_parameter) x$Parameter <- NULL
    }

    # Replace at the right place: restore the original column ordering
    # around the (now long-format) CI columns.
    ci_position <- which(lows)[1]
    ci_colname <- c("CI", "CI_low", "CI_high")
    colnames_1 <- names(x)[0:(ci_position - 1)][!names(x)[0:(ci_position - 1)] %in% ci_colname]
    colnames_2 <- names(x)[!names(x) %in% c(ci_colname, colnames_1)]
    x <- x[c(colnames_1, ci_colname, colnames_2)]
  }

  class(x) <- intersect(c("data.frame", "numeric"), class(x))
  x
}
bayestestR/R/utils.R0000644000176200001440000000741713571072511014076 0ustar liggesusers# trim leading / trailing whitespace
.trim <- function(x) gsub("^\\s+|\\s+$", "", x)

# safe deparse, also for very long strings
.safe_deparse <- function(string) {
  paste0(sapply(deparse(string, width.cutoff = 500), .trim, simplify = TRUE), collapse = "")
}

# has object an element with given name?
#' @keywords internal
.obj_has_name <- function(x, name) {
  name %in% names(x)
}

# remove NULL elements from lists
#' @keywords internal
.compact_list <- function(x) x[!sapply(x, function(i) length(i) == 0 || is.null(i) || any(i == "NULL"))]

# is string empty?
#' @keywords internal .is_empty_object <- function(x) { if (is.list(x)) { x <- tryCatch( { .compact_list(x) }, error = function(x) { x } ) } # this is an ugly fix because of ugly tibbles if (inherits(x, c("tbl_df", "tbl"))) x <- as.data.frame(x) x <- suppressWarnings(x[!is.na(x)]) length(x) == 0 || is.null(x) } # select rows where values in "variable" match "value" #' @keywords internal .select_rows <- function(data, variable, value) { data[which(data[[variable]] == value), ] } # remove column #' @keywords internal .remove_column <- function(data, variables) { data[variables] <- NULL data } #' @importFrom stats reshape #' @keywords internal .to_long <- function(x, names_to = "key", values_to = "value", columns = colnames(x)) { if (is.numeric(columns)) columns <- colnames(x)[columns] dat <- stats::reshape( as.data.frame(x), idvar = "id", ids = row.names(x), times = columns, timevar = names_to, v.names = values_to, varying = list(columns), direction = "long" ) if (is.factor(dat[[values_to]])) { dat[[values_to]] <- as.character(dat[[values_to]]) } dat[, 1:(ncol(dat) - 1), drop = FALSE] } #' select numerics columns #' @keywords internal .select_nums <- function(x) { x[unlist(lapply(x, is.numeric))] } ## TODO remove?!? 
# #' Used in describe_posterior # #' @keywords internal # .reorder_rows <- function(x, out, ci = NULL) { # if (!is.data.frame(out) || nrow(out) == 1) { # return(out) # } # # if (is.null(ci)) { # refdata <- point_estimate(x, centrality = "median", dispersion = FALSE) # order <- refdata$Parameter # out <- out[match(order, out$Parameter), ] # } else { # uncertainty <- ci(x, ci = ci) # order <- paste0(uncertainty$Parameter, uncertainty$CI) # out <- out[match(order, paste0(out$Parameter, out$CI)), ] # } # rownames(out) <- NULL # out # } #' @keywords internal .get_direction <- function(direction) { if (length(direction) > 1) warning("Using first 'direction' value.") if (is.numeric(direction[1])) { return(sign(direction[1])) } Value <- c( "left" = -1, "right" = 1, "two-sided" = 0, "twosided" = 0, "one-sided" = 1, "onesided" = 1, "<" = -1, ">" = 1, "=" = 0, "==" = 0, "-1" = -1, "0" = 0, "1" = 1, "+1" = 1 ) direction <- Value[tolower(direction[1])] if (is.na(direction)) { stop("Unrecognized 'direction' argument.") } direction } .prepare_output <- function(temp, cleaned_parameters) { merge_by <- intersect(c("Parameter", "Effects", "Component"), colnames(temp)) temp$.roworder <- 1:nrow(temp) out <- merge(x = temp, y = cleaned_parameters, by = merge_by, all.x = TRUE) attr(out, "Cleaned_Parameter") <- out$Cleaned_Parameter[order(out$.roworder)] .remove_column(out[order(out$.roworder), ], c("Group", "Cleaned_Parameter", "Response", "Function", ".roworder")) } .merge_and_sort <- function(x, y, by, all) { x$.rowid <- 1:nrow(x) x <- merge(x, y, by = by, all = all) .remove_column(x[order(x$.rowid), ], ".rowid") } bayestestR/R/print.bayesfactor_inclusion.R0000644000176200001440000000163313506415364020455 0ustar liggesusers#' @export print.bayesfactor_inclusion <- function(x, digits = 2, log = FALSE, ...) 
{
  BFE <- x
  priorOdds <- attr(BFE, "priorOdds")

  # Report the inclusion BFs on the log scale if requested
  if (log) {
    BFE$BF <- log(BFE$BF)
  }
  BFE$BF <- .format_big_small(BFE$BF, digits = digits)
  colnames(BFE) <- c("Pr(prior)", "Pr(posterior)", "Inclusion BF")

  insight::print_color("# Inclusion Bayes Factors (Model Averaged)\n\n", "blue")
  print.data.frame(BFE, digits = digits)
  cat("\n")

  # Footer: note which models were compared and which prior odds were used
  cat("* Compared among: ")
  if (attr(BFE, "matched")) {
    insight::print_color("matched models only\n", "cyan")
  } else {
    insight::print_color("all models\n", "cyan")
  }
  cat("* Priors odds: ")
  if (!is.null(priorOdds)) {
    insight::print_color("custom\n", "cyan")
  } else {
    insight::print_color("uniform-equal\n", "cyan")
  }
  if (log) insight::print_color("\nBayes Factors are on the log-scale.\n", "red")

  # Return the untransformed input invisibly, per print-method convention
  invisible(x)
}
bayestestR/R/convert_pd_to_p.R0000644000176200001440000000174213535721433016121 0ustar liggesusers#' Convert between Probability of Direction (pd) and p-value.
#'
#' Enables a conversion between Probability of Direction (pd) and p-value.
#'
#' @param pd A Probability of Direction (pd) value (between 0 and 1).
#' @param p A p-value.
#' @param direction What type of p-value is requested or provided. Can be \code{"two-sided"} (default, two tailed) or \code{"one-sided"} (one tailed).
#' @param ... Arguments passed to or from other methods.
#'
#' @examples
#' pd_to_p(pd = 0.95)
#' pd_to_p(pd = 0.95, direction = "one-sided")
#' @export
pd_to_p <- function(pd, direction = "two-sided", ...) {
  # One-sided p is the complement of pd; a two-sided p doubles it
  p <- (1 - pd)
  if (.get_direction(direction) == 0) {
    p <- 2 * p
  }
  p
}

#' @rdname pd_to_p
#' @export
p_to_pd <- function(p, direction = "two-sided", ...) {
  # Inverse of pd_to_p(): halve a two-sided p before taking the complement
  if (.get_direction(direction) == 0) {
    p <- p / 2
  }
  (1 - p)
}

#' @rdname pd_to_p
#' @export
convert_p_to_pd <- p_to_pd

#' @rdname pd_to_p
#' @export
convert_pd_to_p <- pd_to_p
bayestestR/R/print.point_estimate.R0000644000176200001440000000046113536442410017105 0ustar liggesusers#' @export
print.point_estimate <- function(x, digits = 2, ...)
{ if ("data_plot" %in% class(x)) { print(as.data.frame(x)) } else if ("data.frame" %in% class(x)) { insight::print_color("# Point Estimates\n\n", "blue") print_data_frame(x, digits = digits) } else { x } } bayestestR/R/estimate_density.R0000644000176200001440000002352213613227664016313 0ustar liggesusers#' Density Estimation #' #' This function is a wrapper over different methods of density estimation. By default, it uses the base R \link{density} with by default uses a different smoothing bandwidth (\code{"SJ"}) from the legacy default implemented the base R \link{density} function (\code{"nrd0"}). However, Deng \& Wickham suggest that \code{method = "KernSmooth"} is the fastest and the most accurate. #' #' @inheritParams hdi #' @inheritParams stats::density #' @param method Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}. #' @param precision Number of points of density data. See the \code{n} parameter in \link[=density]{density}. #' @param extend Extend the range of the x axis by a factor of \code{extend_scale}. #' @param extend_scale Ratio of range by which to extend the x axis. A value of \code{0.1} means that the x axis will be extended by \code{1/10} of the range of the data. 
#' #' @examples #' library(bayestestR) #' #' set.seed(1) #' x <- rnorm(250, 1) #' #' # Methods #' density_kernel <- estimate_density(x, method = "kernel") #' density_logspline <- estimate_density(x, method = "logspline") #' density_KernSmooth <- estimate_density(x, method = "KernSmooth") #' density_mixture <- estimate_density(x, method = "mixture") #' #' hist(x, prob = TRUE) #' lines(density_kernel$x, density_kernel$y, col = "black", lwd = 2) #' lines(density_logspline$x, density_logspline$y, col = "red", lwd = 2) #' lines(density_KernSmooth$x, density_KernSmooth$y, col = "blue", lwd = 2) #' lines(density_mixture$x, density_mixture$y, col = "green", lwd = 2) #' #' # Extension #' density_extended <- estimate_density(x, extend = TRUE) #' density_default <- estimate_density(x, extend = FALSE) #' #' hist(x, prob = TRUE) #' lines(density_extended$x, density_extended$y, col = "red", lwd = 3) #' lines(density_default$x, density_default$y, col = "black", lwd = 3) #' #' df <- data.frame(replicate(4, rnorm(100))) #' head(estimate_density(df)) #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' library(rstanarm) #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' head(estimate_density(model)) #' #' library(emmeans) #' head(estimate_density(emtrends(model, ~1, "wt"))) #' #' # brms models #' # ----------------------------------------------- #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' estimate_density(model) #' } #' #' @references Deng, H., & Wickham, H. (2011). Density estimation in R. Electronic publication. #' #' @importFrom stats density #' @importFrom utils install.packages #' @export estimate_density <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ...) 
{ UseMethod("estimate_density") } #' @importFrom stats predict #' @keywords internal .estimate_density <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ...) { method <- match.arg(tolower(method), c("kernel", "logspline", "kernsmooth", "smooth", "mixture", "mclust")) # Remove NA x <- x[!is.na(x)] # Range x_range <- range(x) if (extend) { extension_scale <- diff(x_range) * extend_scale x_range[1] <- x_range[1] - extension_scale x_range[2] <- x_range[2] + extension_scale } # Replace inf values if needed x_range[is.infinite(x_range)] <- 5.565423e+156 # Kernel if (method == "kernel") { return(as.data.frame(density(x, n = precision, bw = bw, from = x_range[1], to = x_range[2], ...))) # Logspline } else if (method == "logspline") { if (!requireNamespace("logspline")) { if (interactive()) { readline("Package \"logspline\" needed for this function. Press ENTER to install or ESCAPE to abort.") install.packages("logspline") } else { stop("Package \"logspline\" needed for this function. Press run 'install.packages(\"logspline\")'.") } } x_axis <- seq(x_range[1], x_range[2], length.out = precision) y <- logspline::dlogspline(x_axis, logspline::logspline(x, ...), ...) return(data.frame(x = x_axis, y = y)) # KernSmooth } else if (method %in% c("kernsmooth", "smooth")) { if (!requireNamespace("KernSmooth")) { if (interactive()) { readline("Package \"KernSmooth\" needed for this function. Press ENTER to install or ESCAPE to abort.") install.packages("KernSmooth") } else { stop("Package \"KernSmooth\" needed for this function. Press run 'install.packages(\"KernSmooth\")'.") } } return(as.data.frame(KernSmooth::bkde(x, range.x = x_range, gridsize = precision, truncate = TRUE, ...))) # Mixturre } else if (method %in% c("mixture", "mclust")) { if (!requireNamespace("mclust")) { if (interactive()) { readline("Package \"mclust\" needed for this function. 
Press ENTER to install or ESCAPE to abort.")
        # BUG FIX: previously installed "KernSmooth" here, but this is the
        # mixture/mclust branch — the missing package is "mclust".
        install.packages("mclust")
      } else {
        stop("Package \"mclust\" needed for this function. Press run 'install.packages(\"mclust\")'.")
      }
    }
    # Evaluate the fitted mixture density over an evenly spaced grid
    x_axis <- seq(x_range[1], x_range[2], length.out = precision)
    y <- predict(mclust::densityMclust(x, verbose = FALSE), newdata = x_axis)
    return(data.frame(x = x_axis, y = y))
  } else {
    stop("method should be one of 'kernel', 'logspline', 'KernSmooth' or 'mixture'.")
  }
}

#' @export
estimate_density.numeric <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ...) {
  out <- .estimate_density(x, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ...)
  class(out) <- c("estimate_density", "see_estimate_density", class(out))
  out
}

#' @export
estimate_density.data.frame <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ...) {
  # Estimate one density per numeric column, labelled by column name
  x <- .select_nums(x)
  out <- sapply(x, estimate_density, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, simplify = FALSE)
  for (i in names(out)) {
    out[[i]]$Parameter <- i
  }
  out <- do.call(rbind, out)
  row.names(out) <- NULL
  out[, c("Parameter", "x", "y")]
}

#' @export
estimate_density.emmGrid <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ...) {
  if (!requireNamespace("emmeans")) {
    stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.")
  }
  # Convert the reference grid's posterior draws to a plain data frame
  x <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(x, names = FALSE)))
  estimate_density(x,
    method = method, precision = precision, extend = extend,
    extend_scale = extend_scale, bw = bw, ...
  )
}

#' @importFrom insight get_parameters
#' @export
estimate_density.stanreg <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", effects = c("fixed", "random", "all"), parameters = NULL, ...)
{ effects <- match.arg(effects) out <- estimate_density(insight::get_parameters(x, effects = effects, parameters = parameters), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @importFrom insight get_parameters #' @export estimate_density.brmsfit <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- estimate_density(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export estimate_density.MCMCglmm <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", parameters = NULL, ...) { nF <- x$Fixed$nfl out <- estimate_density(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @importFrom insight get_parameters #' @export estimate_density.mcmc <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", parameters = NULL, ...) { out <- estimate_density(insight::get_parameters(x, parameters = parameters), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' Coerce to a Data Frame #' #' @inheritParams base::as.data.frame #' @method as.data.frame density #' @export as.data.frame.density <- function(x, ...) 
{ data.frame(x = x$x, y = x$y) } #' Density Probability at a Given Value #' #' Compute the density value at a given point of a distribution (i.e., the value of the \code{y} axis of a value \code{x} of a distribution). #' #' @param posterior Vector representing a posterior distribution. #' @param x The value of which to get the approximate probability. #' @inheritParams estimate_density #' #' @examples #' library(bayestestR) #' posterior <- distribution_normal(n = 10) #' density_at(posterior, 0) #' density_at(posterior, c(0, 1)) #' @importFrom stats approx density #' @export density_at <- function(posterior, x, precision = 2^10, method = "kernel", ...) { density <- estimate_density(posterior, precision = precision, method = method, ...) stats::approx(density$x, density$y, xout = x)$y } bayestestR/R/bayesfactor_models.R0000644000176200001440000003035213615440575016605 0ustar liggesusers#' Bayes Factors (BF) for model comparison #' #' @description This function computes or extracts Bayes factors from fitted models. #' \cr \cr #' The \code{bf_*} function is an alias of the main function. #' #' @author Mattan S. Ben-Shachar #' #' @param ... Fitted models (see details), all fit on the same data, or a single \code{BFBayesFactor} object (see 'Details'). #' @param denominator Either an integer indicating which of the models to use as the denominator, #' or a model to be used as a denominator. Ignored for \code{BFBayesFactor}. #' @inheritParams hdi #' #' @details #' If the passed models are supported by \pkg{insight} the DV of all models will be tested for equality #' (else this is assumed to be true), and the models' terms will be extracted (allowing for follow-up #' analysis with \code{bayesfactor_inclusion}). #' #' \itemize{ #' \item For \code{brmsfit} or \code{stanreg} models, Bayes factors are computed using the \CRANpkg{bridgesampling} package. #' \itemize{ #' \item \code{brmsfit} models must have been fitted with \code{save_all_pars = TRUE}. 
#' \item \code{stanreg} models must have been fitted with a defined \code{diagnostic_file}.
#' }
#' \item For \code{BFBayesFactor}, \code{bayesfactor_models()} is mostly a wrapper around \code{BayesFactor::extractBF()}.
#' \item For all other model types (supported by \CRANpkg{insight}), BIC approximations are used to compute Bayes factors.
#' }
#' In order to correctly and precisely estimate Bayes factors, a rule of thumb is
#' the 4 P's: \strong{P}roper \strong{P}riors and \strong{P}lentiful \strong{P}osterior
#' (i.e. probably at least 40,000 samples instead of the default of 4,000).
#' \cr \cr
#' A Bayes factor greater than 1 can be interpreted as evidence against the compared-to
#' model (the denominator). One convention is that a Bayes factor greater than 3 can be considered
#' as "substantial" evidence against the denominator model (and vice versa, a Bayes factor
#' smaller than 1/3 indicates substantial evidence in favor of the denominator model)
#' (\cite{Wetzels et al. 2011}).
#' \cr \cr
#' See also \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.
#'
#' @return A data frame containing the models' formulas (reconstructed fixed and random effects) and their BFs, that prints nicely.
#' #' @examples #' # With lm objects: #' # ---------------- #' lm1 <- lm(Sepal.Length ~ 1, data = iris) #' lm2 <- lm(Sepal.Length ~ Species, data = iris) #' lm3 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) #' lm4 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) #' bayesfactor_models(lm1, lm2, lm3, lm4, denominator = 1) #' bayesfactor_models(lm2, lm3, lm4, denominator = lm1) # same result #' bayesfactor_models(lm1, lm2, lm3, lm4, denominator = lm1) # same result #' #' \dontrun{ #' # With lmerMod objects: #' # --------------------- #' if (require("lme4")) { #' lmer1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) #' lmer2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) #' lmer3 <- lmer( #' Sepal.Length ~ Petal.Length + (Petal.Length | Species) + (1 | Petal.Width), #' data = iris #' ) #' bayesfactor_models(lmer1, lmer2, lmer3, denominator = 1) #' bayesfactor_models(lmer1, lmer2, lmer3, denominator = lmer1) #' } #' #' # rstanarm models #' # --------------------- #' # (note that a unique diagnostic_file MUST be specified in order to work) #' if (require("rstanarm")) { #' stan_m0 <- stan_glm(Sepal.Length ~ 1, #' data = iris, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df0.csv") #' ) #' stan_m1 <- stan_glm(Sepal.Length ~ Species, #' data = iris, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df1.csv") #' ) #' stan_m2 <- stan_glm(Sepal.Length ~ Species + Petal.Length, #' data = iris, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df2.csv") #' ) #' bayesfactor_models(stan_m1, stan_m2, denominator = stan_m0) #' } #' #' #' # brms models #' # -------------------- #' # (note the save_all_pars MUST be set to TRUE in order to work) #' if (require("brms")) { #' brm1 <- brm(Sepal.Length ~ 1, data = iris, save_all_pars = TRUE) #' brm2 <- brm(Sepal.Length ~ Species, data = iris, save_all_pars = TRUE) #' brm3 <- brm( #' Sepal.Length ~ Species + 
Petal.Length, #' data = iris, #' save_all_pars = TRUE #' ) #' #' bayesfactor_models(brm1, brm2, brm3, denominator = 1) #' } #' #' #' # BayesFactor #' # --------------------------- #' if (require("BayesFactor")) { #' data(puzzles) #' BF <- anovaBF(RT ~ shape * color + ID, #' data = puzzles, #' whichRandom = "ID", progress = FALSE #' ) #' BF #' bayesfactor_models(BF) # basically the same #' } #' } #' @references #' \itemize{ #' \item Gronau, Q. F., Wagenmakers, E. J., Heck, D. W., and Matzke, D. (2019). A simple method for comparing complex models: Bayesian model comparison for hierarchical multinomial processing tree models using Warp-III bridge sampling. Psychometrika, 84(1), 261-284. #' \item Kass, R. E., and Raftery, A. E. (1995). Bayes Factors. Journal of the American Statistical Association, 90(430), 773-795. #' \item Robert, C. P. (2016). The expected demise of the Bayes factor. Journal of Mathematical Psychology, 72, 33–37. #' \item Wagenmakers, E. J. (2007). A practical solution to the pervasive problems of p values. Psychonomic bulletin & review, 14(5), 779-804. #' \item Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and Wagenmakers, E.-J. (2011). Statistical Evidence in Experimental Psychology: An Empirical Comparison Using 855 t Tests. Perspectives on Psychological Science, 6(3), 291–298. \doi{10.1177/1745691611406923} #' } #' #' @importFrom insight get_response is_model_supported #' @export bayesfactor_models <- function(..., denominator = 1, verbose = TRUE) { UseMethod("bayesfactor_models") } #' @rdname bayesfactor_models #' @export bf_models <- bayesfactor_models #' @importFrom stats BIC #' @export bayesfactor_models.default <- function(..., denominator = 1, verbose = TRUE) { # Organize the models and their names mods <- list(...) 
mnames <- sapply(match.call(expand.dots = FALSE)$`...`, .safe_deparse) if (!is.numeric(denominator)) { model_name <- .safe_deparse(match.call()[["denominator"]]) denominator_model <- which(mnames == model_name) if (length(denominator_model) == 0) { mods <- c(mods, list(denominator)) mnames <- c(mnames, model_name) denominator <- length(mods) } else { denominator <- denominator_model } } # Get formula / model names mforms <- mnames # supported models supported_models <- all(sapply(mods, insight::is_model_supported)) if (supported_models) { # Test that all is good: resps <- lapply(mods, insight::get_response) if (!any(sapply(resps, is.null))) { if (!all(sapply(resps[-denominator], function(x) identical(x, resps[[denominator]])))) { stop("Models were not computed from the same data.") } mforms <- sapply(mods, .find_full_formula) } else { supported_models <- FALSE } } if (verbose && !supported_models) { object_names <- match.call(expand.dots = FALSE)$`...` warning(sprintf( "Unable to extract terms / validate that the following models use the same data: \n%s", paste0(mnames[!supported_models], collapse = ", ") ), call. = FALSE) } # Get BF mBIC <- sapply(mods, BIC) mBFs <- (mBIC - mBIC[denominator]) / (-2) res <- data.frame( Model = mforms, BF = exp(mBFs), stringsAsFactors = FALSE ) attr(res, "denominator") <- denominator attr(res, "BF_method") <- "BIC approximation" attr(res, "unsupported_models") <- !supported_models class(res) <- c("bayesfactor_models", "see_bayesfactor_models", class(res)) res } #' @importFrom insight get_response find_algorithm .bayesfactor_models_stan <- function(..., denominator = 1, verbose = TRUE) { if (!requireNamespace("bridgesampling")) { stop("Package 'bridgesampling' required for this function to work. Please install it by running `install.packages('bridgesampling')`.") } # Orgenize the models mods <- list(...) 
# Warn n_samps <- sapply(mods, function(x) { alg <- insight::find_algorithm(x) (alg$iterations - alg$warmup) * alg$chains }) if (any(n_samps < 4e4)) { warning( "Bayes factors might not be precise.\n", "For precise Bayes factors, it is recommended sampling at least 40,000 posterior samples." ) } if (!is.numeric(denominator)) { model_name <- .safe_deparse(match.call()[["denominator"]]) arg_names <- sapply(match.call(expand.dots = FALSE)$`...`, .safe_deparse) denominator_model <- which(arg_names == model_name) if (length(denominator_model) == 0) { mods <- c(mods, list(denominator)) denominator <- length(mods) } else { denominator <- denominator_model } } # Test that all is good: resps <- lapply(mods, insight::get_response) if (!all(sapply(resps[-denominator], function(x) identical(x, resps[[denominator]])))) { stop("Models were not computed from the same data.") } # Get BF if (verbose) { message("Computation of Bayes factors: estimating marginal likelihood, please wait...") } mML <- lapply(mods, function(x) { bridgesampling::bridge_sampler(x, silent = TRUE) }) mBFs <- sapply(mML, function(x) { bridgesampling::bf(x, mML[[denominator]], log = TRUE)[["bf"]] }) # Get formula mforms <- sapply(mods, .find_full_formula) res <- data.frame( Model = mforms, BF = exp(mBFs), stringsAsFactors = FALSE ) attr(res, "denominator") <- denominator attr(res, "BF_method") <- "marginal likelihoods (bridgesampling)" attr(res, "unsupported_models") <- FALSE class(res) <- c("bayesfactor_models", "see_bayesfactor_models", class(res)) res } #' @export bayesfactor_models.stanreg <- function(..., denominator = 1, verbose = TRUE) { if (!requireNamespace("rstanarm")) { stop("Package 'rstanarm' required for this function to work. 
Please install it by running `install.packages('rstanarm')`.") } .bayesfactor_models_stan(..., denominator = denominator) } #' @export bayesfactor_models.brmsfit <- function(..., denominator = 1, verbose = TRUE) { if (!requireNamespace("brms")) { stop("Package 'brms' required for this function to work. Please install it by running `install.packages('brms')`.") } if (!("brms" %in% .packages())) { stop("This function requires package 'brms' to be loaded. Please run `library(brms)`.") } .bayesfactor_models_stan(..., denominator = denominator) } #' @export bayesfactor_models.BFBayesFactor <- function(..., verbose = TRUE) { models <- c(...) if (!requireNamespace("BayesFactor")) { stop("Package 'BayesFactor' required for this function to work. Please install it by running `install.packages('BayesFactor')`.") } mBFs <- c(0, BayesFactor::extractBF(models, TRUE, TRUE)) mforms <- sapply(c(models@denominator, models@numerator), function(x) x@shortName) mforms[mforms == "Intercept only"] <- "1" res <- data.frame( Model = unname(mforms), BF = exp(mBFs), stringsAsFactors = FALSE ) attr(res, "denominator") <- 1 attr(res, "BF_method") <- "JZS (BayesFactor)" attr(res, "unsupported_models") <- !"BFlinearModel" %in% class(models@denominator) class(res) <- c("bayesfactor_models", "see_bayesfactor_models", class(res)) res } #' @keywords internal #' @importFrom insight find_formula .find_full_formula <- function(mod) { formulas <- insight::find_formula(mod) conditional <- random <- NULL if (!is.null(formulas$conditional)) { conditional <- as.character(formulas$conditional)[3] } if (!is.null(formulas$random)) { if (!is.list(formulas$random)) { formulas$random <- list(formulas$random) } random <- sapply(formulas$random, function(x) { paste0("(", as.character(x)[2], ")") }) } paste(c(conditional, random), collapse = " + ") } bayestestR/R/simulate_data.R0000644000176200001440000000705413552545320015551 0ustar liggesusers#' Data Simulation #' #' Simulate data with specific characteristics. 
#'
#' @param n The number of observations to be generated.
#' @param r A value or vector corresponding to the desired correlation coefficients.
#' @param d A value or vector corresponding to the desired difference between the groups.
#' @param mean A value or vector corresponding to the mean of the variables.
#' @param sd A value or vector corresponding to the SD of the variables.
#' @param names A character vector of desired variable names.
#' @param ... Arguments passed to or from other methods.
#' @examples
#'
#' # Correlation --------------------------------
#' data <- simulate_correlation(r = 0.5)
#' plot(data$V1, data$V2)
#' cor.test(data$V1, data$V2)
#' summary(lm(V2 ~ V1, data = data))
#'
#' # Specify mean and SD
#' data <- simulate_correlation(r = 0.5, n = 50, mean = c(0, 1), sd = c(0.7, 1.7))
#' cor.test(data$V1, data$V2)
#' round(c(mean(data$V1), sd(data$V1)), 1)
#' round(c(mean(data$V2), sd(data$V2)), 1)
#' summary(lm(V2 ~ V1, data = data))
#'
#' # Generate multiple variables
#' cor_matrix <- matrix(c(
#'   1.0, 0.2, 0.4,
#'   0.2, 1.0, 0.3,
#'   0.4, 0.3, 1.0
#' ),
#' nrow = 3
#' )
#'
#' data <- simulate_correlation(r = cor_matrix, names = c("y", "x1", "x2"))
#' cor(data)
#' summary(lm(y ~ x1, data = data))
#'
#' # t-test --------------------------------
#' data <- simulate_ttest(n = 30, d = 0.3)
#' plot(data$V1, data$V0)
#' round(c(mean(data$V1), sd(data$V1)), 1)
#' diff(t.test(data$V1 ~ data$V0)$estimate)
#' summary(lm(V1 ~ V0, data = data))
#' summary(glm(V0 ~ V1, data = data, family = "binomial"))
#' @export
simulate_correlation <- function(n = 100, r = 0.5, mean = 0, sd = 1, names = NULL, ...) {
  if (!requireNamespace("MASS", quietly = TRUE)) {
    stop("Package 'MASS' required for this function to work.
    Please install it by running `install.packages('MASS')`.")
  }

  # Define the correlation matrix used as Sigma for MASS::mvrnorm().
  # A matrix 'r' is used as-is (after validation); a scalar 'r' builds
  # the bivariate matrix [[1, r], [r, 1]].
  if (is.matrix(r)) {
    if (isSymmetric(r)) {
      # BUGFIX: validate the magnitude on both sides; `any(r > 1)` alone
      # would let invalid negative correlations (e.g., -1.5) through to
      # mvrnorm, contradicting the error message below.
      if (any(abs(r) > 1)) {
        stop("'r' should only contain values between -1 and 1.")
      } else {
        sigma <- r
      }
    } else {
      stop("'r' should be a symmetric matrix (relative to the diagonal).")
    }
  } else if (length(r) == 1) {
    if (abs(r) > 1) {
      stop("'r' should only contain values between -1 and 1.")
    } else {
      sigma <- matrix(c(1, r, r, 1), nrow = 2)
    }
  } else {
    stop("'r' should be a value (e.g., r = 0.5) or a square matrix.")
  }

  # Get data; empirical = TRUE makes the sample correlation match 'r' exactly
  data <- MASS::mvrnorm(
    n = n,
    mu = rep_len(0, ncol(sigma)), # Means of variables
    Sigma = sigma,
    empirical = TRUE
  )

  # Adjust scale (column-wise multiply; 'sd' is recycled to the number of variables)
  if (any(sd != 1)) {
    data <- t(t(data) * rep_len(sd, ncol(sigma)))
  }

  # Adjust mean (column-wise shift; 'mean' is recycled to the number of variables)
  if (any(mean != 0)) {
    data <- t(t(data) + rep_len(mean, ncol(sigma)))
  }

  data <- as.data.frame(data)

  # Rename (silently ignored if 'names' has the wrong length)
  if (!is.null(names)) {
    if (length(names) == ncol(data)) {
      names(data) <- names
    }
  }
  data
}


#' @rdname simulate_correlation
#' @export
simulate_ttest <- function(n = 100, d = 0.5, names = NULL, ...) {
  x <- distribution_normal(n, 0, 1) # Continuous variables
  z <- 0 + d * x # Linear combination
  pr <- 1 / (1 + exp(-z)) # Pass it through an inverse logit function
  y <- distribution_binomial(n, 1, pr, random = 3) # Bernoulli response variable

  data <- data.frame(y = as.factor(y), x = x)
  names(data) <- paste0("V", 0:(ncol(data) - 1))

  # Rename (silently ignored if 'names' has the wrong length)
  if (!is.null(names)) {
    if (length(names) == ncol(data)) {
      names(data) <- names
    }
  }
  data
}
bayestestR/R/weighted_posteriors.R0000644000176200001440000001610713610210576017022 0ustar liggesusers#' Generate posterior distributions weighted across models
#'
#' Extract posterior samples of parameters, weighted across models.
#' Weighting is done by comparing posterior model probabilities, via \code{\link{bayesfactor_models}}.
#'
#' @param missing An optional numeric value to use if a model does not contain a parameter that appears in other models. Defaults to 0.
#' @inheritParams bayesfactor_models
#' @inheritParams bayesfactor_inclusion
#' @inheritParams bayesfactor_parameters
#'
#' @details
#' Note that across models some parameters might play different roles. For example,
#' the parameter \code{A} plays a different role in the model \code{Y ~ A + B} (where it is a main effect)
#' than it does in the model \code{Y ~ A + B + A:B} (where it is a simple effect). In many cases centering
#' of predictors (mean subtracting for continuous variables, and effects coding via \code{contr.sum} or
#' orthonormal coding via \code{\link{contr.bayes}} for factors) can reduce this issue. In any case
#' you should be mindful of this issue.
#' \cr\cr
#' See \code{\link{bayesfactor_models}} details for more info on passed models.
#' \cr\cr
#' Note that for \code{BayesFactor} models, posterior samples cannot be generated from intercept only models.
#' \cr\cr
#' This function is similar in function to \code{brms::\link[brms]{posterior_average}}.
#'
#' @return A data frame with posterior distributions (weighted across models).
#'
#' @seealso \code{\link{bayesfactor_inclusion}} for Bayesian model averaging.
#' #' @examples #' \donttest{ #' library(rstanarm) #' library(see) #' #' stan_m0 <- stan_glm(extra ~ 1, data = sleep, #' family = gaussian(), #' refresh=0, #' diagnostic_file = file.path(tempdir(), "df0.csv")) #' #' stan_m1 <- stan_glm(extra ~ group, data = sleep, #' family = gaussian(), #' refresh=0, #' diagnostic_file = file.path(tempdir(), "df1.csv")) #' #' #' res <- weighted_posteriors(stan_m0, stan_m1) #' #' plot(eti(res)) #' #' # With BayesFactor and brms #' library(BayesFactor) #' library(brms) #' #' BFmods <- anovaBF(extra ~ group + ID, sleep, whichRandom = "ID") #' #' res <- weighted_posteriors(BFmods)[1:3] #' plot(eti(res)) #' #' # Compare to brms::posterior_average #' fit1 <- brm(rating ~ treat + period + carry, #' data = inhaler, #' save_all_pars = TRUE) #' fit2 <- brm(rating ~ period + carry, #' data = inhaler, #' save_all_pars = TRUE) #' #' res_BT <- weighted_posteriors(fit1, fit2) #' res_brms <- brms::posterior_average(fit1, fit2, weights = "marglik", missing = 0)[, 1:4] #' #' plot(eti(res_BT)) #' plot(eti(res_brms)) #' } #' #' @references #' \itemize{ #' \item Clyde, M., Desimone, H., & Parmigiani, G. (1996). Prediction via orthogonalized model mixing. Journal of the American Statistical Association, 91(435), 1197-1208. #' \item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. (2019, March 25). A conceptual introduction to Bayesian Model Averaging. \doi{10.31234/osf.io/wgb64} #' \item Rouder, J. N., Haaf, J. M., & Vandekerckhove, J. (2018). Bayesian inference for psychology, part IV: Parameter estimation and Bayes factors. Psychonomic bulletin & review, 25(1), 102-113. #' \item van den Bergh, D., Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers, E. J. (2019). A cautionary note on estimating effect size. 
#' } #' #' @export weighted_posteriors <- function(..., prior_odds = NULL, missing = 0, verbose = TRUE) { UseMethod("weighted_posteriors") } #' @export #' @rdname weighted_posteriors #' @importFrom insight get_parameters weighted_posteriors.stanreg <- function(..., prior_odds = NULL, missing = 0, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL){ Mods <- list(...) effects <- match.arg(effects) component <- match.arg(component) # Get Bayes factors BFMods <- bayesfactor_models(..., denominator = 1, verbose = verbose) # Compute posterior model probabilities prior_odds <- c(1, prior_odds) posterior_odds <- prior_odds * BFMods$BF priorProbs <- prior_odds / sum(prior_odds) postProbs <- posterior_odds / sum(posterior_odds) # Compute weighted number of samples nsamples <- min(sapply(Mods, .total_samps)) weighted_samps <- round(nsamples * postProbs) # extract parameters params <- lapply(Mods, insight::get_parameters, effects = effects, component = component, parameters = parameters) .weighted_posteriors(params, weighted_samps, missing) } #' @export #' @rdname weighted_posteriors weighted_posteriors.brmsfit <- weighted_posteriors.stanreg #' @export #' @rdname weighted_posteriors weighted_posteriors.BFBayesFactor <- function(..., prior_odds = NULL, missing = 0, verbose = TRUE){ Mods <- c(...) 
# Get Bayes factors BFMods <- bayesfactor_models(Mods, verbose = verbose) # Compute posterior model probabilities prior_odds <- c(1, prior_odds) posterior_odds <- prior_odds * BFMods$BF priorProbs <- prior_odds / sum(prior_odds) postProbs <- posterior_odds / sum(posterior_odds) # Compute weighted number of samples nsamples <- 4000 weighted_samps <- round(nsamples * postProbs) # extract parameters params <- lapply(seq_len(length(Mods@numerator)), function(mi){ BayesFactor::posterior(Mods, iterations = nsamples, index = mi, progress = FALSE) }) mod_0 <- try(BayesFactor::posterior(1/Mods[1], iterations = nsamples, progress = FALSE), silent = TRUE) if (inherits(mod_0, "try-error")) { if (!grepl("Sampling from intercept-only", mod_0)) stop(mod_0) warning("Cannot sample from BFBayesFactor model with intercept only (model prob = ", round(postProbs[1],2), "). Ommiting the intercept model.") postProbs <- postProbs[-1] / sum(postProbs[-1]) weighted_samps <- round(nsamples * postProbs) } else { mod_0 <- list(params[[1]]) params <- c(mod_0,params) } params <- lapply(params, as.data.frame) .weighted_posteriors(params, weighted_samps, missing) } .weighted_posteriors <- function(params, weighted_samps, missing) { par_names <- unique(unlist(sapply(params, colnames), recursive = TRUE)) for (m in seq_along(weighted_samps)) { temp_params <- params[[m]] temp_params <- temp_params[sample(nrow(temp_params),size = weighted_samps[m]), ,drop = FALSE] # If any parameters not estimated in the model, they are assumed to be 0 (the default value of `missing`) temp_params[, setdiff(par_names, colnames(temp_params))] <- missing params[[m]] <- temp_params } # combine all do.call("rbind", params) } #' @keywords internal #' @importFrom insight find_algorithm .total_samps <- function(mod){ x <- insight::find_algorithm(mod) x$chains * (x$iterations - x$warmup) } bayestestR/R/p_map.R0000644000176200001440000001717213613227664014041 0ustar liggesusers#' Bayesian p-value based on the density at the 
Maximum A Posteriori (MAP) #' #' Compute a Bayesian equivalent of the \emph{p}-value, related to the odds that a parameter (described by its posterior distribution) has against the null hypothesis (\emph{h0}) using Mills' (2014, 2017) \emph{Objective Bayesian Hypothesis Testing} framework. It corresponds to the density value at 0 divided by the density at the Maximum A Posteriori (MAP). #' #' @details Note that this method is sensitive to the density estimation \code{method} (see the secion in the examples below). #' \subsection{Strengths and Limitations}{ #' \strong{Strengths:} Straightforward computation. Objective property of the posterior distribution. #' \cr \cr #' \strong{Limitations:} Limited information favoring the null hypothesis. Relates on density approximation. Indirect relationship between mathematical definition and interpretation. Only suitable for weak / very diffused priors. #' } #' #' @inheritParams hdi #' @inheritParams density_at #' #' @examples #' library(bayestestR) #' #' p_map(rnorm(1000, 0, 1)) #' p_map(rnorm(1000, 10, 1)) #' #' \dontrun{ #' library(rstanarm) #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' p_map(model) #' #' library(emmeans) #' p_map(emtrends(model, ~1, "wt")) #' #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' p_map(model) #' #' library(BayesFactor) #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' p_map(bf) #' } #' #' \donttest{ #' # --------------------------------------- #' # Robustness to density estimation method #' set.seed(333) #' data <- data.frame() #' for (iteration in 1:250) { #' x <- rnorm(1000, 1, 1) #' result <- data.frame( #' "Kernel" = p_map(x, method = "kernel"), #' "KernSmooth" = p_map(x, method = "KernSmooth"), #' "logspline" = p_map(x, method = "logspline") #' ) #' data <- rbind(data, result) #' } #' data$KernSmooth <- data$Kernel - data$KernSmooth #' data$logspline <- data$Kernel - data$logspline #' #' summary(data$KernSmooth) #' 
summary(data$logspline) #' boxplot(data[c("KernSmooth", "logspline")]) #' } #' @seealso \href{https://www.youtube.com/watch?v=Ip8Ci5KUVRc}{Jeff Mill's talk} #' #' @references \itemize{ #' \item Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} #' \item Mills, J. A. (2018). Objective Bayesian Precise Hypothesis Testing. University of Cincinnati. #' } #' #' @importFrom stats density #' @export p_map <- function(x, precision = 2^10, method = "kernel", ...) { UseMethod("p_map") } #' @rdname p_map #' @export p_pointnull <- p_map #' @export p_map.numeric <- function(x, precision = 2^10, method = "kernel", ...) { # Density at MAP map <- attributes(map_estimate(x, precision = precision, method = method, ...))$MAP_density # Density at 0 d_0 <- density_at(x, 0, precision = precision, method = method, ...) if (is.na(d_0)) d_0 <- 0 # Odds p <- d_0 / map class(p) <- c("p_map", class(p)) p } #' @export p_map.data.frame <- function(x, precision = 2^10, method = "kernel", ...) { x <- .select_nums(x) if (ncol(x) == 1) { p_MAP <- p_map(x[, 1], precision = precision, method = method, ...) } else { p_MAP <- sapply(x, p_map, precision = precision, method = method, simplify = TRUE, ...) } out <- data.frame( "Parameter" = names(x), "p_MAP" = p_MAP, row.names = NULL, stringsAsFactors = FALSE ) class(out) <- c("p_map", class(out)) out } #' @export p_map.emmGrid <- function(x, precision = 2^10, method = "kernel", ...) { if (!requireNamespace("emmeans")) { stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.") } xdf <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(x, names = FALSE))) out <- p_map(xdf, precision = precision, method = method, ...) 
attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


# Shared worker: extract a model's posterior draws and compute p_map on them.
#' @importFrom insight get_parameters
#' @keywords internal
.p_map_models <- function(x, precision, method, effects, component, parameters, ...) {
  p_map(insight::get_parameters(x, effects = effects, component = component, parameters = parameters),
    precision = precision, method = method, ...
  )
}


# coda 'mcmc' objects: fixed effects of the conditional component only.
#' @export
p_map.mcmc <- function(x, precision = 2^10, method = "kernel", parameters = NULL, ...) {
  out <- .p_map_models(
    x = x,
    precision = precision,
    method = method,
    effects = "fixed",
    component = "conditional",
    parameters = parameters,
    ...
  )
  attr(out, "data") <- insight::get_parameters(x, parameters = parameters)
  out
}


# arm::sim.merMod objects: user may choose fixed/random/all effects.
#' @export
p_map.sim.merMod <- function(x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), parameters = NULL, ...) {
  effects <- match.arg(effects)

  out <- .p_map_models(
    x = x,
    precision = precision,
    method = method,
    effects = effects,
    component = "conditional",
    parameters = parameters,
    ...
  )
  attr(out, "data") <- insight::get_parameters(x, effects = effects, parameters = parameters)
  out
}


# arm::sim objects: fixed effects of the conditional component only.
#' @export
p_map.sim <- function(x, precision = 2^10, method = "kernel", parameters = NULL, ...) {
  out <- .p_map_models(
    x = x,
    precision = precision,
    method = method,
    effects = "fixed",
    component = "conditional",
    parameters = parameters,
    ...
  )
  attr(out, "data") <- insight::get_parameters(x, parameters = parameters)
  out
}


# rstanarm models: clean parameter names are attached via .prepare_output().
#' @rdname p_map
#' @export
p_map.stanreg <- function(x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), parameters = NULL, ...)
{
  effects <- match.arg(effects)

  out <- .prepare_output(
    # BUGFIX: forward `...` to the inner p_map() call, consistent with
    # p_map.brmsfit below; previously extra density options were silently dropped.
    p_map(insight::get_parameters(x, effects = effects, parameters = parameters), precision = precision, method = method, ...),
    insight::clean_parameters(x)
  )

  class(out) <- unique(c("p_map", class(out)))
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


# brms models: additionally dispatch on the model component (zero-inflation etc.).
#' @rdname p_map
#' @export
p_map.brmsfit <- function(x, precision = 2^10, method = "kernel", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) {
  effects <- match.arg(effects)
  component <- match.arg(component)

  out <- .prepare_output(
    p_map(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), precision = precision, method = method, ...),
    insight::clean_parameters(x)
  )

  class(out) <- unique(c("p_map", class(out)))
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


# BayesFactor objects: all extractable parameters are used.
#' @export
p_map.BFBayesFactor <- function(x, precision = 2^10, method = "kernel", ...) {
  out <- p_map(insight::get_parameters(x), precision = precision, method = method, ...)
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


# MCMCglmm objects: only the first nfl columns of Sol are fixed effects.
#' @export
p_map.MCMCglmm <- function(x, precision = 2^10, method = "kernel", ...) {
  nF <- x$Fixed$nfl
  out <- p_map(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), precision = precision, method = method, ...)
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


# Coerce a p_map result to a plain numeric vector (data frame -> p_MAP column).
#' @rdname as.numeric.p_direction
#' @method as.numeric p_map
#' @export
as.numeric.p_map <- function(x, ...) {
  if ("data.frame" %in% class(x)) {
    return(as.numeric(as.vector(x$p_MAP)))
  } else {
    return(as.vector(x))
  }
}


#' @method as.double p_map
#' @export
as.double.p_map <- as.numeric.p_map
bayestestR/R/update.bayesfactor_models.R0000644000176200001440000000311113613227664020057 0ustar liggesusers#' Update bayesfactor_models
#'
#' @param object A \code{\link{bayesfactor_models}} object.
#' @param subset Vector of model indices to keep or remove.
#' @param reference Index of model to rereference to, or \code{"top"} to reference to the best model, or \code{"bottom"} to reference to the worst model. #' @param ... Currently not used. #' #' @examples #' \dontrun{ #' library(lme4) #' lmer1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) #' lmer2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) #' lmer3 <- lmer( #' Sepal.Length ~ Petal.Length + (Petal.Length | Species) + (1 | Petal.Width), #' data = iris #' ) #' #' m <- bayesfactor_models(lmer1, lmer2, lmer3, denominator = 1) #' m #' #' update(m, reference = "bottom") #' } #' @export update.bayesfactor_models <- function(object, subset = NULL, reference = NULL, ...) { if (!is.null(reference)) { if (reference == "top") { reference <- which.max(object$BF) } else if (reference == "bottom") { reference <- which.min(object$BF) } object$BF <- object$BF / object$BF[reference] attr(object, "denominator") <- reference } denominator <- attr(object, "denominator") if (!is.null(subset)) { object_subset <- object[subset, ] if (denominator %in% subset) { attr(object_subset, "denominator") <- which(denominator == subset) } else { object_subset <- rbind(object[denominator, ], object_subset) attr(object_subset, "denominator") <- 1 } object <- object_subset } object } bayestestR/R/utils_bayesfactor.R0000644000176200001440000002040713603574000016446 0ustar liggesusers # update_to_priors ------------------------------------------------------- #' @keywords internal .update_to_priors <- function(model, verbose = TRUE) { UseMethod(".update_to_priors") } #' @keywords internal #' @importFrom stats update getCall .update_to_priors.stanreg <- function(model, verbose = TRUE) { if (!requireNamespace("rstanarm")) { stop("Package \"rstanarm\" needed for this function to work. 
Please install it.") } prior_PD <- stats::getCall(model)$prior_PD if (!is.null(prior_PD) && isTRUE(eval(parse(text = prior_PD)))) { return(model) } if (verbose) { message("Computation of Bayes factors: sampling priors, please wait...") } prior_dists <- sapply(rstanarm::prior_summary(model), `[[`, "dist") if (anyNA(prior_dists)) { stop( "Cannot compute Bayes factors with flat priors (such as when priors are ", "set to 'NULL' in a 'stanreg' model), as Bayes factors inform about the raltive ", "likelihood of two 'hypotheses', and flat priors provide no likelihood.\n", "See '?bayesfactor_parameters' for more information.\n", call. = FALSE ) } model_prior <- suppressWarnings( stats::update(model, prior_PD = TRUE, refresh = 0) ) model_prior } #' @keywords internal #' @importFrom stats update #' @importFrom utils capture.output #' @importFrom methods is .update_to_priors.brmsfit <- function(model, verbose = TRUE) { if (!requireNamespace("brms")) { stop("Package \"brms\" needed for this function to work. Please install it.") } if (isTRUE(attr(model$prior, "sample_prior") == "only")) { return(model) } if (verbose) { message("Computation of Bayes factors: sampling priors, please wait...") } utils::capture.output( model_prior <- try(suppressMessages(suppressWarnings( stats::update(model, sample_prior = "only", refresh = 0) )), silent = TRUE) ) if (is(model_prior, "try-error")) { if (grepl("proper priors", model_prior)) { stop( "Cannot compute Bayes factors with flat priors (such as the default ", "priors for fixed-effects in a 'brmsfit' model), as Bayes factors inform about ", "the raltive likelihood of two 'hypotheses', and flat priors provide no ", "likelihood.\n", "See '?bayesfactor_parameters' for more information.\n", call. 
= FALSE ) } else { stop(model_prior) } } model_prior } #' @keywords internal .format_big_small <- function(BF, digits = 2) { BFx <- as.character(round(BF, digits = digits)) big_ind <- abs(BF) >= (10 * 10^digits) | abs(BF) < 1 / (10^digits) big_ind <- sapply(big_ind, isTRUE) if (isTRUE(any(big_ind))) { BFx[big_ind] <- formatC(BF, format = "e", digits = digits)[big_ind] } BFx } # clean priors and posteriors --------------------------------------------- #' @keywords internal .clean_priors_and_posteriors <- function(posterior, prior, verbose = TRUE, ...) { UseMethod(".clean_priors_and_posteriors") } #' @keywords internal #' @importFrom insight get_parameters .clean_priors_and_posteriors.stanreg <- function(posterior, prior, verbose = TRUE, effects, component, ...) { # Get Priors if (is.null(prior)) { prior <- posterior } prior <- .update_to_priors(prior, verbose = verbose) prior <- insight::get_parameters(prior, effects = effects, component = component, ...) posterior <- insight::get_parameters(posterior, effects = effects, component = component, ...) list(posterior = posterior, prior = prior) } #' @keywords internal .clean_priors_and_posteriors.brmsfit <- .clean_priors_and_posteriors.stanreg #' @keywords internal #' @importFrom stats update .clean_priors_and_posteriors.emmGrid <- function(posterior, prior, verbose = TRUE) { if (!requireNamespace("emmeans")) { stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.") } if (is.null(prior)) { prior <- posterior warning( "Prior not specified! ", "Please provide the original model to get meaningful results." 
)
  } else if (!inherits(prior, "emmGrid")) { # then is it a model
    # Resample the model under its priors only, then project those prior
    # draws onto the same reference grid as the posterior emmGrid.
    prior <- .update_to_priors(prior, verbose = verbose)
    prior <- emmeans::ref_grid(prior)
    prior <- prior@post.beta
    prior <- stats::update(posterior, post.beta = prior)
  }

  # Flatten both emmGrid objects to plain data frames of draws (one column per estimate).
  prior <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(prior, names = FALSE)))
  posterior <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(posterior, names = FALSE)))

  list(posterior = posterior, prior = prior)
}


# make_BF_plot_data -------------------------------------------------------

# Build plotting data for Savage-Dickey Bayes factor visualizations:
# logspline density curves for prior and posterior draws, plus the density
# value at the null point(s), optionally truncated/renormalized by direction.
#' @importFrom stats median mad approx
#' @importFrom utils stack
#' @keywords internal
.make_BF_plot_data <- function(posterior, prior, direction, null) {
  if (!requireNamespace("logspline")) {
    stop("Package \"logspline\" needed for this function to work. Please install it.")
  }

  # Estimate a density curve (and density at the null) for each column of 'samples'.
  estimate_samples_density <- function(samples) {
    nm <- .safe_deparse(substitute(samples)) # "posterior" or "prior" label
    samples <- utils::stack(samples)
    samples <- split(samples, samples$ind)

    samples <- lapply(samples, function(data) {
      # 1. estimate density
      x <- data$values

      extend_scale <- 0.05
      precision <- 2^8

      # Support: sample range clipped to median +/- 7 MAD (robust outlier
      # trimming), then padded by 5% on each side.
      x_range <- range(x)
      x_rangex <- stats::median(x) + 7 * stats::mad(x) * c(-1, 1)
      x_range <- c(
        max(c(x_range[1], x_rangex[1])),
        min(c(x_range[2], x_rangex[2]))
      )

      extension_scale <- diff(x_range) * extend_scale
      x_range[1] <- x_range[1] - extension_scale
      x_range[2] <- x_range[2] + extension_scale

      x_axis <- seq(x_range[1], x_range[2], length.out = precision)
      f_x <- logspline::logspline(x)
      y <- logspline::dlogspline(x_axis, f_x)
      d_points <- data.frame(x = x_axis, y = y)

      # 2. estimate points
      # Interpolate the curve at the null value(s); a null outside the
      # estimated support gets density 0.
      d_null <- stats::approx(d_points$x, d_points$y, xout = null)
      d_null$y[is.na(d_null$y)] <- 0

      # 3. direction?
      # For one-sided tests, truncate the curve at the null boundary and
      # renormalize by the retained tail mass so it integrates to 1.
      if (direction > 0) {
        d_points <- d_points[d_points$x > min(null), , drop = FALSE]
        norm_factor <- 1 - logspline::plogspline(min(null), f_x)
        d_points$y <- d_points$y / norm_factor
        d_null$y <- d_null$y / norm_factor
      } else if (direction < 0) {
        d_points <- d_points[d_points$x < max(null), , drop = FALSE]
        norm_factor <- logspline::plogspline(max(null), f_x)
        d_points$y <- d_points$y / norm_factor
        d_null$y <- d_null$y / norm_factor
      }

      d_points$ind <- d_null$ind <- data$ind[1]

      list(d_points, d_null)
    })

    # 4a. organize: bind per-parameter results into two data frames
    # (curve points and null points), tagged with the distribution label.
    point0 <- lapply(samples, function(.) as.data.frame(.[[2]]))
    point0 <- do.call("rbind", point0)

    samplesX <- lapply(samples, function(.) .[[1]])
    samplesX <- do.call("rbind", samplesX)

    samplesX$Distribution <- point0$Distribution <- nm
    rownames(samplesX) <- rownames(point0) <- c()

    list(samplesX, point0)
  }

  # 4b. organize: estimate both distributions and stack them for plotting
  posterior <- estimate_samples_density(posterior)
  prior <- estimate_samples_density(prior)

  list(
    plot_data = rbind(posterior[[1]], prior[[1]]),
    d_points = rbind(posterior[[2]], prior[[2]])
  )
}


# As numeric vector -------------------------------------------------------

#' @export
as.numeric.bayesfactor_inclusion <- function(x, ...)
{ if ("data.frame" %in% class(x)) { return(as.numeric(as.vector(x$BF))) } else { return(as.vector(x)) } } #' @export as.numeric.bayesfactor_models <- as.numeric.bayesfactor_inclusion #' @export as.numeric.bayesfactor_parameters <- as.numeric.bayesfactor_inclusion #' @export as.numeric.bayesfactor_restricted <- as.numeric.bayesfactor_inclusion #' @export as.double.bayesfactor_inclusion <- as.numeric.bayesfactor_inclusion #' @export as.double.bayesfactor_models <- as.numeric.bayesfactor_inclusion #' @export as.double.bayesfactor_parameters <- as.numeric.bayesfactor_inclusion #' @export as.double.bayesfactor_restricted <- as.numeric.bayesfactor_inclusion bayestestR/R/describe_posterior.R0000644000176200001440000005321113615677374016636 0ustar liggesusers#' Describe Posterior Distributions #' #' Compute indices relevant to describe and characterise the posterior distributions. #' #' @param posteriors A vector, dataframe or model of posterior draws. #' @param ci_method The type of index used for Credible Interval. Can be #' \code{"HDI"} (default, see \code{\link[bayestestR:hdi]{hdi}}), \code{"ETI"} #' (see \code{\link[bayestestR:eti]{eti}}) or \code{"SI"} #' (see \code{\link[bayestestR:si]{si}}). #' @param test The indices of effect existence to compute. Character (vector) or #' list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), #' \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), #' \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. #' For each "test", the corresponding \pkg{bayestestR} function is called #' (e.g. \code{\link[bayestestR:rope]{rope}} or \code{\link[bayestestR:p_direction]{p_direction}}) and its results #' included in the summary output. #' @param rope_range ROPE's lower and higher bounds. Should be a list of two #' values (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. If \code{"default"}, #' the bounds are set to \code{x +- 0.1*SD(response)}. 
#' @param rope_ci The Credible Interval (CI) probability, corresponding to the #' proportion of HDI, to use for the percentage in ROPE. #' #' @inheritParams point_estimate #' @inheritParams ci #' @inheritParams si #' #' @details One or more components of point estimates (like posterior mean or median), #' intervals and tests can be ommitted from the summary output by setting the #' related argument to \code{NULL}. For example, \code{test = NULL} and #' \code{centrality = NULL} would only return the HDI (or CI). #' #' @references \itemize{ #' \item \href{https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html}{Comparison of Point-Estimates} #' \item \href{https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html}{Region of Practical Equivalence (ROPE)} #' \item \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{Bayes factors} #' } #' #' @examples #' library(bayestestR) #' #' x <- rnorm(1000) #' describe_posterior(x) #' describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(x, ci = c(0.80, 0.90)) #' #' df <- data.frame(replicate(4, rnorm(100))) #' describe_posterior(df) #' describe_posterior(df, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(df, ci = c(0.80, 0.90)) #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm") && require("emmeans")) { #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' describe_posterior(model) #' describe_posterior(model, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(model, ci = c(0.80, 0.90)) #' #' # emmeans estimates #' # ----------------------------------------------- #' describe_posterior(emtrends(model, ~1, "wt")) #' } #' #' # brms models #' # ----------------------------------------------- #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = 
mtcars) #' describe_posterior(model) #' describe_posterior(model, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(model, ci = c(0.80, 0.90)) #' } #' #' # BayesFactor objects #' # ----------------------------------------------- #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' describe_posterior(bf) #' describe_posterior(bf, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(bf, ci = c(0.80, 0.90)) #' } #' } #' @importFrom stats mad median sd setNames #' #' @export describe_posterior <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, ...) { UseMethod("describe_posterior") } #' @keywords internal .describe_posterior <- function(x, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, bf_prior = NULL, BF = 1, ...) { # Point-estimates if (!is.null(centrality)) { estimates <- point_estimate(x, centrality = centrality, dispersion = dispersion, ...) if (!"Parameter" %in% names(estimates)) { estimates <- cbind(data.frame("Parameter" = "Posterior"), estimates) } } else { estimates <- data.frame("Parameter" = NA) } # Uncertainty if (!is.null(ci)) { ci_method <- match.arg(tolower(ci_method), c("hdi", "quantile", "ci", "eti", "si")) if (ci_method == "si") { uncertainty <- ci(x, BF = BF, method = ci_method, prior = bf_prior, ...) } else { uncertainty <- ci(x, ci = ci, method = ci_method, ...) 
} if (!"Parameter" %in% names(uncertainty)) { uncertainty <- cbind(data.frame("Parameter" = "Posterior"), uncertainty) } } else { uncertainty <- data.frame("Parameter" = NA) } # Effect Existence if (!is.null(test)) { test <- .check_test_values(test) if ("all" %in% test) { test <- c("pd", "p_map", "p_rope", "p_significance", "rope", "equivalence", "bf") } ## TODO no BF for arm::sim if (inherits(x, c("sim", "sim.merMod", "mcmc"))) test <- setdiff(test, "bf") # MAP-based p-value if (any(c("p_map", "p_pointnull") %in% test)) { test_pmap <- p_map(x, ...) if (!is.data.frame(test_pmap)) test_pmap <- data.frame("Parameter" = "Posterior", "p_map" = test_pmap) } else { test_pmap <- data.frame("Parameter" = NA) } # Probability of direction if (any(c("pd", "p_direction", "pdir", "mpe") %in% test)) { test_pd <- p_direction(x, ...) if (!is.data.frame(test_pd)) test_pd <- data.frame("Parameter" = "Posterior", "pd" = test_pd) } else { test_pd <- data.frame("Parameter" = NA) } # Probability of rope if (any(c("p_rope") %in% test)) { test_prope <- p_rope(x, range = rope_range, ...) if (!"Parameter" %in% names(test_prope)) { test_prope <- cbind(data.frame("Parameter" = "Posterior"), test_prope) } } else { test_prope <- data.frame("Parameter" = NA) } # Probability of significance if (any(c("ps", "p_sig", "p_significance") %in% test)) { test_psig <- p_significance(x, threshold = rope_range, ...) if (!is.data.frame(test_psig)) test_psig <- data.frame("Parameter" = "Posterior", "ps" = test_psig) } else { test_psig <- data.frame("Parameter" = NA) } # ROPE if (any(c("rope") %in% test)) { test_rope <- rope(x, range = rope_range, ci = rope_ci, ...) 
if (!"Parameter" %in% names(test_rope)) { test_rope <- cbind(data.frame("Parameter" = "Posterior"), test_rope) } names(test_rope)[names(test_rope) == "CI"] <- "ROPE_CI" } else { test_rope <- data.frame("Parameter" = NA) } # Equivalence test if (any(c("equivalence", "equivalence_test", "equitest") %in% test)) { if (any(c("rope") %in% test)) { equi_warnings <- FALSE } else { equi_warnings <- TRUE } test_equi <- equivalence_test(x, range = rope_range, ci = rope_ci, verbose = equi_warnings, ...) test_equi$Cleaned_Parameter <- NULL if (!"Parameter" %in% names(test_equi)) { test_equi <- cbind(data.frame("Parameter" = "Posterior"), test_equi) } names(test_equi)[names(test_equi) == "CI"] <- "ROPE_CI" test_rope <- merge(test_rope, test_equi, all = TRUE) test_rope <- test_rope[!names(test_rope) %in% c("HDI_low", "HDI_high")] } # Bayes Factors if (any(c("bf", "bayesfactor", "bayes_factor") %in% test)) { test_bf <- bayesfactor_parameters(x, prior = bf_prior, ...) if (!"Parameter" %in% names(test_bf)) { test_bf <- cbind(data.frame("Parameter" = "Posterior"), test_bf) } } else { test_bf <- data.frame("Parameter" = NA) } } else { test_pd <- data.frame("Parameter" = NA, "Effects" = NA, "Component" = NA) test_prope <- data.frame("Parameter" = NA, "Effects" = NA, "Component" = NA) test_psig <- data.frame("Parameter" = NA, "Effects" = NA, "Component" = NA) test_rope <- data.frame("Parameter" = NA, "Effects" = NA, "Component" = NA) test_bf <- data.frame("Parameter" = NA, "Effects" = NA, "Component" = NA) test_pmap <- data.frame("Parameter" = NA, "Effects" = NA, "Component" = NA) } # for data frames or numeric, and even for some models, we don't # have the "Effects" or "Component" column for all data frames. 
# To make "merge()" work, we add those columns to all data frames, # filled with NA, and remove the columns later if necessary estimates <- .add_effects_component_column(estimates) uncertainty <- .add_effects_component_column(uncertainty) test_pmap <- .add_effects_component_column(test_pmap) test_pd <- .add_effects_component_column(test_pd) test_prope <- .add_effects_component_column(test_prope) test_psig <- .add_effects_component_column(test_psig) test_rope <- .add_effects_component_column(test_rope) test_bf <- .add_effects_component_column(test_bf) merge_by <- c("Parameter", "Effects", "Component") # at least one "valid" data frame needs a row id, to restore # row-order after merging if (!all(is.na(estimates$Parameter))) { estimates$.rowid <- 1:nrow(estimates) } else if (!all(is.na(test_pmap$Parameter))) { test_pmap$.rowid <- 1:nrow(test_pmap) } else if (!all(is.na(test_pd$Parameter))) { test_pd$.rowid <- 1:nrow(test_pd) } else if (!all(is.na(test_prope$Parameter))) { test_prope$.rowid <- 1:nrow(test_prope) } else if (!all(is.na(test_psig$Parameter))) { test_psig$.rowid <- 1:nrow(test_psig) } else if (!all(is.na(test_rope$Parameter))) { test_rope$.rowid <- 1:nrow(test_rope) } else if (!all(is.na(test_bf$Parameter))) { test_bf$.rowid <- 1:nrow(test_bf) } else { estimates$.rowid <- 1:nrow(estimates) } # remove duplicated columns if (all(c("rope", "p_rope") %in% test)) { test_prope$ROPE_low <- NULL test_prope$ROPE_high <- NULL } # merge all data frames out <- merge(estimates, uncertainty, by = merge_by, all = TRUE) out <- merge(out, test_pmap, by = merge_by, all = TRUE) out <- merge(out, test_pd, by = merge_by, all = TRUE) out <- merge(out, test_prope, by = merge_by, all = TRUE) out <- merge(out, test_psig, by = merge_by, all = TRUE) out <- merge(out, test_rope, by = merge_by, all = TRUE) out <- merge(out, test_bf, by = merge_by, all = TRUE) out <- out[!is.na(out$Parameter), ] # check which columns can be removed at the end. 
In any case, we don't # need .rowid in the returned data frame, and when the Effects or Component # column consist only of missing values, we remove those columns as well remove_columns <- ".rowid" if (all(is.na(out$Effects)) || length(unique(out$Effects)) < 2) remove_columns <- c(remove_columns, "Effects") if (all(is.na(out$Component)) || length(unique(out$Component)) < 2) remove_columns <- c(remove_columns, "Component") attr(out, "ci_method") <- ci_method # Restore columns order .remove_column(out[order(out$.rowid), ], remove_columns) } #' @keywords internal .add_effects_component_column <- function(x) { if (!"Effects" %in% names(x)) x <- cbind(x, data.frame("Effects" = NA)) if (!"Component" %in% names(x)) x <- cbind(x, data.frame("Component" = NA)) x } #' @rdname describe_posterior #' @param bf_prior Distribution representing a prior for the computation of Bayes factors / SI. Used if the input is a posterior, otherwise (in the case of models) ignored. #' @export describe_posterior.numeric <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, bf_prior = NULL, BF = 1, ...) { .describe_posterior(posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, BF = BF, ...) } #' @export describe_posterior.double <- describe_posterior.numeric #' @export describe_posterior.data.frame <- describe_posterior.numeric #' @export describe_posterior.sim.merMod <- describe_posterior.numeric #' @export describe_posterior.sim <- describe_posterior.numeric #' @export describe_posterior.emmGrid <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, bf_prior = NULL, BF = 1, ...) 
{ if (!requireNamespace("emmeans")) { stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.") } if (any(c("all", "bf", "bayesfactor", "bayes_factor") %in% tolower(test)) | "si" %in% tolower(ci_method)) { samps <- .clean_priors_and_posteriors(posteriors, bf_prior) bf_prior <- samps$prior } posteriors <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(posteriors, names = FALSE))) out <- .describe_posterior( posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, BF = BF, ... ) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) attr(out, "ci_method") <- ci_method out } #' @inheritParams insight::get_parameters #' @inheritParams diagnostic_posterior #' @importFrom insight find_algorithm #' @param priors Add the prior used for each parameter. #' @rdname describe_posterior #' @export describe_posterior.stanreg <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = c("fixed", "random", "all"), parameters = NULL, BF = 1, ...) { if ((any(c("all", "bf", "bayesfactor", "bayes_factor") %in% tolower(test)) | "si" %in% tolower(ci_method)) & is.null(bf_prior)) { bf_prior <- .update_to_priors(posteriors) } out <- .describe_posterior(posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, BF = BF, effects = effects, parameters = parameters, ...) 
if (!is.null(diagnostic)) { model_algorithm <- insight::find_algorithm(posteriors) if (model_algorithm$algorithm %in% c("fullrank", "meanfield")) { insight::print_color("Model diagnostic not available for stanreg-models fitted with 'fullrank' or 'meanfield'-algorithm.\n", "red") } else { diagnostic <- diagnostic_posterior( posteriors, diagnostic, effects = effects, parameters = parameters, ... ) out <- .merge_and_sort(out, diagnostic, by = "Parameter", all = TRUE) } } if (isTRUE(priors)) { priors_data <- describe_prior(posteriors, ...) out <- .merge_and_sort(out, priors_data, by = "Parameter", all = TRUE) } attr(out, "ci_method") <- ci_method class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @inheritParams insight::get_parameters #' @inheritParams diagnostic_posterior #' @importFrom insight find_algorithm #' @param priors Add the prior used for each parameter. #' @rdname describe_posterior #' @export describe_posterior.stanmvreg <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = "p_direction", rope_range = "default", rope_ci = 0.89, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = c("fixed", "random", "all"), parameters = NULL, ...) { out <- .describe_posterior(posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, effects = effects, parameters = parameters, ...) out$Response <- gsub("^(.*)\\|(.*)", "\\1", out$Parameter) if (!is.null(diagnostic)) { model_algorithm <- insight::find_algorithm(posteriors) if (model_algorithm$algorithm %in% c("fullrank", "meanfield")) { insight::print_color("Model diagnostic not available for stanreg-models fitted with 'fullrank' or 'meanfield'-algorithm.\n", "red") } else { diagnostic <- diagnostic_posterior( posteriors, diagnostic, effects = effects, parameters = parameters, ... 
) out <- .merge_and_sort(out, diagnostic, by = c("Parameter", "Response"), all = TRUE) } } if (isTRUE(priors)) { priors_data <- describe_prior(posteriors, ...) out <- .merge_and_sort(out, priors_data, by = c("Parameter", "Response"), all = TRUE) } attr(out, "ci_method") <- ci_method class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @inheritParams describe_posterior.stanreg #' @rdname describe_posterior #' @export describe_posterior.MCMCglmm <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, diagnostic = "ESS", parameters = NULL, ...) { out <- .describe_posterior(posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, effects = "fixed", parameters = parameters, ...) if (!is.null(diagnostic) && diagnostic == "ESS") { diagnostic <- effective_sample(posteriors, effects = "fixed", parameters = parameters, ...) out <- .merge_and_sort(out, diagnostic, by = "Parameter", all = TRUE) } out } #' @export describe_posterior.mcmc <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, parameters = NULL, ...) { .describe_posterior(as.data.frame(posteriors), centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, effects = "fixed", parameters = parameters, ...) 
} #' @inheritParams describe_posterior.stanreg #' @rdname describe_posterior #' @export describe_posterior.brmsfit <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.89, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, BF = 1, ...) { if ((any(c("all", "bf", "bayesfactor", "bayes_factor") %in% tolower(test)) | "si" %in% tolower(ci_method)) & is.null(bf_prior)) { bf_prior <- .update_to_priors(posteriors) } out <- .describe_posterior(posteriors, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, BF = BF, effects = effects, component = component, parameters = parameters, ...) if (!is.null(diagnostic)) { diagnostic <- diagnostic_posterior( posteriors, diagnostic, effects = effects, component = component, parameters = parameters, ... ) out <- .merge_and_sort(out, diagnostic, by = "Parameter", all = TRUE) } attr(out, "ci_method") <- ci_method class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @rdname describe_posterior #' @export describe_posterior.BFBayesFactor <- function(posteriors, centrality = "median", dispersion = FALSE, ci = 0.89, ci_method = "hdi", test = c("p_direction", "rope", "bf"), rope_range = "default", rope_ci = 0.89, priors = TRUE, ...) 
{
  # Match test args to catch BFs
  if (!is.null(test)) {
    test <- .check_test_values(test)
    if ("all" %in% test) {
      test <- c("pd", "p_map", "p_rope", "p_significance", "rope", "equivalence", "bf")
    }
  }

  # Remove BF from the test list; for BFBayesFactor objects, Bayes factors
  # are computed separately (via bayesfactor_models()) and re-added below.
  if (any(c("bf", "bayesfactor", "bayes_factor") %in% test)) {
    test <- test[!test %in% c("bf", "bayesfactor", "bayes_factor")]
    compute_bf <- TRUE
  } else {
    compute_bf <- FALSE
  }

  # Describe posterior
  out <- .describe_posterior(
    posteriors,
    centrality = centrality,
    dispersion = dispersion,
    ci = ci,
    ci_method = ci_method,
    test = test,
    rope_range = rope_range,
    rope_ci = rope_ci,
    ...
  )

  # Compute and re-add BF a posteriori. Failures (e.g. models for which
  # bayesfactor_models() is not applicable) are deliberately swallowed:
  # the BF column is then simply not added.
  if (compute_bf) {
    tryCatch({
      out$BF <- as.data.frame(bayesfactor_models(posteriors, ...))[-1, ]$BF
    },
    error = function(e) {
      NULL
    }
    )
  }

  # Add priors
  if (priors) {
    priors_data <- describe_prior(posteriors, ...)
    out <- .merge_and_sort(out, priors_data, by = intersect(names(out), names(priors_data)), all = TRUE)
  }

  out
}

# Validate the 'test' argument against all supported aliases (several values
# allowed). FIX: the alias "psig" was accepted by match.arg() here but is not
# recognized by the downstream check in .describe_posterior(), which looks for
# c("ps", "p_sig", "p_significance") - so test = "psig" passed validation and
# was then silently ignored. It is now canonicalized to "p_significance".
.check_test_values <- function(test) {
  out <- match.arg(tolower(test), c(
    "pd", "p_direction", "pdir", "mpe", "ps", "psig", "p_significance",
    "p_rope", "rope", "equivalence", "equivalence_test", "equitest",
    "bf", "bayesfactor", "bayes_factor", "p_map", "all"
  ), several.ok = TRUE)
  out[out == "psig"] <- "p_significance"
  out
}
bayestestR/R/print.p_rope.R0000644000176200001440000000016513573333034015350 0ustar liggesusers#' @export
print.p_rope <- function(x, digits = 2, ...) {
  print.data.frame(x, digits = digits, ...) # TODO
}
bayestestR/R/distribution.R0000644000176200001440000001371113603755515015457 0ustar liggesusers#' Empirical Distributions
#'
#' Generate a sequence of n-quantiles, i.e., a sample of size \code{n} with a near-perfect distribution.
#'
#' @param type Can be any of the names from base R's \link[stats]{Distributions}, like \code{"cauchy"}, \code{"pois"} or \code{"beta"}.
#' @param random Generate near-perfect or random (simple wrappers for the base R \code{r*} functions) distributions.
#' @param ... Arguments passed to or from other methods.
#' #' @examples #' library(bayestestR) #' x <- distribution(n = 10) #' plot(density(x)) #' #' x <- distribution(type = "gamma", n = 100, shape = 2) #' plot(density(x)) #' @export distribution <- function(type = "normal", ...) { basr_r_distributions <- c( "beta", "binom", "cauchy", "chisq", "chisquared", "exp", "f", "gamma", "geom", "hyper", "lnorm", "multinom", "nbinom", "normal", "pois", "poisson", "student", "t", "student_t", "unif", "uniform", "weibull" ) switch( match.arg(arg = type, choices = basr_r_distributions), "normal" = distribution_normal(...), "cauchy" = distribution_cauchy(...), "poisson" = distribution_poisson(...), "gamma" = distribution_gamma(...), "t" = , "student" = , "student_t" = distribution_student(...), "chisquared" = distribution_chisquared(...), "uniform" = distribution_uniform(...), "beta" = distribution_beta(...), distribution_custom(type = type, ...) ) } #' @rdname distribution #' @inheritParams stats::rnorm #' @importFrom stats qnorm rnorm #' @export distribution_normal <- function(n, mean = 0, sd = 1, random = FALSE, ...) { if (random) { stats::rnorm(n, mean, sd) } else { stats::qnorm(seq(1 / n, 1 - 1 / n, length.out = n), mean, sd, ...) } } #' @rdname distribution #' @inheritParams stats::rbinom #' @importFrom stats qbinom rbinom #' @export distribution_binomial <- function(n, size = 1, prob = 0.5, random = FALSE, ...) { if (random) { stats::rbinom(n, size, prob) } else { stats::qbinom(seq(1 / n, 1 - 1 / n, length.out = n), size, prob, ...) } } #' @rdname distribution #' @inheritParams stats::rcauchy #' @importFrom stats rcauchy qcauchy #' @export distribution_cauchy <- function(n, location = 0, scale = 1, random = FALSE, ...) { if (random) { stats::rcauchy(n, location, scale) } else { stats::qcauchy(seq(1 / n, 1 - 1 / n, length.out = n), location, scale, ...) } } #' @rdname distribution #' @inheritParams stats::rpois #' @importFrom stats rpois qpois #' @export distribution_poisson <- function(n, lambda = 1, random = FALSE, ...) 
{
  if (random) {
    stats::rpois(n, lambda)
  } else {
    stats::qpois(seq(1 / n, 1 - 1 / n, length.out = n), lambda, ...)
  }
}

#' @rdname distribution
#' @inheritParams stats::rt
#' @importFrom stats rt qt
#' @export
distribution_student <- function(n, df, ncp, random = FALSE, ...) {
  if (random) {
    stats::rt(n, df, ncp)
  } else {
    stats::qt(seq(1 / n, 1 - 1 / n, length.out = n), df, ncp, ...)
  }
}

#' @rdname distribution
#' @inheritParams stats::rchisq
#' @importFrom stats rchisq qchisq
#' @export
distribution_chisquared <- function(n, df, ncp = 0, random = FALSE, ...) {
  if (random) {
    stats::rchisq(n, df, ncp)
  } else {
    stats::qchisq(seq(1 / n, 1 - 1 / n, length.out = n), df, ncp, ...)
  }
}

#' @rdname distribution
#' @inheritParams stats::runif
#' @importFrom stats runif qunif
#' @export
distribution_uniform <- function(n, min = 0, max = 1, random = FALSE, ...) {
  if (random) {
    stats::runif(n, min, max)
  } else {
    stats::qunif(seq(1 / n, 1 - 1 / n, length.out = n), min, max, ...)
  }
}

#' @rdname distribution
#' @inheritParams stats::rbeta
#' @importFrom stats rbeta qbeta
#' @export
distribution_beta <- function(n, shape1, shape2, ncp = 0, random = FALSE, ...) {
  if (random) {
    stats::rbeta(n, shape1, shape2, ncp = ncp)
  } else {
    stats::qbeta(seq(1 / n, 1 - 1 / n, length.out = n), shape1, shape2, ncp = ncp, ...)
  }
}

#' @rdname distribution
#' @inheritParams tweedie::rtweedie
#' @export
distribution_tweedie <- function(n, xi = NULL, mu, phi, power = NULL, random = FALSE, ...) {
  if (!requireNamespace("tweedie", quietly = TRUE)) {
    # FIX: the error message previously misspelled the package name as 'tweedi'.
    stop("Package 'tweedie' required for this function to work. Please install it.")
  }
  if (random) {
    tweedie::rtweedie(n = n, xi = xi, mu = mu, phi = phi, power = power)
  } else {
    tweedie::qtweedie(p = seq(1 / n, 1 - 1 / n, length.out = n), xi = xi, mu = mu, phi = phi, power = power)
  }
}

#' @rdname distribution
#' @inheritParams stats::rgamma
#' @importFrom stats rgamma qgamma
#' @export
distribution_gamma <- function(n, shape, scale = 1, random = FALSE, ...)
{
  if (random) {
    stats::rgamma(n = n, shape = shape, scale = scale)
  } else {
    stats::qgamma(p = seq(1 / n, 1 - 1 / n, length.out = n), shape = shape, scale = scale)
  }
}

#' @rdname distribution
#' @inheritParams distribution
#' @export
distribution_custom <- function(n, type = "norm", ..., random = FALSE) {
  # Dispatch to the base R "r<type>" (random) or "q<type>" (quantile)
  # function by name; extra arguments are forwarded untouched.
  if (random) {
    f <- match.fun(paste0("r", type))
    f(n, ...)
  } else {
    f <- match.fun(paste0("q", type))
    f(seq(1 / n, 1 - 1 / n, length.out = n), ...)
  }
}

#' @rdname distribution
#' @inheritParams stats::rnorm
#' @importFrom stats qnorm rnorm
#' @export
distribution_mixture_normal <- function(n, mean = c(-3, 3), sd = 1, random = FALSE, ...) {
  # Split the requested sample size evenly across the mixture components.
  n <- round(n / length(mean))
  # Recycle 'sd' so that each component has its own standard deviation.
  sd <- c(sd)
  if (length(sd) != length(mean)) {
    sd <- rep(sd, length.out = length(mean))
  }
  # seq_along() is safe for zero-length 'mean' (unlike 1:length(mean)),
  # and unlist(lapply(...)) avoids growing a vector inside a loop.
  unlist(lapply(seq_along(mean), function(i) {
    distribution_normal(n = n, mean = mean[i], sd = sd[i], random = random)
  }))
}

#' @rdname distribution
#' @inheritParams stats::rnorm
#' @importFrom stats qnorm
#' @export
rnorm_perfect <- function(n, mean = 0, sd = 1) {
  .Deprecated("distribution_normal")
  stats::qnorm(seq(1 / n, 1 - 1 / n, length.out = n), mean, sd)
}
bayestestR/R/p_rope.R0000644000176200001440000000540713607554751014222 0ustar liggesusers#' Probability of not being in ROPE
#'
#' Compute the proportion of the posterior distribution that doesn't lie within a region of practical equivalence (ROPE). It is equivalent to running \code{rope(..., ci = 1)}.
#'
#' @inheritParams rope
#'
#' @examples
#' library(bayestestR)
#'
#' p_rope(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1))
#' p_rope(x = mtcars, range = c(-0.1, 0.1))
#' @export
p_rope <- function(x, ...) {
  UseMethod("p_rope")
}

#' @method as.double p_rope
#' @export
as.double.p_rope <- function(x, ...) {
  x
}

#' @rdname p_rope
#' @export
p_rope.default <- function(x, ...) {
  NULL
}

#' @rdname p_rope
#' @export
p_rope.numeric <- function(x, range = "default", ...)
{ out <- .p_rope(rope(x, range = range, ci = 1, ...)) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @rdname p_rope #' @export p_rope.data.frame <- p_rope.numeric #' @rdname p_rope #' @export p_rope.emmGrid <- p_rope.numeric #' @rdname p_rope #' @export p_rope.BFBayesFactor <- p_rope.numeric #' @rdname p_rope #' @export p_rope.MCMCglmm <- p_rope.numeric #' @rdname p_rope #' @export p_rope.stanreg <- function(x, range = "default", effects = c("fixed", "random", "all"), parameters = NULL, ...) { out <- .p_rope(rope(x, range = range, ci = 1, effects = effects, parameters = parameters, ...)) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @rdname p_rope #' @export p_rope.brmsfit <- function(x, range = "default", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { out <- .p_rope(rope(x, range = range, ci = 1, effects = effects, component = component, parameters = parameters, ...)) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_rope.sim.merMod <- p_rope.stanreg #' @export p_rope.sim <- function(x, range = "default", parameters = NULL, ...) { out <- .p_rope(rope(x, range = range, ci = 1, parameters = parameters, ...)) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export p_rope.mcmc <- function(x, range = "default", parameters = NULL, ...) 
{ out <- .p_rope(rope(x, range = range, ci = 1, parameters = parameters, ...)) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } # Internal ---------------------------------------------------------------- #' @keywords internal .p_rope <- function(rope_rez) { cols <- c("Parameter", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Effects", "Component") out <- as.data.frame(rope_rez[cols[cols %in% names(rope_rez)]]) names(out)[names(out) == "ROPE_Percentage"] <- "p_ROPE" class(out) <- c("p_rope", "see_p_rope", "data.frame") out }bayestestR/R/contr.bayes.R0000644000176200001440000000526613537046305015171 0ustar liggesusers#' Orthonormal Contrast Matrices for Bayesian Estimation #' #' Returns a design or model matrix of orthonormal contrasts such that the #' marginal prior on all effects is identical. Implementation from Singmann #' \& Gronau's \href{https://github.com/bayesstuff/bfrms/}{\code{bfrms}}, #' following the description in Rouder, Morey, Speckman, \& Province (2012, p. 363). #' #' Though using this factor coding scheme might obscure the interpretation of #' parameters, it is essential for correct estimation of Bayes factors for #' contrasts and multi-level order restrictions. See info on specifying correct #' priors for factors with more than 2 levels in #' \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}. #' #' @param n a vector of levels for a factor, or the number of levels. #' @param contrasts logical indicating whether contrasts should be computed. #' #' @references Rouder, J. N., Morey, R. D., Speckman, P. L., \& Province, J. M. #' (2012). Default Bayes factors for ANOVA designs. *Journal of Mathematical #' Psychology*, 56(5), 356-374. https://doi.org/10.1016/j.jmp.2012.08.001 #' #' @return A \code{matrix} with n rows and k columns, with k=n-1 if contrasts is #' \code{TRUE} and k=n if contrasts is \code{FALSE}. #' #' @examples #' \dontrun{ #' contr.bayes(2) # Q_2 in Rouder et al. (2012, p. 
363) #' # [,1] #' # [1,] -0.7071068 #' # [2,] 0.7071068 #' #' contr.bayes(5) # equivalent to Q_5 in Rouder et al. (2012, p. 363) #' # [,1] [,2] [,3] [,4] #' # [1,] 0.0000000 0.8944272 0.0000000 0.0000000 #' # [2,] 0.0000000 -0.2236068 -0.5000000 0.7071068 #' # [3,] 0.7071068 -0.2236068 -0.1666667 -0.4714045 #' # [4,] -0.7071068 -0.2236068 -0.1666667 -0.4714045 #' # [5,] 0.0000000 -0.2236068 0.8333333 0.2357023 #' #' ## check decomposition #' Q3 <- contr.bayes(3) #' Q3 %*% t(Q3) #' # [,1] [,2] [,3] #' # [1,] 0.6666667 -0.3333333 -0.3333333 #' # [2,] -0.3333333 0.6666667 -0.3333333 #' # [3,] -0.3333333 -0.3333333 0.6666667 #' ## 2/3 on diagonal and -1/3 on off-diagonal elements #' } #' #' @export contr.bayes <- function(n, contrasts = TRUE) { # validate n if (length(n) <= 1L) { if (is.numeric(n) && length(n) == 1L && n > 1L) { TRUE # all good } else { stop("not enough degrees of freedom to define contrasts") } } else { n <- length(n) } # make factor coding cont <- diag(n) if (contrasts) { a <- n I_a <- diag(a) J_a <- matrix(1, nrow = a, ncol = a) Sigma_a <- I_a - J_a / a cont <- eigen(Sigma_a)$vectors[, seq_len(a - 1), drop = FALSE] } cont } bayestestR/R/convert_bayesian_to_frequentist.R0000644000176200001440000000407113615677374021436 0ustar liggesusers#' Convert (refit) a Bayesian model to frequentist #' #' Refit Bayesian model as frequentist. Can be useful for comparisons. #' #' @param model A Bayesian model. #' @param data Data used by the model. If \code{NULL}, will try to extract it from the model. 
#' @examples #' \donttest{ #' # Rstanarm ---------------------- #' if (require("rstanarm")) { #' # Simple regressions #' model <- stan_glm(Sepal.Length ~ Petal.Length * Species, #' data = iris, chains = 2, refresh = 0 #' ) #' bayesian_as_frequentist(model) #' #' model <- stan_glm(vs ~ mpg, #' family = "binomial", #' data = mtcars, chains = 2, refresh = 0 #' ) #' bayesian_as_frequentist(model) #' #' # Mixed models #' model <- stan_glmer(Sepal.Length ~ Petal.Length + (1 | Species), #' data = iris, chains = 2, refresh = 0 #' ) #' bayesian_as_frequentist(model) #' #' model <- stan_glmer(vs ~ mpg + (1 | cyl), #' family = "binomial", #' data = mtcars, chains = 2, refresh = 0 #' ) #' bayesian_as_frequentist(model) #' } #' } #' @importFrom stats lm glm #' @export convert_bayesian_as_frequentist <- function(model, data = NULL) { if (is.null(data)) { data <- insight::get_data(model) } # info info <- insight::model_info(model) # Call called <- model$call # fun <- as.character(called)[1] formula <- called$formula family <- called$family if (info$is_mixed) { if (!requireNamespace("lme4", quietly = TRUE)) { stop("Package 'lme4' required for this function to work. 
Please install it by running `install.packages('lme4')`.") } if (info$is_linear) { freq <- lme4::lmer(formula, data = data) } else { freq <- lme4::glmer(formula, data = data, family = family) } } else { if (info$is_linear) { freq <- stats::lm(formula, data = data) } else { freq <- stats::glm(formula, data = data, family = family) } } freq } #' @rdname convert_bayesian_as_frequentist #' @export bayesian_as_frequentist <- convert_bayesian_as_frequentist bayestestR/R/mhdior.R0000644000176200001440000001626413613227664014230 0ustar liggesusers#' Maximum HDI level inside/outside ROPE (MHDIOR) #' #' The MHDIOR (pronounced 'em-eich-dior') is an exploratory and non-validated index representing the maximum percentage of \link[=hdi]{HDI} that does not contain (or is entirely contained, in which case the value is prefixed with a negative sign), in the negligible values space defined by the \link[=rope]{ROPE}. It differs from the ROPE percentage, \emph{i.e.}, from the proportion of a given CI in the ROPE, as it represents the maximum CI values needed to reach a ROPE proportion of 0\% or 100\%. Whether the index reflects the ROPE reaching 0\% or 100\% is indicated through the sign: a negative sign is added to indicate that the probability corresponds to the probability of a not significant effect (a percentage in ROPE of 100\%). For instance, a MHDIOR of 97\% means that there is a probability of .97 that a parameter (described by its posterior distribution) is outside the ROPE. In other words, the 97\% HDI is the maximum HDI level for which the percentage in ROPE is 0\%. On the contrary, a ROPE-based p of -97\% indicates that there is a probability of .97 that the parameter is inside the ROPE (percentage in ROPE of 100\%). A value close to 0\% would indicate that the mode of the distribution falls perfectly at the edge of the ROPE, in which case the percentage of HDI needed to be on either side of the ROPE becomes infinitely small. 
Negative values do not refer to negative values \emph{per se}, simply indicating that the value corresponds to non-significance rather than significance. #' #' #' @inheritParams rope #' @param precision The precision by which to explore the ROPE space (in percentage). Lower values increase the precision of the returned p value but can be quite computationaly costly. #' #' @inheritParams hdi #' #' @examples #' \dontrun{ #' library(bayestestR) #' #' # precision = 1 is used to speed up examples... #' #' mhdior( #' x = rnorm(1000, mean = 1, sd = 1), #' range = c(-0.1, 0.1), #' precision = 1 #' ) #' #' df <- data.frame(replicate(4, rnorm(100))) #' mhdior(df, precision = 1) #' #' if (require("rstanarm")) { #' model <- stan_glm( #' mpg ~ wt + gear, data = mtcars, #' chains = 2, #' iter = 200, #' refresh = 0 #' ) #' mhdior(model, precision = 1) #' } #' #' if (require("emmeans")) { #' mhdior(emtrends(model, ~1, "wt")) #' } #' #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' mhdior(model) #' } #' #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' mhdior(bf) #' } #' } #' @importFrom stats na.omit #' @export mhdior <- function(x, ...) { UseMethod("mhdior") } #' @rdname mhdior #' @export mhdior.numeric <- function(x, range = "default", precision = .1, ...) 
{ # This implementation is very clunky if (all(range == "default")) { range <- c(-0.1, 0.1) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } rope_df <- rope(x, range, ci = seq(0, 1, by = precision / 100), verbose = FALSE) rope_df <- stats::na.omit(rope_df) rope_values <- rope_df$ROPE_Percentage if (all(rope_values == min(rope_values))) { if (rope_values[1] == 0) { p <- 1 } else { p <- -1 } } else { min_rope <- min(rope_values) if (rope_values[1] == min_rope) { name_min2 <- rope_df$CI[rope_values != min_rope][1] CI_position <- match(name_min2, rope_df$CI) - 1 if (CI_position > 1) CI_position <- CI_position - 1 h0 <- 1 } else { name_max <- rope_df$CI[rope_values != max(rope_values)][1] CI_position <- match(name_max, rope_df$CI) if (CI_position > 1) CI_position <- CI_position - 1 h0 <- -1 } p <- rope_df$CI[CI_position] p <- as.numeric(unlist(p)) / 100 p <- h0 * p # p <- 1/p # Convert to probability } class(p) <- c("mhdior", class(p)) p } #' @rdname mhdior #' @export mhdior.data.frame <- function(x, range = "default", precision = .1, ...) { x <- .select_nums(x) if (ncol(x) == 1) { mhdior <- mhdior(x[, 1], range = range, precision = precision, ...) } else { mhdior <- sapply(x, mhdior, range = range, precision = precision, simplify = TRUE, ...) } out <- data.frame( "Parameter" = names(x), "mhdior" = mhdior, row.names = NULL, stringsAsFactors = FALSE ) class(out) <- c("mhdior", class(out)) out } #' @rdname mhdior #' @export mhdior.emmGrid <- function(x, range = "default", precision = .1, ...) { if (!requireNamespace("emmeans")) { stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.") } xdf <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(x, names = FALSE))) out <- mhdior(xdf, range = range, precision = precision, ...) 
attr(out, "object_name") <- deparse(substitute(x), width.cutoff = 500) out } #' @rdname mhdior #' @export mhdior.BFBayesFactor <- function(x, range = "default", precision = .1, ...) { out <- mhdior(insight::get_parameters(x), range = range, precision = precision, ...) out } #' @importFrom insight get_parameters #' @keywords internal .mhdior_models <- function(x, range, precision, effects, component, parameters, ...) { if (all(range == "default")) { range <- rope_range(x) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } out <- .prepare_output( mhdior(insight::get_parameters(x, effects = effects, parameters = parameters), range = range, precision = precision, ...), insight::clean_parameters(x) ) class(out) <- unique(c("mhdior", class(out))) out } #' @rdname mhdior #' @export mhdior.stanreg <- function(x, range = "default", precision = .1, effects = c("fixed", "random", "all"), parameters = NULL, ...) { effects <- match.arg(effects) out <- .mhdior_models( x = x, range = range, precision = precision, effects = effects, component = "conditional", parameters = parameters, ... ) attr(out, "object_name") <- deparse(substitute(x), width.cutoff = 500) out } #' @rdname mhdior #' @export mhdior.brmsfit <- function(x, range = "default", precision = .1, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- .mhdior_models( x = x, range = range, precision = precision, effects = effects, component = component, parameters = parameters, ... ) attr(out, "object_name") <- deparse(substitute(x), width.cutoff = 500) out } #' @rdname as.numeric.p_direction #' @method as.numeric mhdior #' @export as.numeric.mhdior <- function(x, ...) 
{ if ("data.frame" %in% class(x)) { return(as.numeric(as.vector(x$mhdior))) } else { return(as.vector(x)) } } #' @method as.double mhdior #' @export as.double.mhdior <- as.numeric.mhdior bayestestR/R/utils_clean_stan_parameters.R0000644000176200001440000000140413470405141020473 0ustar liggesusers#' @keywords internal .clean_up_tmp_stanreg <- function(tmp, group, cols, parms) { tmp$Group <- group tmp$Parameter <- rep(parms, each = nrow(tmp) / length(parms)) rownames(tmp) <- NULL tmp <- tmp[, c("Parameter", cols)] # clean random effects notation from parameters # tmp$Parameter <- gsub("b\\[(.*) (.*)\\]", "\\2", tmp$Parameter) tmp } #' @keywords internal .clean_up_tmp_brms <- function(tmp, group, component, cols, parms) { tmp$Group <- group tmp$Component <- component tmp$Parameter <- rep(parms, each = nrow(tmp) / length(parms)) rownames(tmp) <- NULL tmp <- tmp[, c("Parameter", cols)] # clean random effects notation from parameters # tmp$Parameter <- gsub("r_(.*)\\.(.*)\\.", "\\1", tmp$Parameter) tmp } bayestestR/R/effective_sample.R0000644000176200001440000000711613613227664016243 0ustar liggesusers#' Effective Sample Size (ESS) #' #' This function returns the effective sample size (ESS). #' #' @param model A \code{stanreg}, \code{stanfit}, or \code{brmsfit} object. #' @param ... Currently not used. #' @inheritParams hdi #' #' @return A data frame with two columns: Parameter name and effective sample size (ESS). #' #' @details \strong{Effective Sample (ESS)} should be as large as possible, altough for most applications, an effective sample size greater than 1,000 is sufficient for stable estimates (Bürkner, 2017). The ESS corresponds to the number of independent samples with the same estimation power as the N autocorrelated samples. It is is a measure of \dQuote{how much independent information there is in autocorrelated chains} (\emph{Kruschke 2015, p182-3}). #' #' @references \itemize{ #' \item Kruschke, J. (2014). 
Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. #' \item Bürkner, P. C. (2017). brms: An R package for Bayesian multilevel models using Stan. Journal of Statistical Software, 80(1), 1-28 #' } #' #' @examples #' \dontrun{ #' library(rstanarm) #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' effective_sample(model) #' } #' @export effective_sample <- function(model, ...) { UseMethod("effective_sample") } #' @rdname effective_sample #' @export effective_sample.brmsfit <- function(model, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { # check arguments effects <- match.arg(effects) component <- match.arg(component) pars <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) if (!requireNamespace("rstan", quietly = TRUE)) { stop("Package 'rstan' required for this function to work. Please install it by running `install.packages('stan')`.") } s <- rstan::summary(model$fit)$summary s <- s[make.names(rownames(s)) %in% colnames(pars), ] data.frame( Parameter = make.names(rownames(s)), ESS = round(s[, "n_eff"]), stringsAsFactors = FALSE, row.names = NULL ) } #' @rdname effective_sample #' @export effective_sample.stanreg <- function(model, effects = c("fixed", "random", "all"), parameters = NULL, ...) { # check arguments effects <- match.arg(effects) pars <- insight::get_parameters( model, effects = effects, parameters = parameters ) s <- as.data.frame(summary(model)) s <- s[rownames(s) %in% colnames(pars), ] data.frame( Parameter = rownames(s), ESS = s[["n_eff"]], stringsAsFactors = FALSE, row.names = NULL ) } #' @rdname effective_sample #' @export effective_sample.MCMCglmm <- function(model, effects = c("fixed", "random", "all"), parameters = NULL, ...) 
{ # check arguments effects <- match.arg(effects) pars <- insight::get_parameters( model, effects = effects, parameters = parameters ) s.fixed <- as.data.frame(summary(model)$solutions) s.random <- as.data.frame(summary(model)$Gcovariances) es <- data.frame( Parameter = rownames(s.fixed), ESS = round(s.fixed[["eff.samp"]]), stringsAsFactors = FALSE, row.names = NULL ) if (nrow(s.random) > 0) { es <- rbind(es, data.frame( Parameter = rownames(s.random), ESS = round(s.random[["eff.samp"]]), stringsAsFactors = FALSE, row.names = NULL )) } es[match(pars[[1]], es$Parameter), ] } bayestestR/R/print.bayesfactor_models.R0000644000176200001440000000211613573411511017724 0ustar liggesusers#' @importFrom insight print_color #' @export print.bayesfactor_models <- function(x, digits = 2, log = FALSE, ...) { BFE <- x denominator <- attr(BFE, "denominator") grid.type <- attr(BFE, "BF_method") BFE <- as.data.frame(BFE) if (log) { BFE$BF <- log(BFE$BF) } BFE$BF <- .format_big_small(BFE$BF, digits = digits) # indicate null-model BFE$Model[BFE$Model == "1"] <- "(Intercept only)" BFE$Model <- paste0(" [", seq_len(nrow(BFE)), "] ", BFE$Model) denM <- .trim(BFE$Model[denominator]) BFE <- BFE[-denominator, ] BFE$Model <- format(BFE$Model) colnames(BFE) <- c(format(" Model", width = max(nchar(BFE$Model))), "BF") insight::print_color("# Bayes Factors for Model Comparison\n\n", "blue") print.data.frame(BFE, digits = digits, quote = FALSE, row.names = FALSE) cat("\n* Against Denominator: ") insight::print_color(denM, "cyan") cat("\n* Bayes Factor Type: ") insight::print_color(grid.type, "cyan") cat("\n") if (log) insight::print_color("\nBayes Factors are on the log-scale.\n", "red") invisible(x) } bayestestR/R/describe_prior.R0000644000176200001440000000356013615677374015745 0ustar liggesusers#' Describe Priors #' #' Returns a summary of the priors used in the model. #' #' @param model A Bayesian model. #' @param ... Currently not used. 
#' #' @examples #' \dontrun{ #' library(bayestestR) #' #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm")) { #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' describe_prior(model) #' } #' #' # brms models #' # ----------------------------------------------- #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' describe_prior(model) #' } #' #' # BayesFactor objects #' # ----------------------------------------------- #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' describe_prior(bf) #' } #' } #' @importFrom insight get_priors #' @export describe_prior <- function(model, ...) { UseMethod("describe_prior") } #' @keywords internal .describe_prior <- function(model, ...) { priors <- insight::get_priors(model, ...) # Format names names(priors)[-1] <- paste0("Prior_", names(priors)[-1]) # If the prior scale has been adjusted, it is the actual scale that was used. if ("Prior_Adjusted_Scale" %in% names(priors)) { priors$Prior_Scale[!is.na(priors$Prior_Adjusted_Scale)] <- priors$Prior_Adjusted_Scale[!is.na(priors$Prior_Adjusted_Scale)] priors$Prior_Adjusted_Scale <- NULL } if ("Prior_Response" %in% names(priors)) { names(priors)[names(priors) == "Prior_Response"] <- "Response" } priors } #' @export describe_prior.stanreg <- .describe_prior #' @export describe_prior.brmsfit <- .describe_prior #' @export describe_prior.BFBayesFactor <- function(model, ...) { priors <- insight::get_priors(model) # Format names names(priors)[-1] <- paste0("Prior_", names(priors)[-1]) priors } bayestestR/R/print.mhdior.R0000644000176200001440000000055513571077552015361 0ustar liggesusers#' @export print.mhdior <- function(x, digits = 2, ...) 
{ if ("data_plot" %in% class(x)) { print(as.data.frame(x)) } else if ("data.frame" %in% class(x)) { insight::print_color("# Max HDI inside/outside ROPE (MHDIOR)\n\n", "blue") print_data_frame(x, digits = digits) } else { cat(sprintf("MHDIOR = %.*f%%", digits, x * 100)) } } bayestestR/R/point_estimate.R0000644000176200001440000002066513603652175015770 0ustar liggesusers#' Point-estimates of posterior distributions #' #' Compute various point-estimates, such as the mean, the median or the MAP, to describe posterior distributions. #' #' @param centrality The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} or \code{"all"}. #' @param dispersion Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively). #' @param ... Additional arguments to be passed to or from methods. #' @inheritParams hdi #' #' @references \href{https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html}{Vignette In-Depth 1: Comparison of Point-Estimates} #' #' @examples #' library(bayestestR) #' #' point_estimate(rnorm(1000)) #' point_estimate(rnorm(1000), centrality = "all", dispersion = TRUE) #' point_estimate(rnorm(1000), centrality = c("median", "MAP")) #' #' df <- data.frame(replicate(4, rnorm(100))) #' point_estimate(df, centrality = "all", dispersion = TRUE) #' point_estimate(df, centrality = c("median", "MAP")) #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' library(rstanarm) #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' point_estimate(model, centrality = "all", dispersion = TRUE) #' point_estimate(model, centrality = c("median", "MAP")) #' #' #' # emmeans estimates #' # ----------------------------------------------- #' library(emmeans) #' point_estimate(emtrends(model, ~1, "wt"), centrality = c("median", 
"MAP")) #' #' # brms models #' # ----------------------------------------------- #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' point_estimate(model, centrality = "all", dispersion = TRUE) #' point_estimate(model, centrality = c("median", "MAP")) #' #' # BayesFactor objects #' # ----------------------------------------------- #' library(BayesFactor) #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' point_estimate(bf, centrality = "all", dispersion = TRUE) #' point_estimate(bf, centrality = c("median", "MAP")) #' } #' #' @importFrom stats mad median sd #' @export point_estimate <- function(x, centrality = "all", dispersion = FALSE, ...) { UseMethod("point_estimate") } #' @export point_estimate.numeric <- function(x, centrality = "all", dispersion = FALSE, ...) { centrality <- match.arg(tolower(centrality), c("median", "mean", "map", "all"), several.ok = TRUE) if ("all" %in% centrality) { estimate_list <- c("median", "mean", "map") } else { estimate_list <- centrality } out <- data.frame(".temp" = 0) # Median if ("median" %in% estimate_list) { out$Median <- stats::median(x) if (dispersion) { out$MAD <- stats::mad(x) } } # Mean if ("mean" %in% estimate_list) { out$Mean <- mean(x) if (dispersion) { out$SD <- stats::sd(x) } } # MAP if ("map" %in% estimate_list) { out$MAP <- as.numeric(map_estimate(x)) } out <- out[names(out) != ".temp"] attr(out, "data") <- x attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.data.frame <- function(x, centrality = "all", dispersion = FALSE, ...) { x <- .select_nums(x) if (ncol(x) == 1) { estimates <- point_estimate(x[, 1], centrality = centrality, dispersion = dispersion, ...) } else { estimates <- sapply(x, point_estimate, centrality = centrality, dispersion = dispersion, simplify = FALSE, ...) 
estimates <- do.call(rbind, estimates) } out <- cbind(data.frame("Parameter" = names(x), stringsAsFactors = FALSE), estimates) rownames(out) <- NULL attr(out, "data") <- x attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.mcmc <- function(x, centrality = "all", dispersion = FALSE, ...) { point_estimate(as.data.frame(x), centrality = centrality, dispersion = dispersion, ...) } #' @export point_estimate.MCMCglmm <- function(x, centrality = "all", dispersion = FALSE, ...) { nF <- x$Fixed$nfl point_estimate(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), centrality = centrality, dispersion = dispersion, ...) } #' @export point_estimate.emmGrid <- function(x, centrality = "all", dispersion = FALSE, ...) { if (!requireNamespace("emmeans")) { stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.") } xdf <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(x, names = FALSE))) out <- point_estimate(xdf, centrality = centrality, dispersion = dispersion, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @importFrom insight get_parameters #' @keywords internal .point_estimate_models <- function(x, effects, component, parameters, centrality = "all", dispersion = FALSE, ...) { out <- point_estimate(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), centrality = centrality, dispersion = dispersion, ...) # out$Parameter <- .get_parameter_names(x, effects = effects, component = component, parameters = parameters) out } #' @importFrom insight get_parameters clean_parameters #' @rdname point_estimate #' @export point_estimate.stanreg <- function(x, centrality = "all", dispersion = FALSE, effects = c("fixed", "random", "all"), parameters = NULL, ...) 
{ effects <- match.arg(effects) out <- .prepare_output( point_estimate(insight::get_parameters(x, effects = effects, parameters = parameters), centrality = centrality, dispersion = dispersion, ...), insight::clean_parameters(x) ) attr(out, "object_name") <- .safe_deparse(substitute(x)) attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @rdname point_estimate #' @export point_estimate.brmsfit <- function(x, centrality = "all", dispersion = FALSE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) { effects <- match.arg(effects) component <- match.arg(component) out <- .prepare_output( point_estimate(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), centrality = centrality, dispersion = dispersion, ...), insight::clean_parameters(x) ) attr(out, "object_name") <- .safe_deparse(substitute(x)) attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.sim.merMod <- function(x, centrality = "all", dispersion = FALSE, effects = c("fixed", "random", "all"), parameters = NULL, ...) { effects <- match.arg(effects) out <- .point_estimate_models( x = x, effects = effects, component = "conditional", parameters = parameters, centrality = centrality, dispersion = dispersion, ... ) attr(out, "data") <- insight::get_parameters(x, effects = effects, parameters = parameters) attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.sim <- function(x, centrality = "all", dispersion = FALSE, parameters = NULL, ...) { out <- .point_estimate_models( x = x, effects = "fixed", component = "conditional", parameters = parameters, centrality = centrality, dispersion = dispersion, ... 
) attr(out, "data") <- insight::get_parameters(x, parameters = parameters) attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @rdname point_estimate #' @export point_estimate.BFBayesFactor <- function(x, centrality = "all", dispersion = FALSE, ...) { out <- point_estimate(insight::get_parameters(x), centrality = centrality, dispersion = dispersion, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } bayestestR/R/bayesfactor_inclusion.R0000644000176200001440000002210713616534204017316 0ustar liggesusers#' Inclusion Bayes Factors for testing predictors across Bayesian models #' #' The \code{bf_*} function is an alias of the main function. #' \cr \cr #' For more info, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}. #' #' @author Mattan S. Ben-Shachar #' @param models An object of class \code{\link{bayesfactor_models}} or \code{BFBayesFactor}. #' @param match_models See details. #' @param prior_odds Optional vector of prior odds for the models. See \code{BayesFactor::priorOdds<-}. #' @param ... Arguments passed to or from other methods. #' #' @return a data frame containing the prior and posterior probabilities, and BF for each effect. #' #' @details Inclusion Bayes factors answer the question: Are the observed data more #' probable under models with a particular effect, than they are under models without #' that particular effect? In other words, on average - are models with effect \eqn{X} #' more likely to have produced the observed data than models without effect \eqn{X}? #' #' \subsection{Match Models}{ #' If \code{match_models=FALSE} (default), Inclusion BFs are computed by comparing all models #' with a predictor against all models without that predictor. 
If \code{TRUE}, #' comparison is restricted to models that (1) do not include any interactions #' with the predictor of interest; (2) for interaction predictors, averaging is done #' only across models that containe the main effect from which the interaction #' predictor is comprised. #' } #' #' @note Random effects in the \code{lme} style will be displayed as interactions: #' i.e., \code{(X|G)} will become \code{1:G} and \code{X:G}. #' #' @seealso \code{\link{weighted_posteriors}} for Bayesian parameter averaging. #' #' @examples #' library(bayestestR) #' #' # Using bayesfactor_models: #' # ------------------------------ #' mo0 <- lm(Sepal.Length ~ 1, data = iris) #' mo1 <- lm(Sepal.Length ~ Species, data = iris) #' mo2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) #' mo3 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) #' #' BFmodels <- bayesfactor_models(mo1, mo2, mo3, denominator = mo0) #' bayesfactor_inclusion(BFmodels) #' \dontrun{ #' # BayesFactor #' # ------------------------------- #' library(BayesFactor) #' #' BF <- generalTestBF(len ~ supp * dose, ToothGrowth, progress = FALSE) #' #' bayesfactor_inclusion(BF) #' #' # compare only matched models: #' bayesfactor_inclusion(BF, match_models = TRUE) #' } #' @references #' \itemize{ #' \item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. (2019, March 25). A conceptual introduction to Bayesian Model Averaging. \doi{10.31234/osf.io/wgb64} #' \item Clyde, M. A., Ghosh, J., & Littman, M. L. (2011). Bayesian adaptive sampling for variable selection and model averaging. Journal of Computational and Graphical Statistics, 20(1), 80-101. #' \item Mathot, S. (2017). Bayes like a Baws: Interpreting Bayesian Repeated Measures in JASP [Blog post]. Retrieved from https://www.cogsci.nl/blog/interpreting-bayesian-repeated-measures-in-jasp #' } #' #' #' @export bayesfactor_inclusion <- function(models, match_models = FALSE, prior_odds = NULL, ...) 
{ UseMethod("bayesfactor_inclusion") } #' @rdname bayesfactor_inclusion #' @export bf_inclusion <- bayesfactor_inclusion #' @export bayesfactor_inclusion.bayesfactor_models <- function(models, match_models = FALSE, prior_odds = NULL, ...) { if (isTRUE(attr(models, "unsupported_models"))) { stop("Can not compute inclusion Bayes factors - passed models are not (yet) supported.", call. = FALSE) } # Build Models Table # df.model <- .get_model_table(models, priorOdds = prior_odds) effnames <- colnames(df.model)[-(1:3)] # Build Interaction Matrix # if (isTRUE(match_models)) { effects.matrix <- as.matrix(df.model[, -c(1:3)]) df.interaction <- data.frame(effnames, stringsAsFactors = FALSE) for (eff in effnames) { df.interaction[, eff] <- sapply(effnames, function(x) .includes_interaction(x, eff)) } rownames(df.interaction) <- effnames df.interaction <- as.matrix(df.interaction[, -1]) } # Build Effect Table # df.effect <- data.frame( effnames, Pinc = rep(NA, length(effnames)), PincD = rep(NA, length(effnames)), BF_inclusion = rep(NA, length(effnames)), stringsAsFactors = FALSE ) for (eff in effnames) { if (isTRUE(match_models)) { idx1 <- df.interaction[eff, ] idx2 <- df.interaction[, eff] has_not_high_order_interactions <- !apply(effects.matrix[, idx1, drop = FALSE], 1, any) ind_include <- has_not_high_order_interactions & effects.matrix[, eff] ind_exclude <- apply(effects.matrix[, idx2, drop = FALSE], 1, all) & has_not_high_order_interactions & !effects.matrix[, eff] df.model_temp <- df.model[ind_include | ind_exclude, , drop = FALSE] } else { df.model_temp <- df.model } # models with effect mwith <- which(df.model_temp[[eff]]) mwithprior <- sum(df.model_temp[mwith, "priorProbs"]) mwithpost <- sum(df.model_temp[mwith, "postProbs"]) # models without effect mwithoutprior <- sum(df.model_temp[-mwith, "priorProbs"]) mwithoutpost <- sum(df.model_temp[-mwith, "postProbs"]) # Save results df.effect$Pinc[effnames == eff] <- mwithprior df.effect$PincD[effnames == eff] <- mwithpost 
df.effect$BF_inclusion[effnames == eff] <- (mwithpost / mwithoutpost) / (mwithprior / mwithoutprior) } df.effect$BF_inclusion <- df.effect$BF_inclusion df.effect <- df.effect[, -1, drop = FALSE] colnames(df.effect) <- c("p_prior", "p_posterior", "BF") rownames(df.effect) <- effnames class(df.effect) <- c("bayesfactor_inclusion", class(df.effect)) attr(df.effect, "matched") <- match_models attr(df.effect, "priorOdds") <- prior_odds return(df.effect) } #' @export bayesfactor_inclusion.BFBayesFactor <- function(models, match_models = FALSE, prior_odds = NULL, ...) { models <- bayesfactor_models.BFBayesFactor(models) bayesfactor_inclusion.bayesfactor_models(models, match_models = match_models, prior_odds = prior_odds) } #' @keywords internal #' @importFrom stats as.formula terms terms.formula .get_model_table <- function(BFGrid, priorOdds = NULL, ...) { denominator <- attr(BFGrid, "denominator") BFGrid <- rbind(BFGrid[denominator, ], BFGrid[-denominator, ]) attr(BFGrid, "denominator") <- 1 # Prior and post odds Modelnames <- BFGrid$Model if (!is.null(priorOdds)) { priorOdds <- c(1, priorOdds) } else { priorOdds <- rep(1, length(Modelnames)) } posterior_odds <- priorOdds * BFGrid$BF priorProbs <- priorOdds / sum(priorOdds) postProbs <- posterior_odds / sum(posterior_odds) df.model <- data.frame( Modelnames, priorProbs, postProbs, stringsAsFactors = FALSE ) # add effects table make_terms <- function(formula) { sort_interactions <- function(x) { if (grepl("\\:", x)) { effs <- unlist(strsplit(x, "\\:")) x <- paste0(sort(effs), collapse = ":") } x } formula.f <- stats::as.formula(paste0("~", formula)) all.terms <- attr(stats::terms(formula.f), "term.labels") # Fixed fix_trms <- all.terms[!grepl("\\|", all.terms)] # no random if (length(fix_trms) > 0) { fix_trms <- sapply(fix_trms, sort_interactions) } # Random random_parts <- paste0(all.terms[grepl("\\|", all.terms)]) # only random if (length(random_parts) == 0) { return(fix_trms) } random_units <- sub("^.+\\|\\s+", "", 
random_parts) tmp_random <- lapply( sub("\\|.+$", "", random_parts), function(x) stats::as.formula(paste0("~", x)) ) rand_trms <- vector("list", length(random_parts)) for (i in seq_along(random_parts)) { tmp_trms <- attr(stats::terms.formula(tmp_random[[i]]), "term.labels") tmp_trms <- sapply(tmp_trms, sort_interactions) if (!any(unlist(strsplit(as.character(tmp_random[[i]])[[2]], " \\+ ")) == "0")) { tmp_trms <- c("1", tmp_trms) } rand_trms[[i]] <- paste0(tmp_trms, ":", random_units[[i]]) } c(fix_trms, unlist(rand_trms)) } for (m in seq_len(nrow(df.model))) { tmp_terms <- make_terms(df.model$Modelnames[m]) if (length(tmp_terms) > 0) { missing_terms <- !tmp_terms %in% colnames(df.model) # For R < 3.6.0 if (any(missing_terms)) df.model[, tmp_terms[missing_terms]] <- NA # For R < 3.6.0 df.model[m, tmp_terms] <- TRUE } } df.model[is.na(df.model)] <- FALSE df.model } #' @keywords internal .includes_interaction <- function(eff, effnames) { eff_b <- strsplit(eff, "\\:") effnames_b <- strsplit(effnames, "\\:") is_int <- sapply(effnames_b, function(x) length(x) > 1) temp <- logical(length(effnames)) for (rr in seq_along(effnames)) { if (is_int[rr]) { temp[rr] <- all(eff_b[[1]] %in% effnames_b[[rr]]) & !all(effnames_b[[rr]] %in% eff_b[[1]]) } } temp } bayestestR/R/p_direction.R0000644000176200001440000002530113613227664015235 0ustar liggesusers#' Probability of Direction (pd) #' #' Compute the \strong{Probability of Direction} (\strong{\emph{pd}}, also known as the Maximum Probability of Effect - \emph{MPE}). It varies between 50\% and 100\% (\emph{i.e.}, \code{0.5} and \code{1}) and can be interpreted as the probability (expressed in percentage) that a parameter (described by its posterior distribution) is strictly positive or negative (whichever is the most probable). It is mathematically defined as the proportion of the posterior distribution that is of the median's sign. 
Altough differently expressed, this index is fairly similar (\emph{i.e.}, is strongly correlated) to the frequentist \strong{p-value}. #' #' @param x Vector representing a posterior distribution. Can also be a Bayesian model (\code{stanreg}, \code{brmsfit} or \code{BayesFactor}). #' @param method Can be \code{"direct"} or one of methods of \link[=estimate_density]{density estimation}, such as \code{"kernel"}, \code{"logspline"} or \code{"KernSmooth"}. If \code{"direct"} (default), the computation is based on the raw ratio of samples superior and inferior to 0. Else, the result is based on the \link[=auc]{Area under the Curve (AUC)} of the estimated \link[=estimate_density]{density} function. #' @inheritParams hdi #' #' @details #' \subsection{What is the \emph{pd}?}{ #' The Probability of Direction (pd) is an index of effect existence, ranging from 50\% to 100\%, representing the certainty with which an effect goes in a particular direction (\emph{i.e.}, is positive or negative). Beyond its simplicity of interpretation, understanding and computation, this index also presents other interesting properties: #' \itemize{ #' \item It is independent from the model: It is solely based on the posterior distributions and does not require any additional information from the data or the model. #' \item It is robust to the scale of both the response variable and the predictors. #' \item It is strongly correlated with the frequentist p-value, and can thus be used to draw parallels and give some reference to readers non-familiar with Bayesian statistics. 
#' } #' } #' \subsection{Relationship with the p-value}{ #' In most cases, it seems that the \emph{pd} has a direct correspondance with the frequentist one-sided \emph{p}-value through the formula \ifelse{html}{\out{pone sided = 1 - p(d)/100}}{\eqn{p_{one sided}=1-\frac{p_{d}}{100}}} and to the two-sided p-value (the most commonly reported one) through the formula \ifelse{html}{\out{ptwo sided = 2 * (1 - p(d)/100)}}{\eqn{p_{two sided}=2*(1-\frac{p_{d}}{100})}}. Thus, a two-sided p-value of respectively \code{.1}, \code{.05}, \code{.01} and \code{.001} would correspond approximately to a \emph{pd} of 95\%, 97.5\%, 99.5\% and 99.95\%. #' } #' \subsection{Methods of computation}{ #' The most simple and direct way to compute the \emph{pd} is to 1) look at the median's sign, 2) select the portion of the posterior of the same sign and 3) compute the percentage that this portion represents. This "simple" method is the most straigtfoward, but its precision is directly tied to the number of posterior draws. The second approach relies on \link[=estimate_density]{density estimation}. It starts by estimating the density function (for which many methods are available), and then computing the \link[=area_under_curve]{area under the curve} (AUC) of the density curve on the other side of 0. #' } #' \subsection{Strengths and Limitations}{ #' \strong{Strengths:} Straightforward computation and interpretation. Objective property of the posterior distribution. 1:1 correspondence with the frequentist p-value. #' \cr \cr #' \strong{Limitations:} Limited information favoring the null hypothesis. #' } #' #' @return Values between 0.5 and 1 corresponding to the probability of direction (pd). #' #' @references Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. 
\doi{10.3389/fpsyg.2019.02767} #' #' @examples #' library(bayestestR) #' #' # Simulate a posterior distribution of mean 1 and SD 1 #' # ---------------------------------------------------- #' posterior <- rnorm(1000, mean = 1, sd = 1) #' p_direction(posterior) #' p_direction(posterior, method = "kernel") #' #' # Simulate a dataframe of posterior distributions #' # ----------------------------------------------- #' df <- data.frame(replicate(4, rnorm(100))) #' p_direction(df) #' p_direction(df, method = "kernel") #' #' \dontrun{ #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm")) { #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, #' data = mtcars, #' chains = 2, refresh = 0 #' ) #' p_direction(model) #' p_direction(model, method = "kernel") #' } #' #' # emmeans #' # ----------------------------------------------- #' if (require("emmeans")) { #' p_direction(emtrends(model, ~1, "wt")) #' } #' #' # brms models #' # ----------------------------------------------- #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' p_direction(model) #' p_direction(model, method = "kernel") #' } #' #' # BayesFactor objects #' # ----------------------------------------------- #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' p_direction(bf) #' p_direction(bf, method = "kernel") #' } #' } #' @export p_direction <- function(x, ...) { UseMethod("p_direction") } #' @rdname p_direction #' @export pd <- p_direction #' @importFrom stats density #' @rdname p_direction #' @export p_direction.numeric <- function(x, method = "direct", ...) { if (method == "direct") { pdir <- max( c( length(x[x > 0]) / length(x), # pd positive length(x[x < 0]) / length(x) # pd negative ) ) } else { dens <- estimate_density(x, method = method, precision = 2^10, extend = TRUE, ...) 
if (length(x[x > 0]) > length(x[x < 0])) {
    # Majority of draws are positive: integrate the positive part of the density
    dens <- dens[dens$x > 0, ]
  } else {
    dens <- dens[dens$x < 0, ]
  }
  pdir <- area_under_curve(dens$x, dens$y, method = "spline")
  if (pdir >= 1) pdir <- 1 # Enforce bounds (numerical integration can overshoot 1)
  }

  attr(pdir, "method") <- method
  attr(pdir, "data") <- x
  class(pdir) <- unique(c("p_direction", "see_p_direction", class(pdir)))
  pdir
}



#' @rdname p_direction
#' @export
p_direction.data.frame <- function(x, method = "direct", ...) {
  # Keep only numeric columns; each column is treated as one posterior
  x <- .select_nums(x)

  if (ncol(x) == 1) {
    pd <- p_direction(x[, 1], method = method, ...)
  } else {
    # One pd value per column/parameter
    pd <- sapply(x, p_direction, method = method, simplify = TRUE, ...)
  }

  out <- data.frame(
    "Parameter" = names(x),
    "pd" = pd,
    row.names = NULL,
    stringsAsFactors = FALSE
  )

  attr(out, "object_name") <- .safe_deparse(substitute(x))
  class(out) <- unique(c("p_direction", "see_p_direction", class(out)))

  out
}


#' @rdname p_direction
#' @export
p_direction.MCMCglmm <- function(x, method = "direct", ...) {
  # Only the fixed-effect columns of the Sol matrix are used
  nF <- x$Fixed$nfl
  out <- p_direction(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), method = method, ...)
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


#' @export
p_direction.mcmc <- function(x, method = "direct", ...) {
  # Delegate to the data.frame method after coercion
  p_direction(as.data.frame(x), method = method, ...)
}


#' @rdname p_direction
#' @export
p_direction.emmGrid <- function(x, method = "direct", ...) {
  if (!requireNamespace("emmeans")) {
    stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.")
  }
  # Convert the emmGrid draws to a plain data frame of posterior samples
  xdf <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(x, names = FALSE)))

  out <- p_direction(xdf, method = method, ...)
  # NOTE(review): other methods in this file use .safe_deparse() here — confirm
  # whether plain deparse() is intentional.
  attr(out, "object_name") <- deparse(substitute(x), width.cutoff = 500)
  out
}


# Shared worker for model objects: extracts posterior draws via insight and
# dispatches to the data.frame method.
#' @importFrom insight get_parameters
#' @keywords internal
.p_direction_models <- function(x, effects, component, parameters, method = "direct", ...) {
  p_direction(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), method = method, ...)
}


#' @export
p_direction.sim.merMod <- function(x, effects = c("fixed", "random", "all"), parameters = NULL, method = "direct", ...) {
  effects <- match.arg(effects)

  out <- .p_direction_models(
    x = x,
    effects = effects,
    component = "conditional",
    parameters = parameters,
    method = method,
    ...
  )

  attr(out, "data") <- insight::get_parameters(x, effects = effects, parameters = parameters)
  out
}


#' @export
p_direction.sim <- function(x, parameters = NULL, method = "direct", ...) {
  # arm::sim objects only carry fixed effects
  out <- .p_direction_models(
    x = x,
    effects = "fixed",
    component = "conditional",
    parameters = parameters,
    method = method,
    ...
  )

  attr(out, "data") <- insight::get_parameters(x, parameters = parameters)
  out
}


#' @rdname p_direction
#' @export
p_direction.stanreg <- function(x, effects = c("fixed", "random", "all"), parameters = NULL, method = "direct", ...) {
  effects <- match.arg(effects)

  # Compute pd on the extracted draws, then re-attach cleaned parameter names
  out <- .prepare_output(
    p_direction(insight::get_parameters(x, effects = effects, parameters = parameters), method = method, ...),
    insight::clean_parameters(x)
  )

  class(out) <- unique(c("p_direction", "see_p_direction", class(out)))
  attr(out, "object_name") <- .safe_deparse(substitute(x))

  out
}


#' @rdname p_direction
#' @export
p_direction.brmsfit <- function(x, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, method = "direct", ...) {
  effects <- match.arg(effects)
  component <- match.arg(component)

  out <- .prepare_output(
    p_direction(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), method = method, ...),
    insight::clean_parameters(x)
  )

  class(out) <- unique(c("p_direction", "see_p_direction", class(out)))
  attr(out, "object_name") <- .safe_deparse(substitute(x))

  out
}


#' @rdname p_direction
#' @export
p_direction.BFBayesFactor <- function(x, method = "direct", ...) {
  out <- p_direction(insight::get_parameters(x), method = method, ...)
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


#' Convert to Numeric
#'
#' @inheritParams base::as.numeric
#' @method as.numeric p_direction
#' @export
as.numeric.p_direction <- function(x, ...) {
  # Data-frame results hold one pd per parameter in the `pd` column;
  # scalar results are the value itself.
  if ("data.frame" %in% class(x)) {
    return(as.numeric(as.vector(x$pd)))
  } else {
    return(as.vector(x))
  }
}


#' @method as.double p_direction
#' @export
as.double.p_direction <- as.numeric.p_direction
bayestestR/R/si.R0000644000176200001440000002170513610032401013331 0ustar liggesusers
#' Compute Support Intervals
#'
#' A support interval contains only the values of the parameter that predict the observed data better
#' than average, by some degree \emph{k}; these are values of the parameter that are associated with an
#' updating factor greater or equal than \emph{k}. From the perspective of the Savage-Dickey Bayes factor, testing
#' against a point null hypothesis for any value within the support interval will yield a Bayes factor smaller
#' than \emph{1/k}.
#' \cr \cr
#' \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels,
#' see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.}
#'
#' @param posterior A numerical vector, \code{stanreg} / \code{brmsfit} object, \code{emmGrid}
#'   or a data frame - representing a posterior distribution(s) from (see 'Details').
#' @param prior An object representing a prior distribution (see 'Details').
#' @param BF The amount of support required to be included in the support interval.
#' @param ... Arguments passed to and from other methods.
#' @inheritParams hdi
#'
#' @details This method is used to compute support intervals based on prior and posterior distributions.
#' For the computation of support intervals, the model priors must be proper priors (at the very least
#' they should be \emph{not flat}, and it is preferable that they be \emph{informative} - note
#' that by default, \code{brms::brm()} uses flat priors for fixed-effects; see example below).
#'
#' \subsection{Setting the correct \code{prior}}{
#' It is important to provide the correct \code{prior} for meaningful results.
#' \itemize{
#'   \item When \code{posterior} is a numerical vector, \code{prior} should also be a numerical vector.
#'   \item When \code{posterior} is a \code{data.frame}, \code{prior} should also be a \code{data.frame}, with matching column order.
#'   \item When \code{posterior} is a \code{stanreg} or \code{brmsfit} model: \itemize{
#'     \item \code{prior} can be set to \code{NULL}, in which case prior samples are drawn internally.
#'     \item \code{prior} can also be a model equivalent to \code{posterior} but with samples from the priors \emph{only}.
#'   }
#'   \item When \code{posterior} is an \code{emmGrid} object: \itemize{
#'     \item \code{prior} should be the \code{stanreg} or \code{brmsfit} model used to create the \code{emmGrid} objects.
#'     \item \code{prior} can also be an \code{emmGrid} object equivalent to \code{posterior} but created with a model of priors samples \emph{only}.
#'   }
#' }}
#'
#' \subsection{Choosing a value of \code{BF}}{
#' The choice of \code{BF} (the level of support) depends on what we want our interval to represent:
#' \itemize{
#'   \item A \code{BF} = 1 contains values whose credibility is not decreased by observing the data.
#'   \item A \code{BF} > 1 contains values that received more impressive support from the data.
#'   \item A \code{BF} < 1 contains values whose credibility has \emph{not} been impressively decreased by observing the data.
#'     Testing against values outside this interval will produce a Bayes factor larger than 1/\code{BF} in support of
#'     the alternative.
#' E.g., if an SI (BF = 1/3) excludes 0, the Bayes factor against the point-null will be larger than 3.
#' }
#' }
#'
#' @return
#' A data frame containing the lower and upper bounds of the SI.
#' \cr
#' Note that if the level of requested support is higher than observed in the data, the
#' interval will be \code{[NA,NA]}.
#'
#' @examples
#' library(bayestestR)
#'
#' prior <- distribution_normal(1000, mean = 0, sd = 1)
#' posterior <- distribution_normal(1000, mean = .5, sd = .3)
#'
#' si(posterior, prior)
#' \dontrun{
#' # rstanarm models
#' # ---------------
#' library(rstanarm)
#' contrasts(sleep$group) <- contr.bayes # see vignette
#' stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep)
#' si(stan_model)
#' si(stan_model, BF = 3)
#'
#' # emmGrid objects
#' # ---------------
#' library(emmeans)
#' group_diff <- pairs(emmeans(stan_model, ~group))
#' si(group_diff, prior = stan_model)
#'
#' # brms models
#' # -----------
#' library(brms)
#' contrasts(sleep$group) <- contr.bayes # see vignette
#' my_custom_priors <-
#'   set_prior("student_t(3, 0, 1)", class = "b") +
#'   set_prior("student_t(3, 0, 1)", class = "sd", group = "ID")
#'
#' brms_model <- brm(extra ~ group + (1 | ID),
#'   data = sleep,
#'   prior = my_custom_priors
#' )
#' si(brms_model)
#' }
#' @references
#' Wagenmakers, E., Gronau, Q. F., Dablander, F., & Etz, A. (2018, November 22). The Support Interval. \doi{10.31234/osf.io/zwnxb}
#'
#' @export
si <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) {
  UseMethod("si")
}


#' @rdname si
#' @export
si.numeric <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) {
  # Without an explicit prior the SI is meaningless; fall back to the
  # posterior itself (yielding an [NA, NA] interval downstream) and warn.
  if (is.null(prior)) {
    prior <- posterior
    if (verbose) {
      warning(
        "Prior not specified! ",
        "Please specify a prior (in the form 'prior = distribution_normal(1000, 0, 1)')",
        " to get meaningful results."
      )
    }
  }
  prior <- data.frame(X = prior)
  posterior <- data.frame(X = posterior)

  # Get SIs via the data.frame workhorse; drop the synthetic Parameter column
  out <- si.data.frame(
    posterior = posterior, prior = prior,
    BF = BF, verbose = verbose, ...
  )
  out$Parameter <- NULL
  out
}


#' @importFrom insight clean_parameters
#' @rdname si
#' @export
si.stanreg <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) {
  cleaned_parameters <- insight::clean_parameters(posterior)
  effects <- match.arg(effects)
  component <- match.arg(component)

  # Extract matched prior and posterior draws (prior samples are drawn
  # internally when `prior` is NULL)
  samps <- .clean_priors_and_posteriors(posterior, prior,
    verbose = verbose,
    effects = effects, component = component,
    parameters = parameters
  )

  # Get SIs
  temp <- si.data.frame(
    posterior = samps$posterior, prior = samps$prior,
    BF = BF, verbose = verbose, ...
  )

  out <- .prepare_output(temp, cleaned_parameters)

  attr(out, "object_name") <- .safe_deparse(substitute(posterior))
  class(out) <- class(temp)
  attr(out, "plot_data") <- attr(temp, "plot_data")

  out
}


#' @rdname si
#' @export
si.brmsfit <- si.stanreg


#' @rdname si
#' @export
si.emmGrid <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) {
  samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose)

  # Get SIs
  out <- si.data.frame(
    posterior = samps$posterior, prior = samps$prior,
    BF = BF, verbose = verbose, ...
  )

  attr(out, "object_name") <- .safe_deparse(substitute(posterior))
  out
}


#' @rdname si
#' @export
si.data.frame <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) {
  if (is.null(prior)) {
    prior <- posterior
    # FIX: honor the `verbose` argument (was warning unconditionally,
    # inconsistent with si.numeric)
    if (verbose) {
      warning(
        "Prior not specified! ",
        "Please specify priors (with column order matching 'posterior')",
        " to get meaningful results."
      )
    }
  }

  # One [low, high] row per parameter; columns are matched by position
  sis <- matrix(NA, nrow = ncol(posterior), ncol = 2)
  for (par in seq_along(posterior)) {
    sis[par, ] <- .si(posterior[[par]], prior[[par]], BF = BF, ...)
  }

  out <- data.frame(
    Parameter = colnames(posterior),
    CI = BF,
    CI_low = sis[, 1],
    CI_high = sis[, 2],
    stringsAsFactors = FALSE
  )

  class(out) <- unique(c("bayestestR_si", "see_si", "bayestestR_ci", "see_ci", class(out)))
  attr(out, "plot_data") <- .make_BF_plot_data(posterior, prior, 0, 0)$plot_data

  out
}


# Compute the support interval for a single parameter: fit logspline densities
# to prior and posterior, and return the range of x values whose posterior/prior
# density ratio (the updating factor) is at least `BF`.
.si <- function(posterior, prior, BF = 1, extend_scale = 0.05, precision = 2^8, ...) {
  if (!requireNamespace("logspline")) {
    stop("Package \"logspline\" needed for this function to work. Please install it.")
  }

  # Identical prior and posterior carry no information about support
  if (isTRUE(all.equal(prior, posterior))) {
    return(c(NA, NA))
  }

  # Evaluation grid: pooled sample range, clipped to median +/- 7 * MAD to
  # guard against extreme draws, then extended by `extend_scale`
  x <- c(prior, posterior)
  x_range <- range(x)
  x_rangex <- stats::median(x) + 7 * stats::mad(x) * c(-1, 1)
  x_range <- c(
    max(c(x_range[1], x_rangex[1])),
    min(c(x_range[2], x_rangex[2]))
  )

  extension_scale <- diff(x_range) * extend_scale
  x_range[1] <- x_range[1] - extension_scale
  x_range[2] <- x_range[2] + extension_scale
  x_axis <- seq(x_range[1], x_range[2], length.out = precision)

  f_prior <- logspline::logspline(prior)
  f_posterior <- logspline::logspline(posterior)
  d_prior <- logspline::dlogspline(x_axis, f_prior)
  d_posterior <- logspline::dlogspline(x_axis, f_posterior)

  # Savage-Dickey style density ratio at each grid point
  relative_d <- d_posterior / d_prior

  x_supported <- x_axis[relative_d >= BF]
  if (length(x_supported) < 2) {
    # Requested support level not reached anywhere
    return(c(NA, NA))
  } else {
    # FIX: was range(range(x_supported)) - the outer call was redundant
    range(x_supported)
  }
}
bayestestR/R/print.p_map.R0000644000176200001440000000052213536442413015156 0ustar liggesusers
# Print method for p_map results: data-frame results get a colored header,
# scalar results a single formatted line.
#' @export
print.p_map <- function(x, digits = 3, ...) {
  if ("data_plot" %in% class(x)) {
    print(as.data.frame(x))
  } else if ("data.frame" %in% class(x)) {
    insight::print_color("# MAP-based p-value\n\n", "blue")
    print_data_frame(x, digits = digits)
  } else {
    cat(sprintf("p (MAP) = %.*f", digits, x))
  }
}
bayestestR/R/print.p_significance.R0000644000176200001440000000144613552545303017031 0ustar liggesusers
# Print method for p_significance results.
#' @export
print.p_significance <- function(x, digits = 2, ...) {
  if ("data_plot" %in% class(x)) {
    print(as.data.frame(x))
  } else if ("data.frame" %in% class(x)) {
    .print_ps(x, digits, ...)
  } else {
    # Scalar result: show the threshold stored on the object and the
    # probability as a percentage
    cat(sprintf(
      "ps [%s] = %s%%",
      insight::format_value(attributes(x)$threshold, digits = digits),
      insight::format_value(x * 100, digits = digits)
    ))
  }
}


# Formatted printing of a p_significance data frame (one row per parameter).
#' @keywords internal
.print_ps <- function(x, digits, ...) {
  insight::print_color(sprintf(
    "# Probability of Significance (ps [%s])\n\n",
    insight::format_value(attributes(x)$threshold, digits = digits)
  ), "blue")

  x$Parameter <- as.character(x$Parameter)
  x$ps <- sprintf("%s%%", insight::format_value(x$ps * 100, digits = digits))
  print_data_frame(x, digits = digits)
}
bayestestR/R/eti.R0000644000176200001440000001244113613227664013520 0ustar liggesusers
#' Equal-Tailed Interval (ETI)
#'
#' Compute the \strong{Equal-Tailed Interval (ETI)} of posterior distributions using the quantiles method. The probability of being below this interval is equal to the probability of being above it. The ETI can be used in the context of uncertainty characterisation of posterior distributions as \strong{Credible Interval (CI)}.
#'
#' @inheritParams hdi
#' @inherit ci return
#' @inherit hdi details
#'
#' @examples
#' library(bayestestR)
#'
#' posterior <- rnorm(1000)
#' eti(posterior)
#' eti(posterior, ci = c(.80, .89, .95))
#'
#' df <- data.frame(replicate(4, rnorm(100)))
#' eti(df)
#' eti(df, ci = c(.80, .89, .95))
#' \dontrun{
#' library(rstanarm)
#' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0)
#' eti(model)
#' eti(model, ci = c(.80, .89, .95))
#'
#' library(emmeans)
#' eti(emtrends(model, ~1, "wt"))
#'
#' library(brms)
#' model <- brms::brm(mpg ~ wt + cyl, data = mtcars)
#' eti(model)
#' eti(model, ci = c(.80, .89, .95))
#'
#' library(BayesFactor)
#' bf <- ttestBF(x = rnorm(100, 1, 1))
#' eti(bf)
#' eti(bf, ci = c(.80, .89, .95))
#' }
#'
#' @export
eti <- function(x, ...) {
  UseMethod("eti")
}


#' @rdname eti
#' @export
eti.numeric <- function(x, ci = .89, verbose = TRUE, ...) {
  # One row per requested credible level
  out <- do.call(rbind, lapply(ci, function(i) {
    .eti(x = x, ci = i, verbose = verbose)
  }))
  class(out) <- unique(c("bayestestR_eti", "see_eti", "bayestestR_ci", "see_ci", class(out)))
  attr(out, "data") <- x
  out
}


#' @rdname eti
#' @export
eti.data.frame <- function(x, ci = .89, verbose = TRUE, ...) {
  dat <- .compute_interval_dataframe(x = x, ci = ci, verbose = verbose, fun = "eti")
  attr(dat, "object_name") <- .safe_deparse(substitute(x))
  dat
}


#' @rdname eti
#' @export
eti.MCMCglmm <- function(x, ci = .89, verbose = TRUE, ...) {
  # Only the fixed-effect columns of the Sol matrix are used
  nF <- x$Fixed$nfl
  d <- as.data.frame(x$Sol[, 1:nF, drop = FALSE])
  dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "eti")
  attr(dat, "data") <- .safe_deparse(substitute(x))
  dat
}


#' @export
eti.mcmc <- function(x, ci = .89, verbose = TRUE, ...) {
  d <- as.data.frame(x)
  dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "eti")
  attr(dat, "data") <- .safe_deparse(substitute(x))
  dat
}


#' @rdname eti
#' @export
eti.sim.merMod <- function(x, ci = .89, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) {
  effects <- match.arg(effects)
  dat <- .compute_interval_simMerMod(x = x, ci = ci, effects = effects, parameters = parameters, verbose = verbose, fun = "eti")
  out <- dat$result
  attr(out, "data") <- dat$data
  out
}


#' @rdname eti
#' @export
eti.sim <- function(x, ci = .89, parameters = NULL, verbose = TRUE, ...) {
  dat <- .compute_interval_sim(x = x, ci = ci, parameters = parameters, verbose = verbose, fun = "eti")
  out <- dat$result
  attr(out, "data") <- dat$data
  out
}


#' @rdname eti
#' @export
eti.emmGrid <- function(x, ci = .89, verbose = TRUE, ...) {
  if (!requireNamespace("emmeans")) {
    stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.")
  }
  xdf <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(x, names = FALSE)))
  dat <- .compute_interval_dataframe(x = xdf, ci = ci, verbose = verbose, fun = "eti")
  attr(dat, "object_name") <- .safe_deparse(substitute(x))
  dat
}


#' @rdname eti
#' @export
eti.stanreg <- function(x, ci = .89, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) {
  effects <- match.arg(effects)
  out <- .prepare_output(
    eti(insight::get_parameters(x, effects = effects, parameters = parameters), ci = ci, verbose = verbose, ...),
    insight::clean_parameters(x)
  )
  class(out) <- unique(c("bayestestR_eti", "see_eti", class(out)))
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


#' @rdname eti
#' @export
eti.brmsfit <- function(x, ci = .89, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) {
  effects <- match.arg(effects)
  component <- match.arg(component)
  out <- .prepare_output(
    eti(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), ci = ci, verbose = verbose, ...),
    insight::clean_parameters(x)
  )
  class(out) <- unique(c("bayestestR_eti", "see_eti", class(out)))
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


#' @rdname eti
#' @export
eti.BFBayesFactor <- function(x, ci = .89, verbose = TRUE, ...) {
  out <- eti(insight::get_parameters(x), ci = ci, verbose = verbose, ...)
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


# Core ETI computation for one numeric vector and one credible level:
# the interval is given by the (1 - ci)/2 and (1 + ci)/2 quantiles.
#' @importFrom stats quantile
.eti <- function(x, ci, verbose = TRUE) {
  check_ci <- .check_ci_argument(x, ci, verbose)

  if (!is.null(check_ci)) {
    return(check_ci)
  }

  results <- as.vector(stats::quantile(
    x,
    probs = c((1 - ci) / 2, (1 + ci) / 2),
    names = FALSE
  ))

  data.frame(
    "CI" = ci * 100,
    "CI_low" = results[1],
    "CI_high" = results[2]
  )
}
bayestestR/R/overlap.R0000644000176200001440000000445013552545256014402 0ustar liggesusers
#' Overlap Coefficient
#'
#' A method to calculate the overlap coefficient between two empirical distributions (that can be used as a measure of similarity between two samples).
#'
#' @param x Vector of x values.
#' @param y Vector of y values.
#' @param method_auc Area Under the Curve (AUC) estimation method. See \code{\link{area_under_curve}}.
#' @param method_density Density estimation method. See \code{\link{estimate_density}}.
#' @inheritParams estimate_density
#'
#' @examples
#' library(bayestestR)
#'
#' x <- distribution_normal(1000, 2, 0.5)
#' y <- distribution_normal(1000, 0, 1)
#'
#' overlap(x, y)
#' plot(overlap(x, y))
#' @importFrom stats approxfun
#' @export
overlap <- function(x, y, method_density = "kernel", method_auc = "trapezoid", precision = 2^10, extend = TRUE, extend_scale = 0.1, ...) {
  # Generate densities
  dx <- estimate_density(x, method = method_density, precision = precision, extend = extend, extend_scale = extend_scale, ...)
  dy <- estimate_density(y, method = method_density, precision = precision, extend = extend, extend_scale = extend_scale, ...)

  # Create density estimation functions (rule = 2 extends by the boundary value)
  fx <- approxfun(dx$x, dx$y, method = "linear", rule = 2)
  fy <- approxfun(dy$x, dy$y, method = "linear", rule = 2)

  # Evaluate both densities on a common grid spanning both supports
  x_axis <- seq(min(c(dx$x, dy$x)), max(c(dx$x, dy$x)), length.out = precision)
  data <- data.frame(x = x_axis, y1 = fx(x_axis), y2 = fy(x_axis))

  # calculate intersection densities
  data$intersection <- pmin(data$y1, data$y2)
  data$exclusion <- pmax(data$y1, data$y2)

  # integrate areas under curves
  area_intersection <- area_under_curve(data$x, data$intersection, method = method_auc)
  # area_exclusion <- area_under_curve(data$x, data$exclusion, method = method_auc)

  # compute overlap coefficient
  overlap <- area_intersection
  attr(overlap, "data") <- data

  class(overlap) <- c("overlap", class(overlap))
  overlap
}


#' @export
print.overlap <- function(x, ...) {
  insight::print_color("# Overlap\n\n", "blue")
  cat(sprintf("%.2f", as.numeric(x)))
}


#' @importFrom graphics plot polygon
#' @export
plot.overlap <- function(x, ...) {
  # Can be improved through see
  data <- attributes(x)$data
  plot(data$x, data$exclusion, type = "l")
  polygon(data$x, data$intersection, col = "red")
}
bayestestR/R/equivalence_test.R0000644000176200001440000002631413603574330016275 0ustar liggesusers
#' Test for Practical Equivalence
#'
#' Perform a \strong{Test for Practical Equivalence} for Bayesian and frequentist models.
#'
#' Documentation is accessible for:
#' \itemize{
#'   \item \href{https://easystats.github.io/bayestestR/reference/equivalence_test.html}{Bayesian models}
#'   \item \href{https://easystats.github.io/parameters/reference/equivalence_test.lm.html}{Frequentist models}
#' }
#'
#' For Bayesian models, the \strong{Test for Practical Equivalence} is based on the \emph{"HDI+ROPE decision rule"} (\cite{Kruschke, 2014, 2018}) to check whether parameter values should be accepted or rejected against an explicitly formulated "null hypothesis" (i.e., a ROPE).
#' In other words, it checks the percentage of the 89\% \link[=hdi]{HDI} that is within the null region (the ROPE). If this percentage is sufficiently low, the null hypothesis is rejected. If this percentage is sufficiently high, the null hypothesis is accepted.
#'
#'
#' @inheritParams rope
#'
#' @details Using the \link[=rope]{ROPE} and the \link[=hdi]{HDI}, \cite{Kruschke (2018)}
#' suggests using the percentage of the 95\% (or 89\%, considered more stable)
#' HDI that falls within the ROPE as a decision rule. If the HDI
#' is completely outside the ROPE, the "null hypothesis" for this parameter is
#' "rejected". If the ROPE completely covers the HDI, i.e., all most credible
#' values of a parameter are inside the region of practical equivalence, the
#' null hypothesis is accepted. Else, it’s undecided whether to accept or
#' reject the null hypothesis. If the full ROPE is used (i.e., 100\% of the
#' HDI), then the null hypothesis is rejected or accepted if the percentage
#' of the posterior within the ROPE is smaller than 2.5\% or greater than
#' 97.5\%. Desirable results are low proportions inside the ROPE (the closer
#' to zero the better).
#' \cr \cr
#' Some attention is required for finding suitable values for the ROPE limits
#' (argument \code{range}). See 'Details' in \code{\link[=rope_range]{rope_range()}}
#' for further information.
#' \cr \cr
#' \strong{Multicollinearity: Non-independent covariates}
#' \cr \cr
#' When parameters show strong correlations, i.e. when covariates are not
#' independent, the joint parameter distributions may shift towards or
#' away from the ROPE. In such cases, the test for practical equivalence may
#' have inappropriate results. Collinearity invalidates ROPE and hypothesis
#' testing based on univariate marginals, as the probabilities are conditional
#' on independence.
#' Most problematic are the results of the "undecided"
#' parameters, which may either move further towards "rejection" or away
#' from it (\cite{Kruschke 2014, 340f}).
#' \cr \cr
#' \code{equivalence_test()} performs a simple check for pairwise correlations
#' between parameters, but as there can be collinearity between more than two variables,
#' a first step to check the assumptions of this hypothesis testing is to look
#' at different pair plots. An even more sophisticated check is the projection
#' predictive variable selection (\cite{Piironen and Vehtari 2017}).
#'
#'
#' @references \itemize{
#'   \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}
#'   \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press
#'   \item Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735. \doi{10.1007/s11222-016-9649-y}
#' }
#'
#' @return A data frame with following columns:
#' \itemize{
#'   \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing.
#'   \item \code{CI} The probability of the HDI.
#'   \item \code{ROPE_low}, \code{ROPE_high} The limits of the ROPE. These values are identical for all parameters.
#'   \item \code{ROPE_Percentage} The proportion of the HDI that lies inside the ROPE.
#'   \item \code{ROPE_Equivalence} The "test result", as character. Either "rejected", "accepted" or "undecided".
#'   \item \code{HDI_low} , \code{HDI_high} The lower and upper HDI limits for the parameters.
#' }
#'
#' @note There is a \code{print()}-method with a \code{digits}-argument to control
#'   the amount of digits in the output, and there is a \code{plot()}-method
#'   to visualize the results from the equivalence-test (for models only).
#'
#' @examples
#' library(bayestestR)
#'
#' equivalence_test(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1))
#' equivalence_test(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1))
#' equivalence_test(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1))
#' equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99))
#'
#' # print more digits
#' test <- equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99))
#' print(test, digits = 4)
#' \dontrun{
#' library(rstanarm)
#' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars)
#' equivalence_test(model)
#' equivalence_test(model, ci = c(.50, 1))
#'
#' # plot result
#' test <- equivalence_test(model)
#' plot(test)
#'
#' library(emmeans)
#' equivalence_test(emtrends(model, ~1, "wt"))
#'
#' library(brms)
#' model <- brms::brm(mpg ~ wt + cyl, data = mtcars)
#' equivalence_test(model)
#' equivalence_test(model, ci = c(.50, .99))
#'
#' library(BayesFactor)
#' bf <- ttestBF(x = rnorm(100, 1, 1))
#' equivalence_test(bf)
#' equivalence_test(bf, ci = c(.50, .99))
#' }
#'
#' @importFrom insight print_color
#' @export
equivalence_test <- function(x, ...) {
  UseMethod("equivalence_test")
}


#' @rdname equivalence_test
#' @export
equivalence_test.default <- function(x, ...) {
  NULL
}


#' @rdname equivalence_test
#' @export
equivalence_test.numeric <- function(x, range = "default", ci = .89, verbose = TRUE, ...) {
  # Compute the ROPE coverage; the equivalence decision is derived from it
  rope_data <- rope(x, range = range, ci = ci)
  out <- as.data.frame(rope_data)

  if (all(ci < 1)) {
    # Partial HDI: decision is based on complete exclusion/inclusion in the ROPE
    out$ROPE_Equivalence <- ifelse(out$ROPE_Percentage == 0, "Rejected",
      ifelse(out$ROPE_Percentage == 1, "Accepted", "Undecided")
    )
  } else {
    # Related to guidelines for full rope (https://easystats.github.io/bayestestR/articles/4_Guidelines.html)
    out$ROPE_Equivalence <- ifelse(out$ROPE_Percentage < 0.025, "Rejected",
      ifelse(out$ROPE_Percentage > 0.975, "Accepted", "Undecided")
    )
  }

  # Carry the HDI bounds computed by rope() into the result columns
  out$HDI_low <- attr(rope_data, "HDI_area", exact = TRUE)$CI_low
  out$HDI_high <- attr(rope_data, "HDI_area", exact = TRUE)$CI_high

  # remove attribute
  attr(out, "HDI_area") <- NULL
  attr(out, "data") <- x

  class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out)))

  out
}


#' @rdname equivalence_test
#' @export
equivalence_test.data.frame <- function(x, range = "default", ci = .89, verbose = TRUE, ...) {
  # Test each column separately, then stack the results
  l <- .compact_list(lapply(
    x,
    equivalence_test,
    range = range,
    ci = ci,
    verbose = verbose
  ))

  dat <- do.call(rbind, l)
  out <- data.frame(
    Parameter = rep(names(l), each = nrow(dat) / length(l)),
    dat,
    stringsAsFactors = FALSE
  )
  row.names(out) <- NULL

  attr(out, "object_name") <- .safe_deparse(substitute(x))
  class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out)))
  out
}


#' @rdname equivalence_test
#' @export
equivalence_test.emmGrid <- function(x, range = "default", ci = .89, verbose = TRUE, ...) {
  if (!requireNamespace("emmeans")) {
    stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.")
  }
  xdf <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(x, names = FALSE)))

  out <- equivalence_test(xdf, range = range, ci = ci, verbose = verbose, ...)
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


#' @rdname equivalence_test
#' @export
equivalence_test.BFBayesFactor <- function(x, range = "default", ci = .89, verbose = TRUE, ...) {
  out <- equivalence_test(insight::get_parameters(x), range = range, ci = ci, verbose = verbose, ...)
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


# Shared worker for model objects: resolves the ROPE range, optionally checks
# for multicollinearity, extracts the posterior draws and tests each parameter.
#' @importFrom stats sd
#' @keywords internal
.equivalence_test_models <- function(x, range = "default", ci = .89, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE) {
  if (all(range == "default")) {
    range <- rope_range(x)
  } else if (!all(is.numeric(range)) || length(range) != 2) {
    stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).")
  }

  # Warn about correlated parameters, which invalidate the univariate decision rule
  if (verbose) .check_multicollinearity(x)

  params <- insight::get_parameters(x, component = component, effects = effects, parameters = parameters)
  l <- sapply(
    params,
    equivalence_test,
    range = range,
    ci = ci,
    verbose = verbose,
    simplify = FALSE
  )

  dat <- do.call(rbind, l)
  out <- data.frame(
    Parameter = rep(names(l), each = nrow(dat) / length(l)),
    dat,
    stringsAsFactors = FALSE
  )

  class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out)))
  out
}


#' @rdname equivalence_test
#' @export
equivalence_test.stanreg <- function(x, range = "default", ci = .89, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) {
  effects <- match.arg(effects)
  out <- .equivalence_test_models(x, range, ci, effects, component = "conditional", parameters, verbose)
  # Attach cleaned parameter names for pretty printing/plotting
  out <- merge(out, insight::clean_parameters(x)[, c("Parameter", "Effects", "Cleaned_Parameter")], by = "Parameter", sort = FALSE)
  class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out)))
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


#' @rdname equivalence_test
#' @export
equivalence_test.brmsfit <- function(x, range = "default", ci = .89, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) {
  effects <- match.arg(effects)
  component <- match.arg(component)

  out <- .equivalence_test_models(x, range, ci, effects, component, parameters, verbose)
  out <- merge(out, insight::clean_parameters(x)[, c("Parameter", "Effects", "Component", "Cleaned_Parameter")], by = "Parameter", sort = FALSE)
  class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out)))
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


#' @export
equivalence_test.sim.merMod <- function(x, range = "default", ci = .89, parameters = NULL, verbose = TRUE, ...) {
  out <- .equivalence_test_models(x, range, ci, effects = "fixed", component = "conditional", parameters, verbose = FALSE)
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


#' @export
equivalence_test.sim <- equivalence_test.sim.merMod


#' @export
equivalence_test.mcmc <- function(x, range = "default", ci = .89, parameters = NULL, verbose = TRUE, ...) {
  out <- .equivalence_test_models(as.data.frame(x), range, ci, effects = "fixed", component = "conditional", parameters, verbose = FALSE)
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}
bayestestR/R/hdi.R0000644000176200001440000002323113613227664013502 0ustar liggesusers
#' Highest Density Interval (HDI)
#'
#' Compute the \strong{Highest Density Interval (HDI)} of posterior distributions. All points within this interval have a higher probability density than points outside the interval. The HDI can be used in the context of uncertainty characterisation of posterior distributions as \strong{Credible Interval (CI)}.
#'
#' @param x Vector representing a posterior distribution. Can also be a
#'   \code{stanreg}, \code{brmsfit} or a \code{BayesFactor} model.
#' @param ci Value or vector of probability of the (credible) interval - CI (between 0 and 1)
#'   to be estimated. Default to \code{.89} (89\%).
#' @param effects Should results for fixed effects, random effects or both be returned?
#'   Only applies to mixed models. May be abbreviated.
#' @param component Should results for all parameters, parameters for the conditional model
#'   or the zero-inflated part of the model be returned? May be abbreviated. Only
#'   applies to \pkg{brms}-models.
#' @param parameters Regular expression pattern that describes the parameters that
#'   should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are
#'   filtered by default, so only parameters that typically appear in the
#'   \code{summary()} are returned. Use \code{parameters} to select specific parameters
#'   for the output.
#' @param verbose Toggle off warnings.
#' @param ... Currently not used.
#'
#'
#'
#' @details Unlike equal-tailed intervals (see \code{eti()}) that typically exclude 2.5\%
#' from each tail of the distribution and always include the median, the HDI is
#' \emph{not} equal-tailed and therefore always includes the mode(s) of posterior
#' distributions.
#' \cr \cr
#' By default, \code{hdi()} and \code{eti()} return the 89\% intervals (\code{ci = 0.89}),
#' deemed to be more stable than, for instance, 95\% intervals (\cite{Kruschke, 2014}).
#' An effective sample size of at least 10.000 is recommended if 95\% intervals
#' should be computed (\cite{Kruschke, 2014, p. 183ff}). Moreover, 89 indicates
#' the arbitrariness of interval limits - its only remarkable property is being
#' the highest prime number that does not exceed the already unstable 95\%
#' threshold (\cite{McElreath, 2015}).
#' \cr \cr
#' A 90\% equal-tailed interval (ETI) has 5\% of the distribution on either
#' side of its limits. It indicates the 5th percentile and the 95th percentile.
#' In symmetric distributions, the two methods of computing credible intervals,
#' the ETI and the \link[=hdi]{HDI}, return similar results.
#' \cr \cr
#' This is not the case for skewed distributions. Indeed, it is possible that
#' parameter values in the ETI have lower credibility (are less probable) than
#' parameter values outside the ETI.
#' This property seems undesirable as a summary
#' of the credible values in a distribution.
#' \cr \cr
#' On the other hand, the ETI range does change when transformations are applied
#' to the distribution (for instance, for a log odds scale to probabilities):
#' the lower and higher bounds of the transformed distribution will correspond
#' to the transformed lower and higher bounds of the original distribution.
#' On the contrary, applying transformations to the distribution will change
#' the resulting HDI.
#'
#' @inherit ci return
#'
#' @examples
#' library(bayestestR)
#'
#' posterior <- rnorm(1000)
#' hdi(posterior, ci = .89)
#' hdi(posterior, ci = c(.80, .90, .95))
#'
#' df <- data.frame(replicate(4, rnorm(100)))
#' hdi(df)
#' hdi(df, ci = c(.80, .90, .95))
#'
#' \dontrun{
#' library(rstanarm)
#' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0)
#' hdi(model)
#' hdi(model, ci = c(.80, .90, .95))
#'
#' library(emmeans)
#' hdi(emtrends(model, ~1, "wt"))
#'
#' library(brms)
#' model <- brms::brm(mpg ~ wt + cyl, data = mtcars)
#' hdi(model)
#' hdi(model, ci = c(.80, .90, .95))
#'
#' library(BayesFactor)
#' bf <- ttestBF(x = rnorm(100, 1, 1))
#' hdi(bf)
#' hdi(bf, ci = c(.80, .90, .95))
#' }
#'
#' @author Credits go to \href{https://rdrr.io/cran/ggdistribute/src/R/stats.R}{ggdistribute} and \href{https://github.com/mikemeredith/HDInterval}{HDInterval}.
#'
#' @references \itemize{
#'   \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press.
#'   \item McElreath, R. (2015). Statistical rethinking: A Bayesian course with examples in R and Stan. Chapman and Hall/CRC.
#' }
#'
#' @export
hdi <- function(x, ...) {
  UseMethod("hdi")
}


#' @rdname hdi
#' @export
hdi.numeric <- function(x, ci = .89, verbose = TRUE, ...) {
  # One row per requested credible level
  out <- do.call(rbind, lapply(ci, function(i) {
    .hdi(x, ci = i, verbose = verbose)
  }))
  class(out) <- unique(c("bayestestR_hdi", "see_hdi", "bayestestR_ci", "see_ci", class(out)))
  attr(out, "data") <- x
  out
}


#' @rdname hdi
#' @export
hdi.data.frame <- function(x, ci = .89, verbose = TRUE, ...) {
  dat <- .compute_interval_dataframe(x = x, ci = ci, verbose = verbose, fun = "hdi")
  attr(dat, "object_name") <- .safe_deparse(substitute(x))
  dat
}


#' @rdname hdi
#' @export
hdi.MCMCglmm <- function(x, ci = .89, verbose = TRUE, ...) {
  # Only the fixed-effect columns of the Sol matrix are used
  nF <- x$Fixed$nfl
  d <- as.data.frame(x$Sol[, 1:nF, drop = FALSE])
  dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "hdi")
  # NOTE(review): sibling methods store this under "object_name" via
  # .safe_deparse() — confirm the "data" attribute name is intentional here.
  attr(dat, "data") <- deparse(substitute(x), width.cutoff = 500)
  dat
}


#' @export
hdi.mcmc <- function(x, ci = .89, verbose = TRUE, ...) {
  d <- as.data.frame(x)
  dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "hdi")
  attr(dat, "data") <- .safe_deparse(substitute(x))
  dat
}


#' @rdname hdi
#' @export
hdi.sim.merMod <- function(x, ci = .89, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) {
  effects <- match.arg(effects)
  dat <- .compute_interval_simMerMod(x = x, ci = ci, effects = effects, parameters = parameters, verbose = verbose, fun = "hdi")
  out <- dat$result
  attr(out, "data") <- dat$data
  out
}


#' @rdname hdi
#' @export
hdi.sim <- function(x, ci = .89, parameters = NULL, verbose = TRUE, ...) {
  dat <- .compute_interval_sim(x = x, ci = ci, parameters = parameters, verbose = verbose, fun = "hdi")
  out <- dat$result
  attr(out, "data") <- dat$data
  out
}


#' @rdname hdi
#' @export
hdi.emmGrid <- function(x, ci = .89, verbose = TRUE, ...) {
  if (!requireNamespace("emmeans")) {
    stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.")
  }
  # Convert the emmGrid draws to a plain data frame of posterior samples
  xdf <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(x, names = FALSE)))

  out <- hdi(xdf, ci = ci, verbose = verbose, ...)
  attr(out, "object_name") <- .safe_deparse(substitute(x))
  out
}


#' @importFrom insight get_parameters
#' @rdname hdi
#' @export
hdi.stanreg <- function(x, ci = .89, effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) {
  effects <- match.arg(effects)

  # Compute HDIs on the extracted draws, then re-attach cleaned parameter names
  out <- .prepare_output(
    hdi(insight::get_parameters(x, effects = effects, parameters = parameters), ci = ci, verbose = verbose, ...),
    insight::clean_parameters(x)
  )

  attr(out, "object_name") <- .safe_deparse(substitute(x))
  class(out) <- unique(c("bayestestR_hdi", "see_hdi", class(out)))

  out
}


#' @rdname hdi
#' @export
hdi.brmsfit <- function(x, ci = .89, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) {
  effects <- match.arg(effects)
  component <- match.arg(component)

  out <- .prepare_output(
    hdi(insight::get_parameters(x, effects = effects, component = component, parameters = parameters), ci = ci, verbose = verbose, ...),
    insight::clean_parameters(x)
  )

  attr(out, "object_name") <- .safe_deparse(substitute(x))
  class(out) <- unique(c("bayestestR_hdi", "see_hdi", class(out)))

  out
}


#' @rdname hdi
#' @export
hdi.BFBayesFactor <- function(x, ci = .89, verbose = TRUE, ...) {
  out <- hdi(insight::get_parameters(x), ci = ci, verbose = verbose, ...)
#' @keywords internal
.hdi <- function(x, ci = .89, verbose = TRUE) {
  # Validate the `ci` argument; a non-NULL result is a ready-made
  # (NA-filled) data frame that we return as-is.
  check_ci <- .check_ci_argument(x, ci, verbose)
  if (!is.null(check_ci)) {
    return(check_ci)
  }

  # Sort once; sort.int() removes NA/NaN, but not Inf.
  x_sorted <- unname(sort.int(x, method = "quick"))
  # Number of data points any candidate interval must contain.
  window_size <- ceiling(ci * length(x_sorted))

  # See https://github.com/easystats/bayestestR/issues/39
  if (window_size < 2) {
    if (verbose) {
      warning("`ci` is too small or x does not contain enough data points, returning NAs.")
    }
    return(data.frame(
      "CI" = ci * 100,
      "CI_low" = NA,
      "CI_high" = NA
    ))
  }

  nCIs <- length(x_sorted) - window_size
  if (nCIs < 1) {
    if (verbose) {
      warning("`ci` is too large or x does not contain enough data points, returning NAs.")
    }
    return(data.frame(
      "CI" = ci * 100,
      "CI_low" = NA,
      "CI_high" = NA
    ))
  }

  # Width of every candidate interval holding `window_size` points.
  # vapply() + seq_len() instead of sapply(1:nCIs, ...): type-stable
  # and safe by construction (nCIs >= 1 is guaranteed above).
  ci.width <- vapply(
    seq_len(nCIs),
    function(i) x_sorted[i + window_size] - x_sorted[i],
    numeric(1)
  )

  # The HDI is the narrowest such interval; check for ties (multiple minima).
  min_i <- which(ci.width == min(ci.width))
  n_candies <- length(min_i)

  if (n_candies > 1) {
    if (any(diff(sort(min_i)) != 1)) {
      # Ties on non-adjacent segments: the HDI is ambiguous
      # (e.g. multimodal samples) - fall back to the rightmost candidate.
      if (verbose) {
        warning("Identical densities found along different segments of the distribution, choosing rightmost.", call. = FALSE)
      }
      min_i <- max(min_i)
    } else {
      # Contiguous ties: take the (floored) middle candidate.
      min_i <- floor(mean(min_i))
    }
  }

  data.frame(
    "CI" = ci * 100,
    "CI_low" = x_sorted[min_i],
    "CI_high" = x_sorted[min_i + window_size]
  )
}
{ if ("data_plot" %in% class(x)) { cat(insight::format_table(as.data.frame(x), digits = digits)) } else { insight::print_color("# Description of Posterior Distributions\n\n", "blue") if (!is.null(attributes(x)$ci_method) && tolower(attributes(x)$ci_method) == "si") { cn <- gsub("^CI", "SI", colnames(x)) colnames(x) <- cn } print_data_frame(x, digits = digits) } } bayestestR/R/plot.R0000644000176200001440000000566613603574000013714 0ustar liggesusers#' @export plot.equivalence_test <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot results from equivalence-test. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.p_direction <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot results from p_direction(). Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.point_estimate <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot point-estimates. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.map_estimate <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot point-estimates. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.rope <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot ROPE. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.bayestestR_hdi <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot HDI. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.bayestestR_eti <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot credible intervals. 
Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.bayestestR_si <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot support intervals. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.bayesfactor_parameters <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot Savage-Dickey Bayes factor. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.bayesfactor_models <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot models' Bayes factors. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.estimate_density <- function(x, ...) { if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot densities. Please install it by running `install.packages('see')`.") } NextMethod() } #' @export plot.p_significance <- function(x, ...) 
{ if (!requireNamespace("see", quietly = TRUE)) { stop("Package 'see' needed to plot practical significance Please install it by running `install.packages('see')`.") } NextMethod() } bayestestR/R/utils_print_data_frame.R0000644000176200001440000000444613607554753017471 0ustar liggesusersprint_data_frame <- function(x, digits) { out <- list(x) names(out) <- "fixed" if (all(c("Effects", "Component") %in% colnames(x))) { x$split <- sprintf("%s_%s", x$Effects, x$Component) } else if ("Effects" %in% colnames(x)) { colnames(x)[which(colnames(x) == "Effects")] <- "split" } else if ("Component" %in% colnames(x)) { colnames(x)[which(colnames(x) == "Component")] <- "split" } if ("split" %in% colnames(x)) { out <- lapply(split(x, f = x$split), function(i) { .remove_column(i, c("split", "Component", "Effects")) }) } for (i in names(out)) { header <- switch( i, "conditional" = , "fixed_conditional" = , "fixed" = "# Fixed Effects (Conditional Model)", "zi" = , "zero_inflated" = , "fixed_zero_inflated" = , "fixed_zi" = "# Fixed Effects (Zero-Inflated Model)", "random" = , "random_conditional" = "# Random Effects (Conditional Model)", "random_zero_inflated" = , "random_zi" = "# Random Effects (Zero-Inflated Model)", "smooth_sd" = , "fixed_smooth_sd" = "# Smooth Terms" ) if ("Parameter" %in% colnames(out[[i]])) { # clean parameters names out[[i]]$Parameter <- gsub("(b_|bs_|bsp_|bcs_)(?!zi_)(.*)", "\\2", out[[i]]$Parameter, perl = TRUE) out[[i]]$Parameter <- gsub("(b_zi_|bs_zi_|bsp_zi_|bcs_zi_)(.*)", "\\2", out[[i]]$Parameter, perl = TRUE) # clean random effect parameters names out[[i]]$Parameter <- gsub("r_(.*)\\.(.*)\\.", "\\1", out[[i]]$Parameter) out[[i]]$Parameter <- gsub("b\\[\\(Intercept\\) (.*)\\]", "\\1", out[[i]]$Parameter) out[[i]]$Parameter <- gsub("b\\[(.*) (.*)\\]", "\\2", out[[i]]$Parameter) # clean smooth terms out[[i]]$Parameter <- gsub("^smooth_sd\\[(.*)\\]", "\\1", out[[i]]$Parameter) out[[i]]$Parameter <- gsub("^sds_", "\\1", out[[i]]$Parameter) # remove ".1" 
etc. suffix out[[i]]$Parameter <- gsub("(.*)(\\.)(\\d)$", "\\1 \\3", out[[i]]$Parameter) # remove "__zi" out[[i]]$Parameter <- gsub("__zi", "", out[[i]]$Parameter) } if (length(out) > 1) { insight::print_color(header, "blue") cat("\n\n") } cat(insight::format_table(out[[i]], digits = digits)) cat("\n") } } bayestestR/R/rope_range.R0000644000176200001440000001015113613227664015054 0ustar liggesusers#' @title Find Default Equivalence (ROPE) Region Bounds #' #' @description This function attempts at automatically finding suitable "default" #' values for the Region Of Practical Equivalence (ROPE). #' #' @details \cite{Kruschke (2018)} suggests that the region of practical #' equivalence could be set, by default, to a range from \code{-0.1} to #' \code{0.1} of a standardized parameter (negligible effect size #' according to Cohen, 1988). #' #' \itemize{ #' \item For \strong{linear models (lm)}, this can be generalised to \ifelse{html}{\out{-0.1 * SDy, 0.1 * SDy}}{\eqn{[-0.1*SD_{y}, 0.1*SD_{y}]}}. #' \item For \strong{logistic models}, the parameters expressed in log odds ratio can be converted to standardized difference through the formula \ifelse{html}{\out{π/√(3)}}{\eqn{\pi/\sqrt{3}}}, resulting in a range of \code{-0.18} to \code{0.18}. #' \item For other models with \strong{binary outcome}, it is strongly recommended to manually specify the rope argument. Currently, the same default is applied that for logistic models. #' \item For \strong{t-tests}, the standard deviation of the response is used, similarly to linear models (see above). #' \item For \strong{correlations}, \code{-0.05, 0.05} is used, i.e., half the value of a negligible correlation as suggested by Cohen's (1988) rules of thumb. #' \item For all other models, \code{-0.1, 0.1} is used to determine the ROPE limits, but it is strongly advised to specify it manually. #' } #' #' @param x A \code{stanreg}, \code{brmsfit} or \code{BFBayesFactor} object. 
#' @inheritParams rope #' #' @examples #' \dontrun{ #' if (require("rstanarm")) { #' model <- stan_glm( #' mpg ~ wt + gear, #' data = mtcars, #' chains = 2, #' iter = 200, #' refresh = 0 #' ) #' rope_range(model) #' #' model <- stan_glm(vs ~ mpg, data = mtcars, family = "binomial") #' rope_range(model) #' } #' #' if (require("brms")) { #' model <- brm(mpg ~ wt + cyl, data = mtcars) #' rope_range(model) #' } #' #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' rope_range(bf) #' } #' } #' @references Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. #' #' @importFrom insight get_response model_info is_multivariate #' @importFrom stats sd #' @export rope_range <- function(x, ...) { UseMethod("rope_range") } #' @export rope_range.brmsfit <- function(x, ...) { response <- insight::get_response(x) information <- insight::model_info(x) if (insight::is_multivariate(x)) { mapply(function(i, j) .rope_range(i, j), x, information, response) } else { .rope_range(x, information, response) } } #' @export rope_range.stanreg <- rope_range.brmsfit #' @export rope_range.BFBayesFactor <- rope_range.brmsfit #' @export rope_range.lm <- rope_range.brmsfit #' @export rope_range.merMod <- rope_range.brmsfit #' @export rope_range.default <- function(x, ...) { c(-.1, .1) } .rope_range <- function(x, information, response) { negligible_value <- tryCatch( { # Linear Models if (information$is_linear) { 0.1 * stats::sd(response) # General Linear Models } else if (information$is_binomial) { 0.1 * pi / sqrt(3) # T-tests } else if (information$is_ttest) { if ("BFBayesFactor" %in% class(x)) { 0.1 * stats::sd(x@data[, 1]) } else { warning("Could not estimate a good default ROPE range. Using 'c(-0.1, 0.1)'.", call. 
= FALSE) 0.1 } # Correlations } else if (information$is_correlation) { # https://github.com/easystats/bayestestR/issues/121 0.05 # Default } else { 0.1 } }, error = function(e) { warning("Could not estimate a good default ROPE range. Using 'c(-0.1, 0.1)'.", call. = FALSE) 0.1 } ) c(-1, 1) * negligible_value } bayestestR/R/print.ci.R0000644000176200001440000000415013603574000014447 0ustar liggesusers#' @export print.bayestestR_hdi <- function(x, digits = 2, ...) { if ("data_plot" %in% class(x)) { print(as.data.frame(x)) } else { .print_hdi(x, digits, title = "Highest Density Interval", ci_string = "HDI", ...) } } #' @export print.bayestestR_eti <- function(x, digits = 2, ...) { if ("data_plot" %in% class(x)) { print(as.data.frame(x)) } else { .print_hdi(x, digits, title = "Equal-Tailed Interval", ci_string = "ETI", ...) } } #' @export print.bayestestR_si <- function(x, digits = 2, ...) { .print_hdi(x, digits, title = "Support Interval", ci_string = "SI", ...) } #' @export print.bayestestR_ci <- function(x, digits = 2, ...) { if ("data_plot" %in% class(x)) { print(as.data.frame(x)) } else { .print_hdi(x, digits, title = "Credible Interval", ci_string = "CI", ...) } } .print_hdi <- function(x, digits, title, ci_string, ...) 
{ insight::print_color(sprintf( "# %s%s\n\n", title, ifelse(all(x$CI[1] == x$CI), "", "s") ), "blue") ci <- unique(x$CI) # find the longest HDI-value, so we can align the brackets in the ouput x$CI_low <- sprintf("%.*f", digits, x$CI_low) x$CI_high <- sprintf("%.*f", digits, x$CI_high) maxlen_low <- max(nchar(x$CI_low)) maxlen_high <- max(nchar(x$CI_high)) x$HDI <- sprintf("[%*s, %*s]", maxlen_low, x$CI_low, maxlen_high, x$CI_high) if (length(ci) == 1) { xsub <- .remove_column(x, c("CI", "CI_low", "CI_high")) colnames(xsub)[ncol(xsub)] <- sprintf("%.5g%% %s", ci, ci_string) if (inherits(x,"bayestestR_si")) colnames(xsub)[ncol(xsub)] <- sprintf("BF = %.5g %s", ci, ci_string) print_data_frame(xsub, digits = digits) } else { for (i in ci) { xsub <- x[x$CI == i, -which(colnames(x) == "CI"), drop = FALSE] xsub <- .remove_column(xsub, c("CI", "CI_low", "CI_high")) colnames(xsub)[ncol(xsub)] <- sprintf("%.5g%% %s", i, ci_string) if (inherits(x,"bayestestR_si")) colnames(xsub)[ncol(xsub)] <- sprintf("BF = %.5g %s", ci, ci_string) print_data_frame(xsub, digits = digits) cat("\n") } } } bayestestR/R/print.bayesfactor_parameters.R0000644000176200001440000000204013573411455020607 0ustar liggesusers#' @export print.bayesfactor_parameters <- function(x, digits = 2, log = FALSE, ...) 
{ BFE <- x null <- attr(BFE, "hypothesis") direction <- attr(BFE, "direction") if (log) { BFE$BF <- log(BFE$BF) } BFE$BF <- .format_big_small(BFE$BF, digits = digits) if (length(null) == 1) { insight::print_color("# Bayes Factor (Savage-Dickey density ratio)\n\n", "blue") } else { insight::print_color("# Bayes Factor (Null-Interval)\n\n", "blue") } print_data_frame(BFE, digits = digits) cat("* Evidence Against The Null: ") insight::print_color( paste0("[", paste0(round(null, digits), collapse = ", "), "]\n", sep = ""), "cyan" ) if (direction < 0) { cat("* Direction: ") insight::print_color("Left-Sided test\n", "cyan") } else if (direction > 0) { cat("* Direction: ") insight::print_color("Right-Sided test\n", "cyan") } if (log) insight::print_color("\nBayes Factors are on the log-scale.\n", "red") invisible(x) } bayestestR/R/bayesfactor.R0000644000176200001440000000605513613227664015245 0ustar liggesusers#' Bayes Factors (BF) #' #' This function compte the Bayes factors (BFs) that are appropriate to the input. #' For vectors or single models, it will compute \code{\link[=bayesfactor_parameters]{BFs for single parameters}}, #' or is \code{hypothesis} is specified, \code{\link[=bayesfactor_restricted]{BFs for restricted models}}. #' For multiple models, it will return the BF corresponding to \code{\link[=bayesfactor_models]{comparison between models}} #' and if a model comparison is passed, it will compute the \code{\link[=bayesfactor_inclusion]{inclusion BF}}. #' \cr\cr #' For a complete overview of these functions, read the \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{Bayes factor vignette}. #' #' @param ... A numeric vector, model object(s), or the output from \code{bayesfactor_models}. #' @inheritParams bayesfactor_parameters #' @inheritParams bayesfactor_restricted #' @inheritParams bayesfactor_models #' @inheritParams bayesfactor_inclusion #' #' @return Some type of Bayes factor, depending on the input. 
#' @export
bayesfactor <- function(...,
                        prior = NULL,
                        direction = "two-sided",
                        null = 0,
                        hypothesis = NULL,
                        effects = c("fixed", "random", "all"),
                        verbose = TRUE,
                        denominator = 1,
                        match_models = FALSE,
                        prior_odds = NULL) {
  mods <- list(...)

  if (length(mods) > 1) {
    # Several objects passed: compare the models against each other.
    bayesfactor_models(..., denominator = denominator)
  } else if (inherits(mods[[1]], "bayesfactor_models")) {
    # A previous model comparison: compute inclusion BFs.
    bayesfactor_inclusion(..., match_models = match_models, prior_odds = prior_odds)
  } else if (inherits(mods[[1]], "BFBayesFactor")) {
    # inherits() instead of `class(x) == "..."`: class() may return a
    # vector of classes, and a length > 1 condition is an error in if()
    # on modern R.
    if (inherits(mods[[1]]@numerator[[1]], "BFlinearModel")) {
      bayesfactor_inclusion(..., match_models = match_models, prior_odds = prior_odds)
    } else {
      bayesfactor_models(...)
    }
  } else if (!is.null(hypothesis)) {
    # An order restriction was supplied: BFs for restricted models.
    bayesfactor_restricted(...,
      prior = prior,
      verbose = verbose,
      effects = effects
    )
  } else {
    # Single model or posterior vector: BFs for single parameters.
    bayesfactor_parameters(
      ...,
      prior = prior,
      direction = direction,
      null = null,
      effects = effects,
      verbose = verbose
    )
  }
}
#' #' @param x Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model. #' @param range ROPE's lower and higher bounds. Should be a vector of length two (e.g., \code{c(-0.1, 0.1)}) or \code{"default"}. If \code{"default"}, the range is set to \code{c(-0.1, 0.1)} if input is a vector, and based on \code{\link[=rope_range]{rope_range()}} if a Bayesian model is provided. #' @param ci The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE. #' @param ci_method The type of interval to use to quantify the percentage in ROPE. Can be 'HDI' (default) or 'ETI'. See \code{\link{ci}}. #' #' @inheritParams hdi #' #' @details #' \subsection{ROPE}{ #' Statistically, the probability of a posterior distribution of being #' different from 0 does not make much sense (the probability of a single value #' null hypothesis in a continuous distribution is 0). Therefore, the idea #' underlining ROPE is to let the user define an area around the null value #' enclosing values that are \emph{equivalent to the null} value for practical #' purposes (\cite{Kruschke 2010, 2011, 2014}). #' \cr \cr #' Kruschke (2018) suggests that such null value could be set, by default, #' to the -0.1 to 0.1 range of a standardized parameter (negligible effect #' size according to Cohen, 1988). This could be generalized: For instance, #' for linear models, the ROPE could be set as \code{0 +/- .1 * sd(y)}. #' This ROPE range can be automatically computed for models using the #' \link{rope_range} function. #' \cr \cr #' Kruschke (2010, 2011, 2014) suggests using the proportion of the 95\% #' (or 89\%, considered more stable) \link[=hdi]{HDI} that falls within the #' ROPE as an index for "null-hypothesis" testing (as understood under the #' Bayesian framework, see \code{\link[=equivalence_test]{equivalence_test()}}). 
#' } #' \subsection{Sensitivity to parameter's scale}{ #' It is important to consider the unit (i.e., the scale) of the predictors #' when using an index based on the ROPE, as the correct interpretation of the #' ROPE as representing a region of practical equivalence to zero is dependent #' on the scale of the predictors. Indeed, the percentage in ROPE depend on #' the unit of its parameter. In other words, as the ROPE represents a fixed #' portion of the response's scale, its proximity with a coefficient depends #' on the scale of the coefficient itself. #' } #' \subsection{Multicollinearity: Non-independent covariates}{ #' When parameters show strong correlations, i.e. when covariates are not #' independent, the joint parameter distributions may shift towards or #' away from the ROPE. Collinearity invalidates ROPE and hypothesis #' testing based on univariate marginals, as the probabilities are conditional #' on independence. Most problematic are parameters that only have partial #' overlap with the ROPE region. In case of collinearity, the (joint) distributions #' of these parameters may either get an increased or decreased ROPE, which #' means that inferences based on \code{rope()} are inappropriate #' (\cite{Kruschke 2014, 340f}). #' \cr \cr #' \code{rope()} performs a simple check for pairwise correlations between #' parameters, but as there can be collinearity between more than two variables, #' a first step to check the assumptions of this hypothesis testing is to look #' at different pair plots. An even more sophisticated check is the projection #' predictive variable selection (\cite{Piironen and Vehtari 2017}). #' } #' \subsection{Strengths and Limitations}{ #' \strong{Strengths:} Provides information related to the practical relevance of the effects. #' \cr \cr #' \strong{Limitations:} A ROPE range needs to be arbitrarily defined. Sensitive to the scale (the unit) of the predictors. Not sensitive to highly significant effects. 
#' } #' #' @references \itemize{ #' \item Cohen, J. (1988). Statistical power analysis for the behavioural sciences. #' \item Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. Trends in cognitive sciences, 14(7), 293-300. \doi{10.1016/j.tics.2010.05.001}. #' \item Kruschke, J. K. (2011). Bayesian assessment of null values via parameter estimation and model comparison. Perspectives on Psychological Science, 6(3), 299-312. \doi{10.1177/1745691611406925}. #' \item Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. \doi{10.1177/2515245918771304}. #' \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. #' \item Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} #' \item Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735. 
\doi{10.1007/s11222-016-9649-y} #' } #' #' @examples #' library(bayestestR) #' #' rope(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) #' rope(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1)) #' rope(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1)) #' rope(x = rnorm(1000, 1, 1), ci = c(.90, .95)) #' #' \dontrun{ #' library(rstanarm) #' model <- stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' rope(model) #' rope(model, ci = c(.90, .95)) #' #' library(emmeans) #' rope(emtrends(model, ~1, "wt"), ci = c(.90, .95)) #' #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' rope(model) #' rope(model, ci = c(.90, .95)) #' #' library(BayesFactor) #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' rope(bf) #' rope(bf, ci = c(.90, .95)) #' } #' @importFrom insight get_parameters is_multivariate #' @export rope <- function(x, ...) { UseMethod("rope") } #' @method as.double rope #' @export as.double.rope <- function(x, ...) { x$ROPE_Percentage } #' @rdname rope #' @export rope.default <- function(x, ...) { NULL } #' @rdname rope #' @export rope.numeric <- function(x, range = "default", ci = .89, ci_method = "HDI", verbose = TRUE, ...) 
{ if (all(range == "default")) { range <- c(-0.1, 0.1) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } rope_values <- lapply(ci, function(i) { .rope(x, range = range, ci = i, ci_method = ci_method, verbose = verbose) }) # "do.call(rbind)" does not bind attribute values together # so we need to capture the information about HDI separately out <- do.call(rbind, rope_values) if (nrow(out) > 1) { out$ROPE_Percentage <- as.numeric(out$ROPE_Percentage) } # Attributes hdi_area <- cbind(CI = ci * 100, data.frame(do.call(rbind, lapply(rope_values, attr, "HDI_area")))) names(hdi_area) <- c("CI", "CI_low", "CI_high") attr(out, "HDI_area") <- hdi_area attr(out, "data") <- x class(out) <- unique(c("rope", "see_rope", class(out))) out } #' @rdname rope #' @export rope.data.frame <- function(x, range = "default", ci = .89, ci_method = "HDI", verbose = TRUE, ...) { out <- .prepare_rope_df(x, range, ci, ci_method, verbose) HDI_area_attributes <- .compact_list(out$HDI_area) dat <- data.frame( Parameter = rep(names(HDI_area_attributes), each = length(ci)), out$tmp, stringsAsFactors = FALSE ) row.names(dat) <- NULL attr(dat, "HDI_area") <- HDI_area_attributes attr(dat, "object_name") <- .safe_deparse(substitute(x)) class(dat) <- c("rope", "see_rope", "data.frame") dat } #' @rdname rope #' @export rope.emmGrid <- function(x, range = "default", ci = .89, ci_method = "HDI", verbose = TRUE, ...) { if (!requireNamespace("emmeans")) { stop("Package 'emmeans' required for this function to work. Please install it by running `install.packages('emmeans')`.") } xdf <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(x, names = FALSE))) dat <- rope(xdf, range = range, ci = ci, ci_method = ci_method, verbose = verbose, ...) 
attr(dat, "object_name") <- .safe_deparse(substitute(x)) dat } #' @rdname rope #' @export rope.BFBayesFactor <- function(x, range = "default", ci = .89, ci_method = "HDI", verbose = TRUE, ...) { out <- rope(insight::get_parameters(x), range = range, ci = ci, ci_method = ci_method, verbose = verbose, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @rdname rope #' @export rope.MCMCglmm <- function(x, range = "default", ci = .89, ci_method = "HDI", verbose = TRUE, ...) { nF <- x$Fixed$nfl out <- rope(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), range = range, ci = ci, ci_method = ci_method, verbose = verbose, ...) attr(out, "object_name") <- .safe_deparse(substitute(x)) out } #' @export rope.mcmc <- function(x, range = "default", ci = .89, ci_method = "HDI", verbose = TRUE, ...) { out <- rope(as.data.frame(x), range = range, ci = ci, ci_method = ci_method, verbose = verbose, ...) attr(out, "object_name") <- NULL attr(out, "data") <- .safe_deparse(substitute(x)) out } #' @keywords internal .rope <- function(x, range = c(-0.1, 0.1), ci = .89, ci_method = "HDI", verbose = TRUE) { ci_bounds <- ci(x, ci = ci, method = ci_method, verbose = verbose) if (anyNA(ci_bounds)) { rope_percentage <- NA } else { HDI_area <- x[x >= ci_bounds$CI_low & x <= ci_bounds$CI_high] area_within <- HDI_area[HDI_area >= min(range) & HDI_area <= max(range)] rope_percentage <- length(area_within) / length(HDI_area) } rope <- data.frame( "CI" = ci * 100, "ROPE_low" = range[1], "ROPE_high" = range[2], "ROPE_Percentage" = rope_percentage ) attr(rope, "HDI_area") <- c(ci_bounds$CI_low, ci_bounds$CI_high) attr(rope, "CI_bounds") <- c(ci_bounds$CI_low, ci_bounds$CI_high) class(rope) <- unique(c("rope", "see_rope", class(rope))) rope } #' @rdname rope #' @export rope.stanreg <- function(x, range = "default", ci = .89, ci_method = "HDI", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) 
{ effects <- match.arg(effects) if (all(range == "default")) { range <- rope_range(x) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } # check for possible collinearity that might bias ROPE if (verbose) .check_multicollinearity(x, "rope") rope_data <- rope( insight::get_parameters(x, effects = effects, parameters = parameters), range = range, ci = ci, ci_method = ci_method, verbose = verbose, ... ) out <- .prepare_output(rope_data, insight::clean_parameters(x)) attr(out, "HDI_area") <- attr(rope_data, "HDI_area") attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- class(rope_data) out } #' @rdname rope #' @export rope.brmsfit <- function(x, range = "default", ci = .89, ci_method = "HDI", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, ...) { effects <- match.arg(effects) component <- match.arg(component) if (insight::is_multivariate(x)) { stop("Multivariate response models are not yet supported.") } if (all(range == "default")) { range <- rope_range(x) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } # check for possible collinearity that might bias ROPE if (verbose) .check_multicollinearity(x, "rope") rope_data <- rope( insight::get_parameters(x, effects = effects, component = component, parameters = parameters), range = range, ci = ci, ci_method = ci_method, verbose = verbose, ... ) out <- .prepare_output(rope_data, insight::clean_parameters(x)) attr(out, "HDI_area") <- attr(rope_data, "HDI_area") attr(out, "object_name") <- .safe_deparse(substitute(x)) class(out) <- class(rope_data) out } #' @export rope.sim.merMod <- function(x, range = "default", ci = .89, ci_method = "HDI", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) 
{ effects <- match.arg(effects) if (all(range == "default")) { range <- rope_range(x) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } list <- lapply(c("fixed", "random"), function(.x) { parms <- insight::get_parameters(x, effects = .x, parameters = parameters) getropedata <- .prepare_rope_df(parms, range, ci, ci_method, verbose) tmp <- getropedata$tmp HDI_area <- getropedata$HDI_area if (!.is_empty_object(tmp)) { tmp <- .clean_up_tmp_stanreg( tmp, group = .x, cols = c("CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Group"), parms = names(parms) ) if (!.is_empty_object(HDI_area)) { attr(tmp, "HDI_area") <- HDI_area } } else { tmp <- NULL } tmp }) dat <- do.call(rbind, args = c(.compact_list(list), make.row.names = FALSE)) dat <- switch( effects, fixed = .select_rows(dat, "Group", "fixed"), random = .select_rows(dat, "Group", "random"), dat ) if (all(dat$Group == dat$Group[1])) { dat <- .remove_column(dat, "Group") } HDI_area_attributes <- lapply(.compact_list(list), attr, "HDI_area") if (effects != "all") { HDI_area_attributes <- HDI_area_attributes[[1]] } else { names(HDI_area_attributes) <- c("fixed", "random") } attr(dat, "HDI_area") <- HDI_area_attributes attr(dat, "object_name") <- .safe_deparse(substitute(x)) dat } #' @export rope.sim <- function(x, range = "default", ci = .89, ci_method = "HDI", parameters = NULL, verbose = TRUE, ...) 
{ if (all(range == "default")) { range <- rope_range(x) } else if (!all(is.numeric(range)) || length(range) != 2) { stop("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } parms <- insight::get_parameters(x, parameters = parameters) getropedata <- .prepare_rope_df(parms, range, ci, ci_method, verbose) dat <- getropedata$tmp HDI_area <- getropedata$HDI_area if (!.is_empty_object(dat)) { dat <- .clean_up_tmp_stanreg( dat, group = "fixed", cols = c("CI", "ROPE_low", "ROPE_high", "ROPE_Percentage"), parms = names(parms) ) if (!.is_empty_object(HDI_area)) { attr(dat, "HDI_area") <- HDI_area } } else { dat <- NULL } attr(dat, "object_name") <- .safe_deparse(substitute(x)) dat } #' @keywords internal .prepare_rope_df <- function(parms, range, ci, ci_method, verbose) { tmp <- sapply( parms, rope, range = range, ci = ci, ci_method = ci_method, verbose = verbose, simplify = FALSE ) HDI_area <- lapply(tmp, function(.x) { attr(.x, "HDI_area") }) # HDI_area <- lapply(HDI_area, function(.x) { # dat <- cbind(CI = ci, data.frame(do.call(rbind, .x))) # colnames(dat) <- c("CI", "HDI_low", "HDI_high") # dat # }) list( tmp = do.call(rbind, tmp), HDI_area = HDI_area ) } bayestestR/R/ci.R0000644000176200001440000001335113613227664013333 0ustar liggesusers#' Confidence/Credible/Compatibility Interval (CI) #' #' Compute Confidence/Credible/Compatibility Intervals (CI) or Support Intervals (SI) for Bayesian and frequentist models. The Documentation is accessible for: #' #' \itemize{ #' \item \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{Bayesian models} #' \item \href{https://easystats.github.io/parameters/reference/ci.merMod.html}{Frequentist models} #' } #' #' @param x A \code{stanreg} or \code{brmsfit} model, or a vector representing a posterior distribution. #' @param method Can be \link[=eti]{'ETI'} (default), \link[=hdi]{'HDI'} or \link[=si]{'SI'}. 
#' @param ci Value or vector of probability of the CI (between 0 and 1) #' to be estimated. Default to \code{.89} (89\%) for Bayesian models and \code{.95} (95\%) for frequentist models. #' @inheritParams hdi #' @inheritParams si #' #' @return A data frame with following columns: #' \itemize{ #' \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. #' \item \code{CI} The probability of the credible interval. #' \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. #' } #' #' @note When it comes to interpretation, we recommend thinking of the CI in terms of #' an "uncertainty" or "compatibility" interval, the latter being defined as #' \dQuote{Given any value in the interval and the background assumptions, #' the data should not seem very surprising} (\cite{Gelman & Greenland 2019}). #' #' @references Gelman A, Greenland S. Are confidence intervals better termed "uncertainty intervals"? BMJ 2019;l5381. 
\doi{10.1136/bmj.l5381} #' #' @examples #' library(bayestestR) #' #' posterior <- rnorm(1000) #' ci(posterior, method = "ETI") #' ci(posterior, method = "HDI") #' #' df <- data.frame(replicate(4, rnorm(100))) #' ci(df, method = "ETI", ci = c(.80, .89, .95)) #' ci(df, method = "HDI", ci = c(.80, .89, .95)) #' #' \dontrun{ #' if (require("rstanarm")) { #' model <- stan_glm(mpg ~ wt, data = mtcars, chains = 2, iter = 200, refresh = 0) #' ci(model, method = "ETI", ci = c(.80, .89)) #' ci(model, method = "HDI", ci = c(.80, .89)) #' ci(model, method = "SI") #' } #' #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' ci(model, method = "ETI") #' ci(model, method = "HDI") #' ci(model, method = "SI") #' } #' #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' ci(bf, method = "ETI") #' ci(bf, method = "HDI") #' } #' #' if (require("emmeans")) { #' model <- emtrends(model, ~1, "wt") #' ci(model, method = "ETI") #' ci(model, method = "HDI") #' ci(model, method = "SI") #' } #' } #' @export ci <- function(x, ...) { UseMethod("ci") } #' @keywords internal .ci_bayesian <- function(x, ci = .89, method = "ETI", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, BF = 1, ...) 
{ if (tolower(method) %in% c("eti", "equal", "ci", "quantile")) { return(eti(x, ci = ci, effects = effects, component = component, parameters = parameters, verbose = verbose, ...)) } else if (tolower(method) %in% c("hdi")) { return(hdi(x, ci = ci, effects = effects, component = component, parameters = parameters, verbose = verbose, ...)) } else if (tolower(method) %in% c("si")) { return(si(x, BF = BF, effects = effects, component = component, parameters = parameters, verbose = verbose, ...)) } else { stop("`method` should be 'ETI' (for equal-tailed interval),'HDI' (for highest density interval) or 'SI' (for support interval).") } } #' @rdname ci #' @export ci.numeric <- function(x, ci = .89, method = "ETI", verbose = TRUE, BF = 1, ...) { .ci_bayesian(x, ci = ci, method = method, verbose = verbose, BF = BF, ...) } #' @rdname ci #' @export ci.data.frame <- ci.numeric #' @rdname ci #' @export ci.emmGrid <- ci.numeric #' @rdname ci #' @export ci.sim.merMod <- function(x, ci = .89, method = "ETI", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, ...) { .ci_bayesian(x, ci = ci, method = method, effects = effects, parameters = parameters, verbose = verbose, ...) } #' @rdname ci #' @export ci.sim <- function(x, ci = .89, method = "ETI", parameters = NULL, verbose = TRUE, ...) { .ci_bayesian(x, ci = ci, method = method, parameters = parameters, verbose = verbose, ...) } #' @rdname ci #' @export ci.stanreg <- function(x, ci = .89, method = "ETI", effects = c("fixed", "random", "all"), parameters = NULL, verbose = TRUE, BF = 1,...) { .ci_bayesian(x, ci = ci, method = method, effects = effects, parameters = parameters, verbose = verbose, BF = BF, ...) } #' @rdname ci #' @export ci.brmsfit <- function(x, ci = .89, method = "ETI", effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, verbose = TRUE, BF = 1, ...) 
{ .ci_bayesian(x, ci = ci, method = method, effects = effects, component = component, parameters = parameters, verbose = verbose, BF = BF, ...) } #' @rdname ci #' @export ci.BFBayesFactor <- ci.numeric #' @rdname ci #' @export ci.MCMCglmm <- function(x, ci = .89, method = "ETI", verbose = TRUE, ...) { nF <- x$Fixed$nfl ci(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), ci = ci, method = method, verbose = verbose, ...) } #' @export ci.mcmc <- function(x, ci = .89, method = "ETI", verbose = TRUE, ...) { ci(as.data.frame(x), ci = ci, method = method, verbose = verbose, ...) } bayestestR/R/utils_hdi_ci.R0000644000176200001440000000511413536442410015365 0ustar liggesusers#' @keywords internal .check_ci_argument <- function(x, ci, verbose = TRUE) { if (ci > 1) { if (verbose) { warning("`ci` should be less than 1, returning NAs.") } return(data.frame( "CI" = ci * 100, "CI_low" = NA, "CI_high" = NA )) } if (ci == 1) { return(data.frame( "CI" = ci * 100, "CI_low" = min(x, na.rm = TRUE), "CI_high" = max(x, na.rm = TRUE) )) } if (anyNA(x)) { if (verbose) { warning("The posterior contains NAs, returning NAs.") } return(data.frame( "CI" = ci * 100, "CI_low" = NA, "CI_high" = NA )) } if (length(x) < 3) { if (verbose) { warning("The posterior is too short, returning NAs.") } return(data.frame( "CI" = ci * 100, "CI_low" = NA, "CI_high" = NA )) } NULL } #' @keywords internal .compute_interval_dataframe <- function(x, ci, verbose, fun) { numeric_variables <- sapply(x, is.numeric, simplify = TRUE) out <- .compact_list(lapply( x[, numeric_variables, drop = FALSE], get(fun, asNamespace("bayestestR")), ci = ci, verbose = verbose )) dat <- data.frame( Parameter = rep(names(out), each = length(ci)), do.call(rbind, out), stringsAsFactors = FALSE, row.names = NULL ) class(dat) <- unique(c(paste0("bayestestR_", fun), paste0("see_", fun), class(dat))) dat } #' @keywords internal .compute_interval_simMerMod <- function(x, ci, effects, parameters, verbose, fun) { fixed <- fixed.data <- NULL random 
<- random.data <- NULL if (effects %in% c("fixed", "all")) { fixed.data <- insight::get_parameters(x, effects = "fixed", parameters = parameters) fixed <- .compute_interval_dataframe(fixed.data, ci, verbose, fun) fixed$Group <- "fixed" } if (effects %in% c("random", "all")) { random.data <- insight::get_parameters(x, effects = "random", parameters = parameters) random <- .compute_interval_dataframe(random.data, ci, verbose, fun) random$Group <- "random" } d <- do.call(rbind, list(fixed, random)) if (length(unique(d$Group)) == 1) { d <- .remove_column(d, "Group") } list(result = d, data = do.call(cbind, .compact_list(list(fixed.data, random.data)))) } #' @keywords internal .compute_interval_sim <- function(x, ci, parameters, verbose, fun) { fixed.data <- insight::get_parameters(x, parameters = parameters) d <- .compute_interval_dataframe(fixed.data, ci, verbose, fun) list(result = d, data = fixed.data) } bayestestR/R/print.bayesfactor_restricted.R0000644000176200001440000000114013573411500020603 0ustar liggesusers#' @export print.bayesfactor_restricted <- function(x, digits = 2, log = FALSE, ...) { BFE <- x if (log) { BFE$BF <- log(BFE$BF) } BFE$BF <- .format_big_small(BFE$BF, digits = digits) colnames(BFE) <- c("Hypothesis", "P(Prior)", "P(Posterior)", "BF") insight::print_color("# Bayes Factor (Order-Restriction)\n\n", "blue") print.data.frame(BFE, digits = digits, row.names = FALSE) cat("\n* Bayes factors for the restricted model vs. 
the un-restricted model.\n") if (log) insight::print_color("\nBayes Factors are on the log-scale.\n", "red") invisible(x) } bayestestR/R/utils_get_parameter_names.R0000644000176200001440000000134013465543534020157 0ustar liggesusers#' @importFrom insight find_parameters .get_parameter_names <- function(posterior, effects, component, parameters) { pars <- insight::find_parameters(posterior, flatten = FALSE, parameters = parameters) pars <- switch( effects, "fixed" = pars[c("conditional", "zero_inflated")], "random" = pars[c("random", "zero_inflated_random")], "simplex" = pars["simplex"], "smooth_terms" = pars["smooth_terms"], "all" = pars ) pars <- switch( component, "conditional" = pars[c("conditional", "random", "simplex", "smooth_terms")], "zi" = , "zero_inflated" = pars[c("zero_inflated", "zero_inflated_random", "simplex", "smooth_terms")], "all" = pars ) unlist(pars) } bayestestR/R/print.equivalence_test.R0000644000176200001440000000725113571072460017430 0ustar liggesusers#' @importFrom insight print_color #' @export print.equivalence_test <- function(x, digits = 2, ...) 
{ insight::print_color("# Test for Practical Equivalence\n\n", "blue") cat(sprintf(" ROPE: [%.*f %.*f]\n\n", digits, x$ROPE_low[1], digits, x$ROPE_high[1])) # find the longest HDI-value, so we can align the brackets in the ouput x$HDI_low <- sprintf("%.*f", digits, x$HDI_low) x$HDI_high <- sprintf("%.*f", digits, x$HDI_high) maxlen_low <- max(nchar(x$HDI_low)) maxlen_high <- max(nchar(x$HDI_high)) x$ROPE_Percentage <- sprintf("%.*f %%", digits, x$ROPE_Percentage * 100) x$HDI <- sprintf("[%*s %*s]", maxlen_low, x$HDI_low, maxlen_high, x$HDI_high) # clean parameter names # if ("Parameter" %in% colnames(x) && "Cleaned_Parameter" %in% colnames(x)) { # x$Parameter <- x$Cleaned_Parameter # } ci <- unique(x$CI) keep.columns <- c("CI", "Parameter", "ROPE_Equivalence", "ROPE_Percentage", "HDI", "Effects", "Component") x <- x[, intersect(keep.columns, colnames(x))] colnames(x)[which(colnames(x) == "ROPE_Equivalence")] <- "H0" colnames(x)[which(colnames(x) == "ROPE_Percentage")] <- "inside ROPE" .print_equivalence_component(x, ci, digits) # split_column <- "" # split_column <- c(split_column, ifelse("Component" %in% names(x) && length(unique(x$Component)) > 1, "Component", "")) # split_column <- c(split_column, ifelse("Effects" %in% names(x) && length(unique(x$Effects)) > 1, "Effects", "")) # split_column <- split_column[nchar(split_column) > 0] # # if (length(split_column)) { # # # set up split-factor # if (length(split_column) > 1) { # split_by <- lapply(split_column, function(i) x[[i]]) # } else { # split_by <- list(x[[split_column]]) # } # names(split_by) <- split_column # # # # make sure we have correct sorting here... 
# tables <- split(x, f = split_by) # # for (type in names(tables)) { # # # Don't print Component column # tables[[type]][["Effects"]] <- NULL # tables[[type]][["Component"]] <- NULL # # component_name <- switch( # type, # "fixed" = , # "conditional" = "Fixed Effects", # "random" = "Random Effects", # "conditional.fixed" = "Fixed Effects (Count Model)", # "conditional.random" = "Random Effects (Count Model)", # "zero_inflated" = "Zero-Inflated", # "zero_inflated.fixed" = "Fixed Effects (Zero-Inflated Model)", # "zero_inflated.random" = "Random Effects (Zero-Inflated Model)", # "smooth_sd" = "Smooth Terms (SD)", # "smooth_terms" = "Smooth Terms", # type # ) # # insight::print_color(sprintf(" %s\n\n", component_name), "red") # .print_equivalence_component(tables[[type]], ci, digits) # } # } else { # type <- paste0(unique(x$Component), ".", unique(x$Effects)) # component_name <- switch( # type, # "conditional.fixed" = "Fixed Effects", # "conditional.random" = "Random Effects", # "zero_inflated.fixed" = "Fixed Effects (Zero-Inflated Model)", # "zero_inflated.random" = "Random Effects (Zero-Inflated Model)", # type # ) # # x$Effects <- NULL # x$Component <- NULL # # insight::print_color(sprintf(" %s\n\n", component_name), "red") # .print_equivalence_component(x, ci, digits) # } } .print_equivalence_component <- function(x, ci, digits) { for (i in ci) { xsub <- x[x$CI == i, -which(colnames(x) == "CI"), drop = FALSE] colnames(xsub)[colnames(xsub) == "HDI"] <- sprintf("%i%% HDI", i) print_data_frame(xsub, digits = digits) cat("\n") } } bayestestR/R/utils_flatten_list.R0000644000176200001440000000140213571072516016637 0ustar liggesusers#' Flatten a list #' #' @param object A list. #' @param name Name of column of keys in the case the output is a dataframe. 
#' @keywords internal .flatten_list <- function(object, name = "name") { if (length(object) == 1) { object[[1]] } else if (all(sapply(object, is.data.frame))) { if (is.null(names(object))) { as.data.frame(t(sapply(object, rbind))) } else { tryCatch( { rn <- names(object) object <- do.call(rbind, object) object[name] <- rn object[c(name, setdiff(names(object), name))] }, warning = function(w) { object }, error = function(e) { object } ) } } else { object } } bayestestR/R/check_prior.R0000644000176200001440000000701613613227664015231 0ustar liggesusers#' Check if Prior is Informative #' #' Performs a simple test to check whether the prior is informative to the posterior. This idea, and the accompanying heuristics, were discussed in \href{https://statmodeling.stat.columbia.edu/2019/08/10/}{this blogpost}. #' #' @inheritParams effective_sample #' @param method Can be "gelman" or "lakeland". For the "gelman" method, if the SD of the posterior is more than 0.1 times the SD of the prior, then the prior is considered as informative. For the "lakeland" method, the prior is considered as informative if the posterior falls within the 95\% HDI of the prior. #' @param simulate_priors Should prior distributions be simulated using \code{simulate_prior} (default; faster) or sampled (slower, more accurate). 
#' @examples #' \dontrun{ #' library(bayestestR) #' if (require("rstanarm")) { #' model <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) #' check_prior(model, method = "gelman") #' check_prior(model, method = "lakeland") #' #' # An extreme example where both methods diverge: #' model <- stan_glm(mpg ~ wt, data = mtcars[1:3,], #' prior = normal(-3.3, 1, FALSE), #' prior_intercept = normal(0, 1000, FALSE), #' refresh = 0) #' check_prior(model, method = "gelman") #' check_prior(model, method = "lakeland") #' plot(si(model)) # can provide visual confirmation to the Lakeland method #' } #' } #' @references https://statmodeling.stat.columbia.edu/2019/08/10/ #' #' @export check_prior <- function(model, method = "gelman", simulate_priors = TRUE, ...) { UseMethod("check_prior") } #' @export check_prior.brmsfit <- function(model, method = "gelman", simulate_priors = TRUE, effects = c("fixed", "random", "all"), component = c("conditional", "zi", "zero_inflated", "all"), parameters = NULL, ...) 
{ # check arguments effects <- match.arg(effects) component <- match.arg(component) posteriors <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) if (isTRUE(simulate_priors)) { priors <- simulate_prior( model, effects = effects, component = component, parameters = parameters ) } else { priors <- .update_to_priors(model, verbose = FALSE) priors <- insight::get_parameters( priors, effects = effects, component = component, parameters = parameters ) } .check_prior(priors, posteriors, method) } #' @export check_prior.stanreg <- check_prior.brmsfit #' @importFrom stats sd #' @keywords internal .check_prior <- function(priors, posteriors, method = "gelman") { .gelman <- function(prior, posterior){ if (stats::sd(posterior) > 0.1 * stats::sd(prior)) { "informative" } else { "uninformative" } } .lakeland <- function(prior, posterior){ hdi <- hdi(prior, ci = .95) r <- rope(posterior, ci = 1, range = c(hdi$CI_low, hdi$CI_high)) if (as.numeric(r) > 0.99) { "informative" } else { "misinformative" } } if (method == "gelman") { result <- mapply(.gelman, priors, posteriors) } else if (method == "lakeland") { result <- mapply(.lakeland, priors, posteriors) } else { stop("method should be 'gelman' or 'lakeland'.") } data.frame( Parameter = names(posteriors), Prior_Quality = unname(result), stringsAsFactors = FALSE ) } bayestestR/R/sensitivity_to_prior.R0000644000176200001440000000716613613227664017256 0ustar liggesusers#' Sensitivity to Prior #' #' Computes the sensitivity to priors specification. This represents the proportion of change in some indices when the model is fitted with an antagonistic prior (a prior of same shape located on the opposite of the effect). #' #' @param model A Bayesian model (\code{stanreg} or \code{brmsfit}). #' @param index The indices from which to compute the sensitivity. Can be one or multiple names of the columns returned by \code{describe_posterior}. 
The case is important here (e.g., write 'Median' instead of 'median'). #' @param magnitude This represent the magnitude by which to shift the antagonistic prior (to test the sensitivity). For instance, a magnitude of 10 (default) means that the mode wil be updated with a prior located at 10 standard deviations from its original location. #' @param ... Arguments passed to or from other methods. #' #' @examples #' \dontrun{ #' library(bayestestR) #' #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm")) { #' model <- rstanarm::stan_glm(mpg ~ wt, data = mtcars) #' sensitivity_to_prior(model) #' #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' sensitivity_to_prior(model, index = c("Median", "MAP")) #' } #' #' # brms models #' # ----------------------------------------------- #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' # sensitivity_to_prior(model) #' } #' } #' @importFrom stats update #' @seealso DescTools #' @export sensitivity_to_prior <- function(model, index = "Median", magnitude = 10, ...) { UseMethod("sensitivity_to_prior") } #' @export sensitivity_to_prior.stanreg <- function(model, index = "Median", magnitude = 10, ...) { # Original params <- .extract_parameters(model, index = index, ...) # Priors priors <- .extract_priors_rstanarm(model) new_priors <- .prior_new_location(prior = priors$prior, sign = sign(params$Median), magnitude = magnitude) model_updated <- stats::update(model, data = insight::get_data(model), prior = new_priors, refresh = 0) # New model params_updated <- .extract_parameters(model_updated, index = index, ...) 
# Compute index sensitivity <- abs(as.matrix(params_updated[-1]) - as.matrix(params[-1])) / abs(as.matrix(params[-1])) # Clean up sensitivity <- as.data.frame(sensitivity) names(sensitivity) <- paste0("Sensitivity_", names(params_updated)[-1]) sensitivity <- cbind(params_updated[1], sensitivity) row.names(sensitivity) <- NULL sensitivity } #' @keywords internal .extract_parameters <- function(model, index = "Median", ...) { # Handle BF test <- c("pd", "rope", "p_map") if (any(c("bf", "bayesfactor", "bayes_factor") %in% c(index))) { test <- c(test, "bf") } params <- suppressMessages(describe_posterior(model, centrality = "all", dispersion = TRUE, test = test, ...)) params <- params[params$Parameter != "(Intercept)", ] params[unique(c("Parameter", "Median", index))] } #' Set a new location for a prior #' @keywords internal .prior_new_location <- function(prior, sign, magnitude = 10) { prior$location <- -1 * sign * magnitude * prior$scale prior } #' Extract and Returns the priors formatted for rstanarm #' @keywords internal .extract_priors_rstanarm <- function(model, ...) { priors <- rstanarm::prior_summary(model) # Deal with adjusted scale if (!is.null(priors$prior$adjusted_scale)) { priors$prior$scale <- priors$prior$adjusted_scale priors$prior$adjusted_scale <- NULL } priors$prior$autoscale <- FALSE priors } bayestestR/NEWS.md0000644000176200001440000001641313607554753013521 0ustar liggesusers# bayestestR 0.5.0 ## General - Added `p_pointnull()` as an alias to `p_MAP()`. - Added `si()` function to compute support intervals. - Added `weighted_posteriors()` for generating posterior samples averaged across models. - Added `plot()`-method for `p_significance()`. - `p_significance()` now also works for *brmsfit*-objects. - `estimate_density()` now also works for *MCMCglmm*-objects. - `equivalence_test()` gets `effects` and `component` arguments for *stanreg* and *brmsfit* models, to print specific model components. 
- Support for *mcmc* objects (package **coda**) - Provide more distributions via `distribution()`. - Added `distribution_tweedie()`. - Better handling of `stanmvreg` models for `describe_posterior()`, `diagnostic_posterior()` and `describe_prior()`. ## Breaking changes - `point_estimate()`: argument `centrality` default value changed from 'median' to 'all'. - `p_rope()`, previously as exploratory index, was renamed as `mhdior()` (for *Max HDI inside/outside ROPE*), as `p_rope()` will refer to `rope(..., ci = 1)` ( #258 ) ## Bug fixes - Fixed mistake in description of `p_significance()`. - Fixed error when computing BFs with `emmGrid` based on some non-linear models ( #260 ). - Fixed wrong output for percentage-values in `print.equivalence_test()`. - Fixed issue in `describe_posterior()` for `BFBayesFactor`-objects with more than one model. # bayestestR 0.4.0 ## New functions / features - `convert_bayesian_to_frequentist()` Convert (refit) Bayesian model as frequentist - `distribution_binomial()` for perfect binomial distributions - `simulate_ttest()` Simulate data with a mean difference - `simulate_correlation()` Simulate correlated datasets - `p_significance()` Compute the probability of Practical Significance (ps) - `overlap()` Compute overlap between two empirical distributions - `estimate_density()`: `method = "mixture"` argument added for mixture density estimation ## Bug fixes - Fixed bug in `simulate_prior()` for stanreg-models when `autoscale` was set to `FALSE` # bayestestR 0.3.0 ## General - revised `print()`-methods for functions like `rope()`, `p_direction()`, `describe_posterior()` etc., in particular for model objects with random effects and/or zero-inflation component ## New functions / features - `check_prior()` to check if prior is informative - `simulate_prior()` to simulate model's priors as distributions - `distribution_gamma()` to generate a (near-perfect or random) Gamma distribution - `contr.bayes` function for orthogonal factor coding 
(implementation from Singmann & Gronau's [`bfrms`](https://github.com/bayesstuff/bfrms/), used for proper prior estimation when factor have 3 levels or more. See Bayes factor vignette ## Changes to functions - Added support for `sim`, `sim.merMod` (from `arm::sim()`) and `MCMCglmm`-objects to many functions (like `hdi()`, `ci()`, `eti()`, `rope()`, `p_direction()`, `point_estimate()`, ...) - `describe_posterior()` gets an `effects` and `component` argument, to include the description of posterior samples from random effects and/or zero-inflation component. - More user-friendly warning for non-supported models in `bayesfactor()`-methods ## Bug fixes - Fixed bug in `bayesfactor_inclusion()` where the same interaction sometimes appeared more than once (#223) - Fixed bug in `describe_posterior()` for *stanreg* models fitted with fullrank-algorithm # bayestestR 0.2.5 ## Breaking changes - `rope_range()` for binomial model has now a different default (-.18; .18 ; instead of -.055; .055) - `rope()`: returns a proportion (between 0 and 1) instead of a value between 0 and 100 - `p_direction()`: returns a proportion (between 0.5 and 1) instead of a value between 50 and 100 ([#168](https://github.com/easystats/bayestestR/issues/168)) - `bayesfactor_savagedickey()`: `hypothesis` argument replaced by `null` as part of the new `bayesfactor_parameters()` function ## New functions / features - `density_at()`, `p_map()` and `map_estimate()`: `method` argument added - `rope()`: `ci_method` argument added - `eti()`: Computes equal-tailed intervals - `reshape_ci()`: Reshape CIs between wide/long - `bayesfactor_parameters()`: New function, replacing `bayesfactor_savagedickey()`, allows for computing Bayes factors against a *point-null* or an *interval-null* - `bayesfactor_restricted()`: Function for computing Bayes factors for order restricted models ## Minor changes ## Bug fixes - `bayesfactor_inclusion()` now works with `R < 3.6`. 
# bayestestR 0.2.2 ## Breaking changes - `equivalence_test()`: returns capitalized output (e.g., `Rejected` instead of `rejected`) - `describe_posterior.numeric()`: `dispersion` defaults to `FALSE` for consistency with the other methods ## New functions / features - `pd_to_p()` and `p_to_pd()`: Functions to convert between probability of direction (pd) and p-value - Support of `emmGrid` objects: `ci()`, `rope()`, `bayesfactor_savagedickey()`, `describe_posterior()`, ... ## Minor changes - Improved tutorial 2 ## Bug fixes - `describe_posterior()`: Fixed column order restoration - `bayesfactor_inclusion()`: Inclusion BFs for matched models are more inline with JASP results. # bayestestR 0.2.0 ## Breaking changes - plotting functions now require the installation of the `see` package - `estimate` argument name in `describe_posterior()` and `point_estimate()` changed to `centrality` - `hdi()`, `ci()`, `rope()` and `equivalence_test()` default `ci` to `0.89` - `rnorm_perfect()` deprecated in favour of `distribution_normal()` - `map_estimate()` now returns a single value instead of a dataframe and the `density` parameter has been removed. 
The MAP density value is now accessible via `attributes(map_output)$MAP_density` ## New functions / features - `describe_posterior()`, `describe_prior()`, `diagnostic_posterior()`: added wrapper function - `point_estimate()` added function to compute point estimates - `p_direction()`: new argument `method` to compute pd based on AUC - `area_under_curve()`: compute AUC - `distribution()` functions have been added - `bayesfactor_savagedickey()`, `bayesfactor_models()` and `bayesfactor_inclusion()` functions has been added - Started adding plotting methods (currently in the [`see`](https://github.com/easystats/see) package) for `p_direction()` and `hdi()` - `probability_at()` as alias for `density_at()` - `effective_sample()` to return the effective sample size of Stan-models - `mcse()` to return the Monte Carlo standard error of Stan-models ## Minor changes - Improved documentation - Improved testing - `p_direction()`: improved printing - `rope()` for model-objects now returns the HDI values for all parameters as attribute in a consistent way - Changes legend-labels in `plot.equivalence_test()` to align plots with the output of the `print()`-method (#78) ## Bug fixes - `hdi()` returned multiple class attributes (#72) - Printing results from `hdi()` failed when `ci`-argument had fractional parts for percentage values (e.g. `ci = .995`). - `plot.equivalence_test()` did not work properly for *brms*-models (#76). 
# bayestestR 0.1.0 - CRAN initial publication and [0.1.0 release](https://github.com/easystats/bayestestR/releases/tag/v0.1.0) - Added a `NEWS.md` file to track changes to the package bayestestR/MD50000644000176200001440000002640013620704270012712 0ustar liggesusers5114c771a439319c26c0e20442f82140 *DESCRIPTION d72227c499e120fc5d2bb98a5b94cf1d *NAMESPACE f746c6c199610eb7a0b36e86d19f1a59 *NEWS.md ca56af3c957ced1735e52517a509016b *R/area_under_curve.R db93bc5ed459198affbe7374dca775ac *R/bayesfactor.R c618f984c8fb21d361a7b5e9bb255298 *R/bayesfactor_inclusion.R 51502635787d3a1a4c6d0be56e79bb73 *R/bayesfactor_models.R d19ecca96ae816787c901e61fcdb8712 *R/bayesfactor_parameters.R ba6718d5e8e36fb7e8f77744d618f8c5 *R/bayesfactor_restricted.R 21826f07d00587f303866b095fc864b1 *R/check_prior.R 4d390c4b1e45454618bcfec3c9da0b2f *R/ci.R 0ea5cee4b4b4f79cbdd5d3b810d1bb00 *R/contr.bayes.R 07189f15d11ce54bb97d6d8f864a972c *R/convert_bayesian_to_frequentist.R 7eb0c19b2e00c6460c7969f78026c0ff *R/convert_pd_to_p.R c9c0dee245a463590e5b673742e06c41 *R/describe_posterior.R 87b7aa3a78a826a4ce49c522f5f1c86d *R/describe_prior.R e41d5027528ad30276fd5e61d1dfc9eb *R/diagnostic_posterior.R b39dd6d20e5479a801395e05c04bfec5 *R/distribution.R fd33bf9d622d4d5ae449a2853c72095b *R/effective_sample.R cdea20f09652c2e49f44d5814b4a9c40 *R/equivalence_test.R f610fbfe78810534b99ecffe09469079 *R/estimate_density.R ae9dd19f4f1d7e72d64f97329fddb173 *R/eti.R 25b3a39f8a50d84faad45033355885db *R/hdi.R d951a6fff9e2fc9245b26e212cf7d4b6 *R/map_estimate.R b881e76d759595d3c97e81b9a633b57a *R/mcse.R a46a78ed68c9d3efa302d318e1c3c5ee *R/mhdior.R 56be2b240227802596fe2067aabe7b6f *R/overlap.R 6a90570fce2492a0398d26017371c0fd *R/p_direction.R cc6a9e8a507f24fe9ce53dd6f2738c5a *R/p_map.R 4dc19d7ee86ef59bbd2f0fd599704e39 *R/p_rope.R f28d013542dda4e904859b226007f153 *R/p_significance.R 84bcbd41bb168c4773ad4efdcf3ec817 *R/plot.R 092b73f77351993908e99878e3d6e00f *R/point_estimate.R 3b62236760eace27905ed3510c68d2ee 
*R/print.bayesfactor_inclusion.R ed91f16a4588c6a1b63b06bb4e5b23f6 *R/print.bayesfactor_models.R 0806a5c638bf56ae9f487321f1c30e48 *R/print.bayesfactor_parameters.R f970536fa2efb434f005213cc1d03c9d *R/print.bayesfactor_restricted.R 02c2b1c8822c20cb8cf51dc327acdff6 *R/print.ci.R 30ad3c4e79222b4b370200d3cd277108 *R/print.describe_posterior.R 932cf983e6712d7cc32ce2771ec34b5f *R/print.equivalence_test.R 79a2a99f4da5f9a76a88c4ed606f15a2 *R/print.mhdior.R 69c3092fec984606501ff859a97ce8f8 *R/print.p_direction.R 77a9f004b4a9dd838c4ba0a52d75c938 *R/print.p_map.R 0a5289c69989d46324e9ef5be75e2e64 *R/print.p_rope.R f9722525c3ee7608a9de762b9fe94376 *R/print.p_significance.R e8bc9de7ab87e0156dd95da972f2f48e *R/print.point_estimate.R 71a82835d7b7a9ff9a54a7a018323053 *R/print.rope.R a27b23f7d1275fec344cce60238dbb16 *R/reshape_ci.R 1c06e8b7b1c944013d723d49ea296f1b *R/rope.R bb2e4e61fe93cbb78eaec39b9c7e08f1 *R/rope_range.R 8d4720d7249924612d63b111039b6cf9 *R/sensitivity_to_prior.R feb6781aaa68c86c691d469a151f6742 *R/si.R 4669514a8081926425f760f8078d25f5 *R/simulate_data.R 8c6409b005a15fb643f156b9d1ee75a8 *R/simulate_priors.R c84483dfdaaab5ee29f480e0b64d3cda *R/update.bayesfactor_models.R 0cfba66d6279a5e4901c7302b96d8924 *R/utils.R 9738bd876735dc74345f449d900d418c *R/utils_bayesfactor.R e7bfa20fa35fef1b27162131adb387ac *R/utils_check_collinearity.R 8117073ed9981ca4f4cc32780cbeffaf *R/utils_clean_stan_parameters.R 60064bd2553a426d55406ae563c84424 *R/utils_flatten_list.R 55c28c23ca4a702aaaacaaa140659fdb *R/utils_get_parameter_names.R a01330d9bb13569b022e79a32fd9bfe3 *R/utils_hdi_ci.R d78729909b1e92105894be7d55b49e5f *R/utils_print_data_frame.R 19b93abc36c21f36a7d0ed5c15b67b77 *R/weighted_posteriors.R f9ee060317feab1aa2ba9fc23ed8e7e2 *README.md 2b08001925808cced136f931cd6fa103 *build/partial.rdb 4569e8a78b060c698932cb9f7dc5719a *build/vignette.rds c5cfd3e44877e6f8487a7d57c28dd0e2 *inst/CITATION 4c98584ffc6bba923b03c3ca757ed61d *inst/doc/bayes_factors.R 523dca3aff9ff8b51330518b3a69c078 
*inst/doc/bayes_factors.Rmd fbefdd76a52c31ca06b745e9c311998a *inst/doc/bayes_factors.html 95f74a856454df7ddcfc05957c8830c3 *inst/doc/bayestestR.R 4c495c74c9dfda50f57d8ff10c5b4810 *inst/doc/bayestestR.Rmd c6d210f24b98fbef4c23629432832598 *inst/doc/bayestestR.html 8dcde27cd74dd7d3ae8c3def81ba7085 *inst/doc/credible_interval.R eda02965645efba7d9313ebf5f12d809 *inst/doc/credible_interval.Rmd 4c002207f636009e164b7f33a6a0602f *inst/doc/credible_interval.html ef1b6ba8df5bd550ad0b45e875cdb829 *inst/doc/example1.R 130c757919048db66f66a7eefae98d0c *inst/doc/example1.Rmd 1da97934f2fd1192e38d43c7aeb55840 *inst/doc/example1.html c2ddcb67ca775d8d17fe7102599daedd *inst/doc/example2.R 045d30e850ca04b1662ea1ea300990cf *inst/doc/example2.Rmd 2c5c956c32ee1c527239421704a8cc65 *inst/doc/example2.html d387d006344f6ae4277c9ea06ae0f9ca *inst/doc/example3.R b43bb38c1720e7e78fe61e7189d09934 *inst/doc/example3.Rmd 6b9ba3ea1ab19ce7dde39b32727a4afd *inst/doc/example3.html 921edef273c58a7c01e71085fc1c52d9 *inst/doc/guidelines.R a395b229544cc1d2f161fac6e095fe9f *inst/doc/guidelines.Rmd ec11935a3b1a4eafae661e656644fd4a *inst/doc/guidelines.html d16525a4eab5ab74cc611ce54632a3de *inst/doc/indicesEstimationComparison.R cc359f0d11e4269c6cf2ba65d1273971 *inst/doc/indicesEstimationComparison.Rmd dc978b6f385aa41307ab613870e6cfbc *inst/doc/indicesEstimationComparison.html e6a69c340f408a30999dd4f52fac64ae *inst/doc/indicesExistenceComparison.R db6e8513b63aa5e8167f984909a4218b *inst/doc/indicesExistenceComparison.Rmd 29ec664f74f9f3cc0979f7e61c3f54fd *inst/doc/indicesExistenceComparison.html 5ef206313432216cc1f6223366871307 *inst/doc/probability_of_direction.R 55be2932523c0c4c5f383f4ba29eb564 *inst/doc/probability_of_direction.Rmd bcf3a90c461984651fb65dd01a01b2ce *inst/doc/probability_of_direction.html b1f7bb720f540c1c1e40bd82a57bd779 *inst/doc/region_of_practical_equivalence.R 7b45bcd527d54347434f7de9a2278991 *inst/doc/region_of_practical_equivalence.Rmd 860fa5adb11c0d1914d8ee19c1a98184 
*inst/doc/region_of_practical_equivalence.html 40487b1288ed77673574855fdefe53b3 *man/area_under_curve.Rd b6bc8514c89ddb09e19bca7ea1a4e6a0 *man/as.data.frame.density.Rd 96a01e9c417b3f29dfd0716449ce3c27 *man/as.numeric.p_direction.Rd d2df43126e5f3b85449d2d0862d5b7d7 *man/bayesfactor.Rd 0c918606b8b637435d2e082dacc3950c *man/bayesfactor_inclusion.Rd dd02a5ba876eab56e553e9ee5de7f34d *man/bayesfactor_models.Rd a3763fe3328a344ea5797182d620b173 *man/bayesfactor_parameters.Rd 2b366a73056afc86a8a6d054c348979a *man/bayesfactor_restricted.Rd 430764a991e7889239df6592745d6502 *man/check_prior.Rd 8a20f60bb9889803df5c9277e25829e2 *man/ci.Rd d72fcf0a6f201fd0404b706e34457e2b *man/contr.bayes.Rd 5b41bb435ba8cb8929d81839fb3d4b6f *man/convert_bayesian_as_frequentist.Rd 56f6e1bd1044ea63eb4a6184d727e872 *man/density_at.Rd de06dd6fb62339dce0fd5f4f13b0c0ff *man/describe_posterior.Rd 1f3f80c6a2e42e6c7b97459242227dc3 *man/describe_prior.Rd 013fc2f7e76e53aa62b3f770c6101ce1 *man/diagnostic_posterior.Rd 10aa96b7ed1273dcebf5f2afa0255f6a *man/distribution.Rd bcc88b0337e119b612423920b136f682 *man/dot-extract_priors_rstanarm.Rd 516cdac388adcb1b61a7bf82789b7923 *man/dot-flatten_list.Rd e334b16d81e2e0e72855f616fa2ec951 *man/dot-prior_new_location.Rd bfdba578d513979f5baf71bc2ceeb026 *man/dot-select_nums.Rd a289054ffd192e558dbd04d841fc047e *man/effective_sample.Rd 5c4fb8f9513fac9be64acf0916c306fe *man/equivalence_test.Rd 27b87354c100b41a8de804698e5e18a9 *man/estimate_density.Rd 00529e2b4c4583f65865d66272cf5ba4 *man/eti.Rd 1bbfecd97bea822e2c158547933eaaea *man/figures/logo.png 96074f930a1322a5f0a436fded65d861 *man/figures/unnamed-chunk-10-1.png dbeda87c3be41ff2ca6e40a1e7fe44bc *man/figures/unnamed-chunk-12-1.png af78ab292dc3629a045e89851d5bed40 *man/figures/unnamed-chunk-14-1.png 3bb01b2eb6f7768e76bb752a410d7cec *man/figures/unnamed-chunk-16-1.png 8cd68d6f81242a96a1e03da2354c8424 *man/figures/unnamed-chunk-5-1.png 990f77c7189bf99153ec5a5a690f8b47 *man/figures/unnamed-chunk-6-1.png 
8cd68d6f81242a96a1e03da2354c8424 *man/figures/unnamed-chunk-7-1.png 990f77c7189bf99153ec5a5a690f8b47 *man/figures/unnamed-chunk-8-1.png 5834f9e1810eaad86eb09263ee7c9e2a *man/figures/unnamed-chunk-9-1.png 2a5d2d57dfe8d4230fa41519209a278e *man/hdi.Rd c460a3b3a0505dd23edeb6dd1d40227d *man/map_estimate.Rd b47acb5290373f3f9ffda480cbcb7158 *man/mcse.Rd e4fbc8ce8ffa37c4be6e560e65272ca0 *man/mhdior.Rd 35a0cd8ed06cfcbfda041a467330fe0f *man/overlap.Rd f78fa77029ac0a47418d3c4306957c4b *man/p_direction.Rd 0d43d687e64a039dba3e1ce927c9d12b *man/p_map.Rd aa0aeb751d65ca25531c52d3f511893c *man/p_rope.Rd c0da0197e39c7ae9ea95b89f04c7098b *man/p_significance.Rd 59aa741a7e5732380277d00195d0daaf *man/pd_to_p.Rd f4aa1fcb2868e4f897a97dfb661a6e78 *man/point_estimate.Rd 4ca96e6a7f2d43a154ef6801c9dc1b86 *man/reshape_ci.Rd fe4469708de1aa61c7f2368add344723 *man/rope.Rd 622faf12eb2acfe51a8192e99c2d4e6a *man/rope_range.Rd ce48412dc0edce650696196360f78baf *man/sensitivity_to_prior.Rd dbf943f2a66b3682cb943254827e41a9 *man/si.Rd 1a9a2564f6ef720437eda40666041fcd *man/simulate_correlation.Rd c23398c7e538b76587be2f8d1ea6b6d3 *man/simulate_prior.Rd 35926fb0bcd299a0f01bd32d68598b5f *man/update.bayesfactor_models.Rd 5c8e2de44a53df19a1f188b31c6e5e0b *man/weighted_posteriors.Rd ed019fb28c42d301a471042302b2215d *tests/testthat.R dee6aa3e94316574a6a77f844bfc6676 *tests/testthat/test-BFBayesFactor.R 3904ff21b671aa6c9a5b77a1db4d826b *tests/testthat/test-as.data.frame.density.R 696787de86fcc245298d4e3990506336 *tests/testthat/test-bayesfactor_models.R 56bc34f7bb13a03d26258b646aafa4a2 *tests/testthat/test-bayesfactor_parameters.R 6a13382440e4acb2b43472f659e0ab68 *tests/testthat/test-bayesfactor_restricted.R b0aae3f7f684905f1beb1dadeb668565 *tests/testthat/test-brms.R 8b9a176073fa03b8ae7b27dbe2062bbf *tests/testthat/test-ci.R 38e49cb862d98ffd9c63525870299451 *tests/testthat/test-density_at.R 223678d962c41ef869e7d3e26bb8bc9b *tests/testthat/test-describe_posterior.R c4f42d4a537c0f018a13b374ac54f28a 
*tests/testthat/test-distributions.R 42c3c7f54dd6c8494008ef22bbee10e2 *tests/testthat/test-emmGrid.R f4fd55174bb6f0e88349a8e952795228 *tests/testthat/test-estimate_density.R 1fe6531ff974a4e1f707db5b6274686e *tests/testthat/test-hdi.R 1bacf7a241c04c13a1d7c246c4db137c *tests/testthat/test-map_estimate.R 29f50586b6cd06328dbe998a608dd5c1 *tests/testthat/test-mhdior.R d0f9921d73117843d01b5039a0ea13ba *tests/testthat/test-overlap.R 81559d614e58fefa7b066001668f2ddc *tests/testthat/test-p_direction.R 1f4a180527bd7aab1076ee07d103a2c1 *tests/testthat/test-p_map.R 9e17c88c3917095a9ea28770fd8b3e35 *tests/testthat/test-p_significance.R da6a250677844141e0165c794850f9eb *tests/testthat/test-point_estimate.R 6aba88efc72ac70711d8224cde04a779 *tests/testthat/test-rope.R c26cf5d5a8c65ad12204e7d362ff31c5 *tests/testthat/test-rstanarm.R dfc5bab684f1da3046479f2af2bd8ac3 *tests/testthat/test-si.R 27ccb5acf8bd62a7595de68d64bf4ba1 *tests/testthat/test-simulate_data.R d30ca9906d26b753697791b1cfeab0f8 *tests/testthat/test-weighted_posteriors.R 9ca941f5f2faa90c7d7c0729e22bc376 *vignettes/apa.csl 523dca3aff9ff8b51330518b3a69c078 *vignettes/bayes_factors.Rmd 4c495c74c9dfda50f57d8ff10c5b4810 *vignettes/bayestestR.Rmd 6ea86da55c1e19e166fc89268fa45cef *vignettes/bibliography.bib eda02965645efba7d9313ebf5f12d809 *vignettes/credible_interval.Rmd 130c757919048db66f66a7eefae98d0c *vignettes/example1.Rmd 045d30e850ca04b1662ea1ea300990cf *vignettes/example2.Rmd b43bb38c1720e7e78fe61e7189d09934 *vignettes/example3.Rmd a395b229544cc1d2f161fac6e095fe9f *vignettes/guidelines.Rmd cc359f0d11e4269c6cf2ba65d1273971 *vignettes/indicesEstimationComparison.Rmd db6e8513b63aa5e8167f984909a4218b *vignettes/indicesExistenceComparison.Rmd 55be2932523c0c4c5f383f4ba29eb564 *vignettes/probability_of_direction.Rmd 7b45bcd527d54347434f7de9a2278991 *vignettes/region_of_practical_equivalence.Rmd bayestestR/inst/0000755000176200001440000000000013620150636013356 5ustar 
liggesusersbayestestR/inst/doc/0000755000176200001440000000000013620150636014123 5ustar liggesusersbayestestR/inst/doc/guidelines.html0000644000176200001440000051020313620150417017137 0ustar liggesusers

Reporting Guidelines

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541
  • Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. 10.3389/fpsyg.2019.02767

Reporting Guidelines

How to describe and report the parameters of a model

A Bayesian analysis returns a posterior distribution for each parameter (or effect). To minimally describe these distributions, we recommend reporting a point-estimate of centrality as well as information characterizing the estimation uncertainty (the dispersion). Additionally, one can also report indices of effect existence and/or significance.

Based on the previous comparison of point-estimates and indices of effect existence, we can draw the following recommendations.

Centrality

We suggest reporting the median as an index of centrality, as it is more robust compared to the mean or the MAP estimate. However, in case of severely skewed posterior distributions, the MAP estimate could be a good alternative.

Uncertainty

The 89% Credible Interval (CI) appears as a reasonable range to characterize the uncertainty related to the estimation, being more stable than higher thresholds (such as 90% and 95%). We also recommend computing the CI based on the HDI rather than quantiles, favouring probable - over central - values.

Note that a CI based on the quantile (equal-tailed interval) might be more appropriate in case of transformations (for instance when transforming log-odds to probabilities). Otherwise, intervals that originally do not cover the null might cover it after transformation (see here).

Existence

Reviewer 2 (circa a long time ago in a galaxy far away).

Reviewer 2 (circa a long time ago in a galaxy far away).

The Bayesian framework can neatly delineate and quantify different aspects of hypothesis testing, such as effect existence and significance. The most straightforward index to describe effect existence is the Probability of Direction (pd), representing the certainty associated with the most probable direction (positive or negative) of the effect. This index is easy to understand, simple to interpret, straightforward to compute, robust to model characteristics and independent from the scale of the data.

Moreover, it is strongly correlated with the frequentist p-value, and can thus be used to draw parallels and give some reference to readers unfamiliar with Bayesian statistics. A two-sided p-value of respectively .1, .05, .01 and .001 would correspond approximately to a pd of 95%, 97.5%, 99.5% and 99.95%. Thus, for convenience, we suggest the following reference values as interpretation helpers:

  • pd <= 95% ~ p > .1: uncertain
  • pd > 95% ~ p < .1: possibly existing
  • pd > 97%: likely existing
  • pd > 99%: probably existing
  • pd > 99.9%: certainly existing

Significance

The percentage in ROPE is an index of significance (in its primary meaning), informing us whether a parameter is related - or not - to a non-negligible change (in terms of magnitude) in the outcome. We suggest reporting the percentage of the full posterior distribution (the full ROPE) instead of a given proportion of CI, in the ROPE, which appears more sensitive (especially to delineate highly significant effects). Rather than using it as a binary, all-or-nothing decision criterion, as suggested by the original equivalence test, we recommend using the percentage as a continuous index of significance. However, based on simulation data, we suggest the following reference values as interpretation helpers:

  • > 99% in ROPE: negligible (we can accept the null hypothesis)
  • > 97.5% in ROPE: probably negligible
  • <= 97.5% & >= 2.5% in ROPE: undecided significance
  • < 2.5% in ROPE: probably significant
  • < 1% in ROPE: significant (we can reject the null hypothesis)

Note that extra caution is required as its interpretation highly depends on other parameters such as sample size and ROPE range (see here).

Template Sentence

Based on these suggestions, a template sentence for minimal reporting of a parameter based on its posterior distribution could be:

  • “the effect of X has a probability of pd of being negative (Median = median, 89% CI [ HDIlow , HDIhigh ]) and can be considered as significant (ROPE% in ROPE).”

How to compare different models

Although it can also be used to assess effect existence and significance, the Bayes factor (BF) is a versatile index that can be used to directly compare different models (or data generation processes). The Bayes factor is a ratio, informing us by how much more (or less) likely the observed data are under two compared models - usually a model with an effect vs. a model without the effect. Depending on the specifications of the null model (whether it is a point-estimate (e.g., 0) or an interval), the Bayes factor could be used both in the context of effect existence and significance.

In general, a Bayes factor greater than 1 gives evidence in favour of one of the models, and a Bayes factor smaller than 1 gives evidence in favour of the other model. Several rules of thumb exist to help the interpretation (see here), with > 3 being one common threshold to categorize non-anecdotal evidence.

Template Sentence

When reporting Bayes factors (BF), one can use the following sentence:

  • “There is moderate evidence in favour of an absence of effect of x (BF = BF).”

Note: If you have any advice, opinion or such, we encourage you to let us know by opening a discussion thread or making a pull request.

bayestestR/inst/doc/example3.R0000644000176200001440000000070413620150415015760 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") knitr::opts_chunk$set(dpi=150) options(digits=2) set.seed(333) ## ----echo=FALSE, fig.cap="Yoda Bayes (896 BBY - 4 ABY).", fig.align='center', out.width="80%"---- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/YodaBayes.jpg") bayestestR/inst/doc/example1.Rmd0000644000176200001440000004747013620150172016312 0ustar liggesusers--- title: "1. Initiation to Bayesian models" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Example 1: Initiation to Bayesian models} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) library(insight) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) set.seed(333) if (!requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } format_percent <- function(x, digits = 0, ...) 
{ paste0(format_value(x*100, digits = digits, ...), "%") } ``` Now that you've read the [**Get started**](https://easystats.github.io/bayestestR/articles/bayestestR.html) section, let's dive in the **subtleties of Bayesian modelling using R**. ## Loading the packages Once you've [installed](https://easystats.github.io/bayestestR/articles/bayestestR.html#bayestestr-installation) the necessary packages, we can load `rstanarm` (to fit the models), `bayestestR` (to compute useful indices) and `insight` (to access the parameters). ```{r message=FALSE, warning=FALSE} library(rstanarm) library(bayestestR) library(insight) ``` ## Simple linear model (*aka* a regression) We will begin by conducting a simple linear regression to test the relationship between `Petal.Length` (our predictor, or *independent*, variable) and `Sepal.Length` (our response, or *dependent*, variable) from the [`iris`](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset which is included by default in R. ### Fitting the model Let's start by fitting the **frequentist** version of the model, just to have a reference point: ```{r message=FALSE, warning=FALSE} model <- lm(Sepal.Length ~ Petal.Length, data=iris) summary(model) ``` In this model, the linear relationship between `Petal.Length` and `Sepal.Length` is **positive and significant** (beta = 0.41, *t*(148) = 21.6, *p* < .001). This means that for each one-unit increase in `Petal.Length` (the predictor), you can expect `Sepal.Length` (the response) to increase by **0.41**. This effect can be visualized by plotting the predictor values on the `x` axis and the response values as `y` using the `ggplot2` package: ```{r message=FALSE, warning=FALSE} library(ggplot2) # Load the package # The ggplot function takes the data as argument, and then the variables # related to aesthetic features such as the x and y axes. 
ggplot(iris, aes(x=Petal.Length, y=Sepal.Length)) + geom_point() + # This adds the points geom_smooth(method="lm") # This adds a regression line ``` Now let's fit a **Bayesian version** of the model by using the `stan_glm` function in the `rstanarm` package: ```{r message=FALSE, warning=FALSE, eval=FALSE} model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA, results='hide'} library(rstanarm) set.seed(333) model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris) ``` You can see the sampling algorithm being run. ### Extracting the posterior Once it is done, let us extract the parameters (*i.e.*, coefficients) of the model. ```{r message=FALSE, warning=FALSE, eval=FALSE} posteriors <- insight::get_parameters(model) head(posteriors) # Show the first 6 rows ``` ```{r message=FALSE, warning=FALSE, echo=FALSE} posteriors <- insight::get_parameters(model) head(posteriors) # Show the first 6 rows ``` As we can see, the parameters take the form of a lengthy dataframe with two columns, corresponding to the `intercept` and the effect of `Petal.Length`. These columns contain the **posterior distributions** of these two parameters. In simple terms, the posterior distribution is a set of different plausible values for each parameter. #### About posterior draws Let's look at the length of the posteriors. ```{r message=FALSE, warning=FALSE} nrow(posteriors) # Size (number of rows) ``` > **Why is the size 4000, and not more or less?** First of all, these observations (the rows) are usually referred to as **posterior draws**. The underlying idea is that the Bayesian sampling algorithm (*e.g.*, **Monte Carlo Markov Chains - MCMC**) will *draw* from the hidden true posterior distribution. Thus, it is through these posterior draws that we can estimate the underlying true posterior distribution. **Therefore, the more draws you have, the better your estimation of the posterior distribution**. 
However, increased draws also means longer computation time. If we look at the documentation (`?sampling`) for the rstanarm `"sampling"` algorithm used by default in the model above, we can see several parameters that influence the number of posterior draws. By default, there are **4** `chains` (you can see it as distinct sampling runs), that each create **2000** `iter` (draws). However, only half of these iterations are kept, as half are used for `warm-up` (the convergence of the algorithm). Thus, the total is **`4 chains * (2000 iterations - 1000 warm-up) = 4000`** posterior draws. We can change that, for instance: ```{r message=FALSE, warning=FALSE, eval=FALSE} model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris, chains = 2, iter = 1000, warmup = 250) nrow(insight::get_parameters(model)) # Size (number of rows) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA, echo=FALSE} model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris, chains = 2, iter = 1000, warmup = 250, refresh = 0) nrow(insight::get_parameters(model)) # Size (number of rows) ``` In this case, as would be expected, we have **`2 chains * (1000 iterations - 250 warm-up) = 1500`** posterior draws. But let's keep our first model with the default setup (as it has more draws). #### Visualizing the posterior distribution Now that we've understood where these values come from, let's look at them. We will start by visualizing the posterior distribution of our parameter of interest, the effect of `Petal.Length`. ```{r message=FALSE, warning=FALSE} ggplot(posteriors, aes(x = Petal.Length)) + geom_density(fill = "orange") ``` This distribution represents the [probability](https://en.wikipedia.org/wiki/Probability_density_function) (the y axis) of different effects (the x axis). The central values are more probable than the extreme values. As you can see, this distribution ranges from about **0.35 to 0.50**, with the bulk of it being at around **0.41**. > **Congrats! 
You've just described your posterior distribution.** And this is at the heart of Bayesian analysis. We don't need *p*-values, *t*-values or degrees of freedom: **everything is there**, within this posterior distribution. Our description above is consistent with the values obtained from the frequentist regression (which resulted in a beta of **0.41**). This is reassuring! Indeed, **in most cases a Bayesian analysis does not drastically change the results** or their interpretation. Rather, it makes the results more interpretable and intuitive, and easier to understand and describe. We can now go ahead and **precisely characterize** this posterior distribution. ### Describing the Posterior Unfortunately, it is often not practical to report the whole posterior distributions as graphs. We need to find a **concise way to summarize it**. We recommend to describe the posterior distribution with **3 elements**: 1. A **point-estimate** which is a one-value summary (similar to the *beta* in frequentist regressions). 2. A **credible interval** representing the associated uncertainty. 3. Some **indices of significance**, giving information about the relative importance of this effect. #### Point-estimate **What single value can best represent my posterior distribution?** Centrality indices, such as the *mean*, the *median* or the *mode* are usually used as point-estimates - but what's the difference between them? Let's answer this by first inspecting the **mean**: ```{r message=FALSE, warning=FALSE} mean(posteriors$Petal.Length) ``` This is close to the frequentist beta. But as we know, the mean is quite sensitive to outliers or extremes values. Maybe the **median** could be more robust? ```{r message=FALSE, warning=FALSE} median(posteriors$Petal.Length) ``` Well, this is **very close to the mean** (and identical when rounding the values). Maybe we could take the **mode**, that is, the *peak* of the posterior distribution? 
In the Bayesian framework, this value is called the **Maximum A Posteriori (MAP)**. Let's see: ```{r message=FALSE, warning=FALSE} map_estimate(posteriors$Petal.Length) ``` **They are all very close!** Let's visualize these values on the posterior distribution: ```{r message=FALSE, warning=FALSE} ggplot(posteriors, aes(x = Petal.Length)) + geom_density(fill = "orange") + # The mean in blue geom_vline(xintercept=mean(posteriors$Petal.Length), color="blue", size=1) + # The median in red geom_vline(xintercept=median(posteriors$Petal.Length), color="red", size=1) + # The MAP in purple geom_vline(xintercept=map_estimate(posteriors$Petal.Length), color="purple", size=1) ``` Well, all these values give very similar results. Thus, **we will choose the median**, as this value has a direct meaning from a probabilistic perspective: **there is 50\% chance that the true effect is higher and 50\% chance that the effect is lower** (as it divides the distribution in two equal parts). #### Uncertainty Now that the have a point-estimate, we have to **describe the uncertainty**. We could compute the range: ```{r message=FALSE, warning=FALSE} range(posteriors$Petal.Length) ``` But does it make sense to include all these extreme values? Probably not. Thus, we will compute a [**credible interval**](https://easystats.github.io/bayestestR/articles/credible_interval.html). Long story short, it's kind of similar to a frequentist **confidence interval**, but easier to interpret and easier to compute — *and it makes more sense*. We will compute this **credible interval** based on the [Highest Density Interval (HDI)](https://easystats.github.io/bayestestR/articles/credible_interval.html#different-types-of-cis). It will give us the range containing the 89\% most probable effect values. 
**Note that we will use 89\% CIs instead of 95\%** CIs (as in the frequentist framework), as the 89\% level gives more [stable results](https://easystats.github.io/bayestestR/articles/credible_interval.html#why-is-the-default-89) [@kruschke2014doing] and reminds us about the arbitrariness of such conventions [@mcelreath2018statistical]. ```{r message=FALSE, warning=FALSE} hdi(posteriors$Petal.Length, ci=0.89) ``` Nice, so we can conclude that **the effect has 89\% chance of falling within the `[0.38, 0.44]` range**. We have just computed the two most important pieces of information for describing our effects. #### Effect significance However, in many scientific fields it is not sufficient to simply describe the effects. Scientists also want to know if this effect has significance in practical or statistical terms, or in other words, whether the effect is important. For instance, is the effect different from 0? So how do we **assess the *significance* of an effect**? How can we do this? Well, in this particular case, it is very eloquent: **all possible effect values (*i.e.*, the whole posterior distribution) are positive and over 0.35, which is already substantial evidence the effect is not zero**. But still, we want some objective decision criterion, to say if **yes or no the effect is 'significant'**. One approach, similar to the frequentist framework, would be to see if the **Credible Interval** contains 0. If it is not the case, that would mean that our **effect is 'significant'**. But this index is not very fine-grained, is it? **Can we do better? Yes.** ## A linear model with a categorical predictor Imagine for a moment you are interested in how the weight of chickens varies depending on two different **feed types**. For this example, we will start by selecting from the `chickwts` dataset (available in base R) two feed types of interest for us (*we do have peculiar interests*): **meat meals** and **sunflowers**. 
### Data preparation and model fitting ```{r message=FALSE, warning=FALSE} library(dplyr) # We keep only rows for which feed is meatmeal or sunflower data <- chickwts %>% filter(feed %in% c("meatmeal", "sunflower")) ``` Let's run another Bayesian regression to predict the **weight** with the **two types of feed type**. ```{r message=FALSE, warning=FALSE, eval=FALSE} model <- stan_glm(weight ~ feed, data=data) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA, results='hide'} model <- stan_glm(weight ~ feed, data=data) ``` ### Posterior description ```{r message=FALSE, warning=FALSE} posteriors <- insight::get_parameters(model) ggplot(posteriors, aes(x=feedsunflower)) + geom_density(fill = "red") ``` This represents the **posterior distribution of the difference between `meatmeal` and `sunflowers`**. Seems that the difference is rather **positive** (the values seems concentrated on the right side of 0)... Eating sunflowers makes you more fat (*at least, if you're a chicken*). But, **by how much?** Let us compute the **median** and the **CI**: ```{r message=FALSE, warning=FALSE} median(posteriors$feedsunflower) hdi(posteriors$feedsunflower) ``` It makes you fat by around 51 grams (the median). However, the uncertainty is quite high: **there is 89\% chance that the difference between the two feed types is between 14 and 91.** > **Is this effect different from 0?** ### ROPE Percentage Testing whether this distribution is different from 0 doesn't make sense, as 0 is a single value (*and the probability that any distribution is different from a single value is infinite*). However, one way to assess **significance** could be to define an area around 0, which will consider as *practically equivalent* to zero (*i.e.*, absence of, or negligible, effect). This is called the [**Region of Practical Equivalence (ROPE)**](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html), and is one way of testing the significance of parameters. 
**How can we define this region?** > ***Driing driiiing*** -- ***The easystats team speaking. How can we help?*** -- ***I am Prof. Sanders. An expert in chicks... I mean chickens. Just calling to let you know that based on my expert knowledge, an effect between -20 and 20 is negligible. Bye.*** Well, that's convenient. Now we know that we can define the ROPE as the `[-20, 20]` range. All effects within this range are considered as *null* (negligible). We can now compute the **proportion of the 89\% most probable values (the 89\% CI) which are not null**, *i.e.*, which are outside this range. ```{r message=FALSE, warning=FALSE} rope(posteriors$feedsunflower, range = c(-20, 20), ci=0.89) ``` **5\% of the 89\% CI can be considered as null**. Is that a lot? Based on our [**guidelines**](https://easystats.github.io/bayestestR/articles/guidelines.html), yes, it is too much. **Based on this particular definition of ROPE**, we conclude that this effect is not significant (the probability of being negligible is too high). Although, to be honest, I have **some doubts about this Prof. Sanders**. I don't really trust **his definition of ROPE**. Is there a more **objective** way of defining it? ```{r echo=FALSE, fig.cap="Prof. Sanders giving default values to define the Region of Practical Equivalence (ROPE).", fig.align='center', out.width="75%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/profsanders.png") ``` **Yes.** One common practice is, for instance, to use the **tenth (`1/10 = 0.1`) of the standard deviation (SD)** of the response variable, which can be considered as a "negligible" effect size [@cohen1988statistical]. ```{r message=FALSE, warning=FALSE} rope_value <- 0.1 * sd(data$weight) rope_range <- c(-rope_value, rope_value) rope_range ``` Let's redefine our ROPE as the region within the `[-6.2, 6.2]` range. 
**Note that this can be directly obtained by the `rope_range` function :)** ```{r message=FALSE, warning=FALSE} rope_value <- rope_range(model) rope_range ``` Let's recompute the **percentage in ROPE**: ```{r message=FALSE, warning=FALSE} rope(posteriors$feedsunflower, range = rope_range, ci=0.89) ``` With this reasonable definition of ROPE, we observe that the 89\% of the posterior distribution of the effect does **not** overlap with the ROPE. Thus, we can conclude that **the effect is significant** (in the sense of *important* enough to be noted). ### Probability of Direction (pd) Maybe we are not interested in whether the effect is non-negligible. Maybe **we just want to know if this effect is positive or negative**. In this case, we can simply compute the proportion of the posterior that is positive, no matter the "size" of the effect. ```{r message=FALSE, warning=FALSE} n_positive <- posteriors %>% filter(feedsunflower > 0) %>% # select only positive values nrow() # Get length n_positive / nrow(posteriors) * 100 ``` We can conclude that **the effect is positive with a probability of 98\%**. We call this index the [**Probability of Direction (pd)**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html). It can, in fact, be computed more easily with the following: ```{r message=FALSE, warning=FALSE} p_direction(posteriors$feedsunflower) ``` Interestingly, it so happens that **this index is usually highly correlated with the frequentist *p*-value**. We could almost roughly infer the corresponding *p*-value with a simple transformation: ```{r message=FALSE, warning=FALSE, eval=TRUE} pd <- 97.82 onesided_p <- 1 - pd / 100 twosided_p <- onesided_p * 2 twosided_p ``` If we ran our model in the frequentist framework, we should approximately observe an effect with a *p*-value of `r round(twosided_p, digits=3)`. 
**Is that true?** #### Comparison to frequentist ```{r message=FALSE, warning=FALSE} lm(weight ~ feed, data=data) %>% summary() ``` The frequentist model tells us that the difference is **positive and significant** (beta = 52, p = 0.04). **Although we arrived to a similar conclusion, the Bayesian framework allowed us to develop a more profound and intuitive understanding of our effect, and of the uncertainty of its estimation.** ## All with one function And yet, I agree, it was a bit **tedious** to extract and compute all the indices. **But what if I told you that we can do all of this, and more, with only one function?** > **Behold, `describe_posterior`!** This function computes all of the adored mentioned indices, and can be run directly on the model: ```{r message=FALSE, warning=FALSE} describe_posterior(model, test = c("p_direction","rope","bayesfactor")) ``` **Tada!** There we have it! The **median**, the **CI**, the **pd** and the **ROPE percentage**! Understanding and describing posterior distributions is just one aspect of Bayesian modelling... **Are you ready for more?** [**Click here**](https://easystats.github.io/bayestestR/articles/example2_GLM.html) to see the next example. ## References bayestestR/inst/doc/region_of_practical_equivalence.html0000644000176200001440000021552013620150636023370 0ustar liggesusers

Region of Practical Equivalence (ROPE)

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

What is the ROPE?

Unlike a frequentist approach, Bayesian inference is not based on statistical significance, where effects are tested against “zero”. Indeed, the Bayesian framework offers a probabilistic view of the parameters, allowing assessment of the uncertainty related to them. Thus, rather than concluding that an effect is present when it simply differs from zero, we would conclude that the probability of being outside a specific range that can be considered as “practically no effect” (i.e., a negligible magnitude) is sufficient. This range is called the region of practical equivalence (ROPE).

Indeed, statistically, the probability of a posterior distribution being different from 0 does not make much sense (the probability of it being different from a single point being infinite). Therefore, the idea underlying ROPE is to let the user define an area around the null value enclosing values that are equivalent to the null value for practical purposes (Kruschke, 2014, 2010; Kruschke, Aguinis, & Joo, 2012).

Equivalence Test

The ROPE, being a region corresponding to a “null” hypothesis, is used for the equivalence test, to test whether a parameter is significant (in the sense of important enough to be cared about). This test is usually based on the “HDI+ROPE decision rule” (Kruschke, 2014; Kruschke & Liddell, 2018) to check whether parameter values should be accepted or rejected against an explicitly formulated “null hypothesis” (i.e., a ROPE). In other words, it checks the percentage of Credible Interval (CI) that falls within the null region (the ROPE). If this percentage is sufficiently low, the null hypothesis is rejected. If this percentage is sufficiently high, the null hypothesis is accepted.

Credible interval in ROPE vs full posterior in ROPE

Using the ROPE and the HDI as Credible Interval, Kruschke (2018) suggests using the percentage of the 95% HDI that falls within the ROPE as a decision rule. However, as the 89% HDI is considered a better choice (Kruschke, 2014; McElreath, 2014, 2018), bayestestR provides by default the percentage of the 89% HDI that falls within the ROPE.

However, simulation studies suggest that using the percentage of the full posterior distribution, instead of a CI, might be more sensitive (especially to delineate highly significant effects). Thus, we recommend that the user considers using the full ROPE percentage (by setting ci = 1), which will return the portion of the entire posterior distribution in the ROPE.

What percentage in ROPE to accept or to reject?

If the HDI is completely outside the ROPE, the “null hypothesis” for this parameter is “rejected”. If the ROPE completely covers the HDI, i.e., all most credible values of a parameter are inside the region of practical equivalence, the null hypothesis is accepted. Else, it’s unclear whether the null hypothesis should be accepted or rejected.

If the full ROPE is used (i.e., 100% of the HDI), then the null hypothesis is rejected or accepted if the percentage of the posterior within the ROPE is smaller than 2.5% or greater than 97.5%. Desirable results are low proportions inside the ROPE (the closer to zero the better).

How to define the ROPE range?

Kruschke (2018) suggests that the ROPE could be set, by default, to a range from -0.1 to 0.1 of a standardized parameter (negligible effect size according to Cohen, 1988).

  • For linear models (lm), this can be generalised to: [-0.1 * SD_y, 0.1 * SD_y].
  • For logistic models, the parameters expressed in log odds ratio can be converted to standardized difference through the formula: [\sqrt{3}/\pi], resulting in a range of -0.055 to 0.055. For other models with binary outcome, it is strongly recommended to manually specify the rope argument. Currently, the same default is applied as for logistic models.
  • For t-tests, the standard deviation of the response is used, similarly to linear models (see above).
  • For correlations, -0.05, 0.05 is used, i.e., half the value of a negligible correlation as suggested by Cohen’s (1988) rules of thumb.
  • For all other models, -0.1, 0.1 is used to determine the ROPE limits, but it is strongly advised to specify it manually.

Sensitivity to parameter’s scale

It is important to consider the unit (i.e., the scale) of the predictors when using an index based on the ROPE, as the correct interpretation of the ROPE as representing a region of practical equivalence to zero is dependent on the scale of the predictors. Indeed, unlike other indices (such as the pd), the percentage in ROPE depends on the unit of its parameter. In other words, as the ROPE represents a fixed portion of the response’s scale, its proximity with a coefficient depends on the scale of the coefficient itself.

For instance, if we consider a simple regression growth ~ time, modelling the development of Wookies babies, a negligible change (the ROPE) is less than 54 cm. If our time variable is expressed in days, we will find that the coefficient (representing the growth by day) is of about 10 cm (the median of the posterior of the coefficient is 10). Which we would consider as negligible. However, if we decide to express the time variable in years, the coefficient will be scaled by this transformation (as it will now represent the growth by year). The coefficient will now be around 3650 cm (10 * 365), which we would now consider as significant.

> # Probability of Direction (pd)
> 
> Parameter   |      pd
> ---------------------
> (Intercept) | 100.00%
> Sepal.Width |  92.95%

> # Proportion of samples inside the ROPE [-0.08, 0.08]:
> 
> Parameter   | inside ROPE
> -------------------------
> (Intercept) |      0.00 %
> Sepal.Width |     15.95 %

We can see that the pd and the percentage in ROPE of the linear relationship between Sepal.Length and Sepal.Width are respectively of about 92.95% and 15.95%, corresponding to an uncertain and not significant effect. What happens if we scale our predictor?

> # Probability of Direction (pd)
> 
> Parameter          |      pd
> ----------------------------
> (Intercept)        | 100.00%
> Sepal.Width_scaled |  92.95%

> # Proportion of samples inside the ROPE [-0.08, 0.08]:
> 
> Parameter          | inside ROPE
> --------------------------------
> (Intercept)        |      0.00 %
> Sepal.Width_scaled |      0.15 %

As you can see, by simply dividing the predictor by 100, we drastically changed the conclusion related to the percentage in ROPE (which became very close to 0): the effect could now be interpreted as being significant. Thus, we recommend paying close attention to the unit of the predictors when selecting the ROPE range (e.g., what coefficient would correspond to a small effect?), and when reporting or reading ROPE results.

Multicollinearity: Non-independent covariates

When parameters show strong correlations, i.e., when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are parameters that only have partial overlap with the ROPE region. In case of collinearity, the (joint) distributions of these parameters may either get an increased or decreased ROPE, which means that inferences based on ROPE are inappropriate (Kruschke, 2014).

The equivalence_test() and rope() functions perform a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. An even more sophisticated check is the projection predictive variable selection (Piironen & Vehtari, 2017).

Kruschke, J. (2014). Doing bayesian data analysis: A tutorial with r, jags, and stan. Academic Press.

Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. Trends in Cognitive Sciences, 14(7), 293–300.

Kruschke, J. K., Aguinis, H., & Joo, H. (2012). The time has come: Bayesian methods for data analysis in the organizational sciences. Organizational Research Methods, 15(4), 722–752.

Kruschke, J. K., & Liddell, T. M. (2018). The bayesian new statistics: Hypothesis testing, estimation, meta-analysis, and power analysis from a bayesian perspective. Psychonomic Bulletin & Review, 25(1), 178–206.

McElreath, R. (2014). Rethinking: Statistical rethinking book package. R package version 1.391.

McElreath, R. (2018). Statistical rethinking: A bayesian course with examples in r and stan. Chapman; Hall/CRC.

Piironen, J., & Vehtari, A. (2017). Comparison of bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735.

bayestestR/inst/doc/example1.html0000644000176200001440000103251313620150407016526 0ustar liggesusers

1. Initiation to Bayesian models

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

Now that you’ve read the Get started section, let’s dive in the subtleties of Bayesian modelling using R.

Loading the packages

Once you’ve installed the necessary packages, we can load rstanarm (to fit the models), bayestestR (to compute useful indices) and insight (to access the parameters).

Simple linear model (aka a regression)

We will begin by conducting a simple linear regression to test the relationship between Petal.Length (our predictor, or independent, variable) and Sepal.Length (our response, or dependent, variable) from the iris dataset which is included by default in R.

Fitting the model

Let’s start by fitting the frequentist version of the model, just to have a reference point:

> 
> Call:
> lm(formula = Sepal.Length ~ Petal.Length, data = iris)
> 
> Residuals:
>     Min      1Q  Median      3Q     Max 
> -1.2468 -0.2966 -0.0152  0.2768  1.0027 
> 
> Coefficients:
>              Estimate Std. Error t value Pr(>|t|)    
> (Intercept)    4.3066     0.0784    54.9   <2e-16 ***
> Petal.Length   0.4089     0.0189    21.6   <2e-16 ***
> ---
> Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
> 
> Residual standard error: 0.41 on 148 degrees of freedom
> Multiple R-squared:  0.76,    Adjusted R-squared:  0.758 
> F-statistic:  469 on 1 and 148 DF,  p-value: <2e-16

In this model, the linear relationship between Petal.Length and Sepal.Length is positive and significant (beta = 0.41, t(148) = 21.6, p < .001). This means that for each one-unit increase in Petal.Length (the predictor), you can expect Sepal.Length (the response) to increase by 0.41. This effect can be visualized by plotting the predictor values on the x axis and the response values as y using the ggplot2 package:

Now let’s fit a Bayesian version of the model by using the stan_glm function in the rstanarm package:

You can see the sampling algorithm being run.

Extracting the posterior

Once it is done, let us extract the parameters (i.e., coefficients) of the model.

>   (Intercept) Petal.Length
> 1         4.4         0.40
> 2         4.5         0.37
> 3         4.3         0.41
> 4         4.4         0.40
> 5         4.3         0.41
> 6         4.3         0.42

As we can see, the parameters take the form of a lengthy dataframe with two columns, corresponding to the intercept and the effect of Petal.Length. These columns contain the posterior distributions of these two parameters. In simple terms, the posterior distribution is a set of different plausible values for each parameter.

About posterior draws

Let’s look at the length of the posteriors.

> [1] 4000

Why is the size 4000, and not more or less?

First of all, these observations (the rows) are usually referred to as posterior draws. The underlying idea is that the Bayesian sampling algorithm (e.g., Monte Carlo Markov Chains - MCMC) will draw from the hidden true posterior distribution. Thus, it is through these posterior draws that we can estimate the underlying true posterior distribution. Therefore, the more draws you have, the better your estimation of the posterior distribution. However, increased draws also means longer computation time.

If we look at the documentation (?sampling) for the rstanarm "sampling" algorithm used by default in the model above, we can see several parameters that influence the number of posterior draws. By default, there are 4 chains (you can see it as distinct sampling runs), that each create 2000 iter (draws). However, only half of these iterations are kept, as half are used for warm-up (the convergence of the algorithm). Thus, the total is 4 chains * (2000 iterations - 1000 warm-up) = 4000 posterior draws. We can change that, for instance:

[1] 1500

In this case, as would be expected, we have 2 chains * (1000 iterations - 250 warm-up) = 1500 posterior draws. But let’s keep our first model with the default setup (as it has more draws).

Visualizing the posterior distribution

Now that we’ve understood where these values come from, let’s look at them. We will start by visualizing the posterior distribution of our parameter of interest, the effect of Petal.Length.

This distribution represents the probability (the y axis) of different effects (the x axis). The central values are more probable than the extreme values. As you can see, this distribution ranges from about 0.35 to 0.50, with the bulk of it being at around 0.41.

Congrats! You’ve just described your posterior distribution.

And this is at the heart of Bayesian analysis. We don’t need p-values, t-values or degrees of freedom: everything is there, within this posterior distribution.

Our description above is consistent with the values obtained from the frequentist regression (which resulted in a beta of 0.41). This is reassuring! Indeed, in most cases a Bayesian analysis does not drastically change the results or their interpretation. Rather, it makes the results more interpretable and intuitive, and easier to understand and describe.

We can now go ahead and precisely characterize this posterior distribution.

Describing the Posterior

Unfortunately, it is often not practical to report the whole posterior distributions as graphs. We need to find a concise way to summarize it. We recommend to describe the posterior distribution with 3 elements:

  1. A point-estimate which is a one-value summary (similar to the beta in frequentist regressions).
  2. A credible interval representing the associated uncertainty.
  3. Some indices of significance, giving information about the relative importance of this effect.

Point-estimate

What single value can best represent my posterior distribution?

Centrality indices, such as the mean, the median or the mode are usually used as point-estimates - but what’s the difference between them? Let’s answer this by first inspecting the mean:

> [1] 0.41

This is close to the frequentist beta. But as we know, the mean is quite sensitive to outliers or extremes values. Maybe the median could be more robust?

> [1] 0.41

Well, this is very close to the mean (and identical when rounding the values). Maybe we could take the mode, that is, the peak of the posterior distribution? In the Bayesian framework, this value is called the Maximum A Posteriori (MAP). Let’s see:

> MAP = 0.41

They are all very close! Let’s visualize these values on the posterior distribution:

Well, all these values give very similar results. Thus, we will choose the median, as this value has a direct meaning from a probabilistic perspective: there is 50% chance that the true effect is higher and 50% chance that the effect is lower (as it divides the distribution in two equal parts).

Uncertainty

Now that we have a point-estimate, we have to describe the uncertainty. We could compute the range:

> [1] 0.35 0.48

But does it make sense to include all these extreme values? Probably not. Thus, we will compute a credible interval. Long story short, it’s kind of similar to a frequentist confidence interval, but easier to interpret and easier to compute — and it makes more sense.

We will compute this credible interval based on the Highest Density Interval (HDI). It will give us the range containing the 89% most probable effect values. Note that we will use 89% CIs instead of 95% CIs (as in the frequentist framework), as the 89% level gives more stable results (Kruschke, 2014) and reminds us about the arbitrarity of such conventions (McElreath, 2018).

> # Highest Density Interval
> 
> 89% HDI     
> ------------
> [0.38, 0.44]

Nice, so we can conclude that the effect has 89% chance of falling within the [0.38, 0.44] range. We have just computed the two most important pieces of information for describing our effects.

Effect significance

However, in many scientific fields it is not sufficient to simply describe the effects. Scientists also want to know if this effect has significance in practical or statistical terms, or in other words, whether the effect is important. For instance, is the effect different from 0? So how do we assess the significance of an effect? How can we do this?

Well, in this particular case, it is very eloquent: all possible effect values (i.e., the whole posterior distribution) are positive and over 0.35, which is already substantial evidence the effect is not zero.

But still, we want some objective decision criterion, to say if yes or no the effect is ‘significant’. One approach, similar to the frequentist framework, would be to see if the Credible Interval contains 0. If it is not the case, that would mean that our effect is ‘significant’.

But this index is not very fine-grained, is it? Can we do better? Yes.

A linear model with a categorical predictor

Imagine for a moment you are interested in how the weight of chickens varies depending on two different feed types. For this example, we will start by selecting from the chickwts dataset (available in base R) two feed types of interest for us (we do have peculiar interests): meat meals and sunflowers.

Data preparation and model fitting

Let’s run another Bayesian regression to predict the weight with the two types of feed type.

Posterior description

This represents the posterior distribution of the difference between meatmeal and sunflowers. Seems that the difference is rather positive (the values seems concentrated on the right side of 0)… Eating sunflowers makes you more fat (at least, if you’re a chicken). But, by how much? Let us compute the median and the CI:

> [1] 51
> # Highest Density Interval
> 
> 89% HDI      
> -------------
> [7.77, 87.66]

It makes you fat by around 51 grams (the median). However, the uncertainty is quite high: there is 89% chance that the difference between the two feed types is between 8 and 88.

Is this effect different from 0?

ROPE Percentage

Testing whether this distribution is different from 0 doesn’t make sense, as 0 is a single value (and the probability that any distribution is different from a single value is infinite).

However, one way to assess significance could be to define an area around 0, which will consider as practically equivalent to zero (i.e., absence of, or negligible, effect). This is called the Region of Practical Equivalence (ROPE), and is one way of testing the significance of parameters.

How can we define this region?

Driing driiiing

The easystats team speaking. How can we help?

I am Prof. Sanders. An expert in chicks… I mean chickens. Just calling to let you know that based on my expert knowledge, an effect between -20 and 20 is negligible. Bye.

Well, that’s convenient. Now we know that we can define the ROPE as the [-20, 20] range. All effects within this range are considered as null (negligible). We can now compute the proportion of the 89% most probable values (the 89% CI) which are not null, i.e., which are outside this range.

> # Proportion of samples inside the ROPE [-20.00, 20.00]:
> 
> inside ROPE
> -----------
> 7.75 %

7.75% of the 89% CI can be considered as null. Is that a lot? Based on our guidelines, yes, it is too much. Based on this particular definition of ROPE, we conclude that this effect is not significant (the probability of being negligible is too high).

Although, to be honest, I have some doubts about this Prof. Sanders. I don’t really trust his definition of ROPE. Is there a more objective way of defining it?

Prof. Sanders giving default values to define the Region of Practical Equivalence (ROPE).

Prof. Sanders giving default values to define the Region of Practical Equivalence (ROPE).

Yes. One practice, for instance, is to use one tenth (1/10 = 0.1) of the standard deviation (SD) of the response variable, which can be considered as a “negligible” effect size (Cohen, 1988).

> [1] -6.2  6.2

Let’s redefine our ROPE as the region within the [-6.2, 6.2] range. Note that this can be directly obtained by the rope_range function :)

> [1] -6.2  6.2

Let’s recompute the percentage in ROPE:

> # Proportion of samples inside the ROPE [-6.17, 6.17]:
> 
> inside ROPE
> -----------
> 0.00 %

With this reasonable definition of ROPE, we observe that 89% of the posterior distribution of the effect does not overlap with the ROPE. Thus, we can conclude that the effect is significant (in the sense of important enough to be noted).

Probability of Direction (pd)

Maybe we are not interested in whether the effect is non-negligible. Maybe we just want to know if this effect is positive or negative. In this case, we can simply compute the proportion of the posterior that is positive, no matter the “size” of the effect.

> [1] 98

We can conclude that the effect is positive with a probability of 98%. We call this index the Probability of Direction (pd). It can, in fact, be computed more easily with the following:

> pd = 97.82%

Interestingly, it so happens that this index is usually highly correlated with the frequentist p-value. We could almost roughly infer the corresponding p-value with a simple transformation:

> [1] 0.044

If we ran our model in the frequentist framework, we should approximately observe an effect with a p-value of 0.04. Is that true?

Comparison to frequentist

> 
> Call:
> lm(formula = weight ~ feed, data = data)
> 
> Residuals:
>     Min      1Q  Median      3Q     Max 
> -123.91  -25.91   -6.92   32.09  103.09 
> 
> Coefficients:
>               Estimate Std. Error t value Pr(>|t|)    
> (Intercept)      276.9       17.2   16.10  2.7e-13 ***
> feedsunflower     52.0       23.8    2.18     0.04 *  
> ---
> Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
> 
> Residual standard error: 57 on 21 degrees of freedom
> Multiple R-squared:  0.185,   Adjusted R-squared:  0.146 
> F-statistic: 4.77 on 1 and 21 DF,  p-value: 0.0405

The frequentist model tells us that the difference is positive and significant (beta = 52, p = 0.04).

Although we arrived at a similar conclusion, the Bayesian framework allowed us to develop a more profound and intuitive understanding of our effect, and of the uncertainty of its estimation.

All with one function

And yet, I agree, it was a bit tedious to extract and compute all the indices. But what if I told you that we can do all of this, and more, with only one function?

Behold, describe_posterior!

This function computes all of the aforementioned indices, and can be run directly on the model:

> # Description of Posterior Distributions
> 
> Parameter     |  Median | CI |  CI_low | CI_high |    pd | ROPE_CI | ROPE_low | ROPE_high | ROPE_Percentage |          BF |  Rhat |  ESS
> ----------------------------------------------------------------------------------------------------------------------------------------
> (Intercept)   | 277.269 | 89 | 250.192 | 307.423 | 1.000 |      89 |   -6.175 |     6.175 |               0 | 8.80065e+11 | 1.000 | 3437
> feedsunflower |  50.801 | 89 |   7.775 |  87.662 | 0.978 |      89 |   -6.175 |     6.175 |               0 |       1.375 | 1.001 | 3316

Tada! There we have it! The median, the CI, the pd and the ROPE percentage!

Understanding and describing posterior distributions is just one aspect of Bayesian modelling… Are you ready for more? Click here to see the next example.

References

Cohen, J. (1988). Statistical power analysis for the social sciences.

Kruschke, J. (2014). Doing bayesian data analysis: A tutorial with r, jags, and stan. Academic Press.

McElreath, R. (2018). Statistical rethinking: A bayesian course with examples in r and stan. Chapman; Hall/CRC.

bayestestR/inst/doc/guidelines.Rmd0000644000176200001440000002032513605504705016724 0ustar liggesusers--- title: "Reporting Guidelines" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > %\VignetteIndexEntry{Reporting Guidelines} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) ``` This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) --- # Reporting Guidelines ## How to describe and report the parameters of a model A Bayesian analysis returns a posterior distribution for each parameter (or *effect*). To minimally describe these distributions, we recommend reporting a point-estimate of [centrality](https://en.wikipedia.org/wiki/Central_tendency) as well as information characterizing the estimation uncertainty (the [dispersion](https://en.wikipedia.org/wiki/Statistical_dispersion)). Additionally, one can also report indices of effect existence and/or significance. 
Based on the previous [**comparison of point-estimates**](https://easystats.github.io/bayestestR/articles/indicesEstimationComparison.html) and [**indices of effect existence**](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full), we can draw the following recommendations. #### **Centrality** We suggest reporting the [**median**](https://easystats.github.io/bayestestR/reference/point_estimate.html) as an index of centrality, as it is more robust compared to the [mean](https://easystats.github.io/bayestestR/reference/point_estimate.html) or the [MAP estimate](https://easystats.github.io/bayestestR/reference/map_estimate.html). However, in case of severly skewed posterior distributions, the MAP estimate could be a good alternative. #### **Uncertainty** The [**89\% Credible Interval (CI)**](https://easystats.github.io/bayestestR/articles/credible_interval.html) appears as a reasonable range to characterize the uncertainty related to the estimation, being more stable than higher thresholds (such as 90\% and 95\%). We also recommend computing the CI based on the [HDI](https://easystats.github.io/bayestestR/reference/hdi.html) rather than [quantiles](https://easystats.github.io/bayestestR/reference/ci.html), favouring probable, - over central - values. Note that a CI based on the quantile (equal-tailed interval) might be more appropriate in case of transformations (for instance when transforming log-odds to probabilities). Otherwise, intervals that originally do not cover the null might cover it after transformation (see [here](https://easystats.github.io/bayestestR/articles/credible_interval.html#different-types-of-cis)). 
#### **Existence** ```{r echo=FALSE, fig.cap="Reviewer 2 (circa a long time ago in a galaxy far away).", fig.align='center', out.width="60%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/watto.jpg") ``` The Bayesian framework can neatly delineate and quantify different aspects of hypothesis testing, such as effect *existence* and *significance*. The most straightforward index to describe effect existence is the [**Probability of Direction (pd)**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html), representing the certainty associated with the most probable direction (positive or negative) of the effect. This index is easy to understand, simple to interpret, straightforward to compute, robust to model characteristics and independent from the scale of the data. Moreover, it is strongly correlated with the frequentist ***p*-value**, and can thus be used to draw parallels and give some reference to readers non-familiar with Bayesian statistics. A **two-sided *p*-value** of respectively `.1`, `.05`, `.01` and `.001` would correspond approximately to a ***pd*** of 95\%, 97.5\%, 99.5\% and 99.95\%. Thus, for convenience, we suggest the following reference values as an interpretation helpers: - *pd* **\<= 95\%** ~ *p* \> .1: uncertain - *pd* **\> 95\%** ~ *p* \< .1: possibly existing - *pd* **\> 97\%**: likely existing - *pd* **\> 99\%**: probably existing - *pd* **\> 99.9\%**: certainly existing #### **Significance** The percentage in **ROPE** is a index of **significance** (in its primary meaning), informing us whether a parameter is related - or not - to a non-negligible change (in terms of magnitude) in the outcome. We suggest reporting the **percentage of the full posterior distribution** (the *full* ROPE) instead of a given proportion of CI, in the ROPE, which appears as more sensitive (especially to delineate highly significant effects). 
Rather than using it as a binary, all-or-nothing decision criterion, such as suggested by the original [equivalence test](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html#equivalence-test), we recommend using the percentage as a continuous index of significance. However, based on [simulation data](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full), we suggest the following reference values as interpretation helpers: - **\> 99\%** in ROPE: negligible (we can accept the null hypothesis) - **\> 97.5\%** in ROPE: probably negligible - **\<= 97.5\%** \& **\>= 2.5\%** in ROPE: undecided significance - **\< 2.5\%** in ROPE: probably significant - **\< 1\%** in ROPE: significant (we can reject the null hypothesis) *Note that extra caution is required as its interpretation highly depends on other parameters such as sample size and ROPE range (see [here](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html#sensitivity-to-parameters-scale))*. #### **Template Sentence** Based on these suggestions, a template sentence for minimal reporting of a parameter based on its posterior distribution could be: - "the effect of *X* has a probability of ***pd*** of being *negative* (Median = ***median***, 89\% CI [ ***HDIlow*** , ***HDIhigh*** ]) and can be considered as *significant* (***ROPE***\% in ROPE)." ## How to compare different models Although it can also be used to assess effect existence and significance, the **Bayes factor (BF)** is a versatile index that can be used to directly compare different models (or data generation processes). The [Bayes factor](https://easystats.github.io/bayestestR/articles/bayes_factors.html) is a ratio, informing us by how much more (or less) likely the observed data are under two compared models - usually a model with an effect vs. a model *without* the effect. 
Depending on the specifications of the null model (whether it is a point-estimate (e.g., **0**) or an interval), the Bayes factor could be used both in the context of effect existence and significance. In general, a Bayes factor greater than 1 gives evidence in favour of one of the models, and a Bayes factor smaller than 1 gives evidence in favour of the other model. Several rules of thumb exist to help the interpretation (see [here](https://easystats.github.io/report/articles/interpret_metrics.html#bayes-factor-bf)), with **\> 3** being one common threshold to categorize non-anecdotal evidence. #### **Template Sentence** When reporting Bayes factors (BF), one can use the following sentence: - "There is *moderate evidence* in favour of an *absence* of effect of *x* (BF = *BF*)." *Note: If you have any advice, opinion or such, we encourage you to let us know by opening a [discussion thread](https://github.com/easystats/bayestestR/issues) or making a pull request.* bayestestR/inst/doc/indicesExistenceComparison.html0000644000176200001440000003153213620150455022335 0ustar liggesusers

In-Depth 2: Comparison of Indices of Effect Existence and Significance

This vignette can be referred to by citing the following:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541
  • Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. 10.3389/fpsyg.2019.02767

Indices of Effect Existence and Significance in the Bayesian Framework

A comparison of different Bayesian indices (pd, BFs, ROPE etc.) is accessible here.

bayestestR/inst/doc/probability_of_direction.R0000644000176200001440000001006713620150632021312 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ if (!requireNamespace("see", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("tidyr", quietly = TRUE) || !requireNamespace("logspline", quietly = TRUE) || !requireNamespace("KernSmooth", quietly = TRUE) || !requireNamespace("tidyr", quietly = TRUE) || !requireNamespace("GGally", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) set.seed(333) ## ----message=FALSE, warning=FALSE, echo=FALSE, fig.cap="Correlation between the frequentist p-value and the probability of direction (pd)", fig.align='center'---- library(dplyr) library(tidyr) library(ggplot2) library(see) read.csv("https://raw.github.com/easystats/easystats/master/publications/makowski_2019_bayesian/data/data.csv") %>% mutate(effect_existence = ifelse(true_effect == 1, "Presence of true effect", "Absence of true effect"), p_direction = p_direction * 100) %>% ggplot(aes(x=p_direction, y=p_value, color=effect_existence)) + geom_point2(alpha=0.1) + geom_segment(aes(x=95, y=Inf, xend=95, yend=0.1), color="black", linetype="longdash") + geom_segment(aes(x=-Inf, y=0.1, xend=95, yend=0.1), color="black", linetype="longdash") + geom_segment(aes(x=97.5, y=Inf, xend=97.5, yend=0.05), color="black", linetype="dashed") + geom_segment(aes(x=-Inf, y=0.05, xend=97.5, yend=0.05), color="black", linetype="dashed") + theme_modern() + scale_y_reverse(breaks = c(0.05, round(seq(0, 1, length.out = 11), digits=2))) + scale_x_continuous(breaks = c(95, 97.5, round(seq(50, 100, length.out = 6)))) + scale_color_manual(values=c("Presence of true effect"="green", "Absence of true effect"="red")) + theme(legend.title = element_blank()) + guides(colour = 
guide_legend(override.aes = list(alpha = 1))) + xlab("Probability of Direction (pd)") + ylab("Frequentist p-value") ## ----message=FALSE, warning=FALSE, fig.align='center'------------------------- library(bayestestR) library(logspline) library(KernSmooth) # Compute the correlations data <- data.frame() for(the_mean in runif(25, 0, 4)){ for(the_sd in runif(25, 0.5, 4)){ x <- rnorm(100, the_mean, abs(the_sd)) data <- rbind(data, data.frame("direct" = pd(x), "kernel" = pd(x, method="kernel"), "logspline" = pd(x, method="logspline"), "KernSmooth" = pd(x, method="KernSmooth") )) } } data <- as.data.frame(sapply(data, as.numeric)) # Visualize the correlations library(ggplot2) library(GGally) GGally::ggpairs(data) + theme_classic() ## ----message=FALSE, warning=FALSE--------------------------------------------- data <- data.frame() for(i in 1:25){ the_mean <- runif(1, 0, 4) the_sd <- abs(runif(1, 0.5, 4)) parent_distribution <- rnorm(100000, the_mean, the_sd) true_pd <- pd(parent_distribution) for(j in 1:25){ sample_size <- round(runif(1, 25, 5000)) subsample <- sample(parent_distribution, sample_size) data <- rbind(data, data.frame("sample_size" = sample_size, "true" = true_pd, "direct" = pd(subsample) - true_pd, "kernel" = pd(subsample, method="kernel")- true_pd, "logspline" = pd(subsample, method="logspline") - true_pd, "KernSmooth" = pd(subsample, method="KernSmooth") - true_pd )) } } data <- as.data.frame(sapply(data, as.numeric)) ## ----message=FALSE, warning=FALSE, fig.align='center'------------------------- library(tidyr) library(dplyr) data %>% tidyr::gather(Method, Distance, -sample_size, -true) %>% ggplot(aes(x=sample_size, y = Distance, color = Method, fill= Method)) + geom_point(alpha=0.3, stroke=0, shape=16) + geom_smooth(alpha=0.2) + geom_hline(yintercept=0) + theme_classic() + xlab("\nDistribution Size") bayestestR/inst/doc/indicesEstimationComparison.Rmd0000644000176200001440000002717513620150172022304 0ustar liggesusers--- title: "In-Depth 1: Comparison 
of Point-Estimates" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{In-Depth 1: Comparison of Point-Estimates} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- ```{r message=FALSE, warning=FALSE, include=FALSE} if (!requireNamespace("see", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("tidyr", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) ``` This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- # Effect Point-Estimates in the Bayesian Framework ## Introduction One of the main difference between the Bayesian and the frequentist frameworks is that the former returns a probability distribution of each effect (*i.e.*, parameter of interest of a model, such as a regression slope) instead of a single value. However, there is still a need and demand, for reporting or use in further analysis, for a single value (**point-estimate**) that best characterise the underlying posterior distribution. There are three main indices used in the literature for effect estimation: the **mean**, the **median** or the **MAP** (Maximum A Posteriori) estimate (roughly corresponding to the mode (the "peak") of the distribution). Unfortunately, there is no consensus about which one to use, as no systematic comparison has ever been done. 
In the present work, we will compare these three point-estimates of effect between themselves, as well as with the widely known **beta**, extracted from a comparable frequentist model. With this comparison, we expect to draw bridges and relationships between the two frameworks, helping and easing the transition for the public. ## Experiment 1: Relationship with Error (Noise) and Sample Size ### Methods The simulation aimed at modulating the following characteristics: - **Model type**: linear or logistic. - **"True" effect** (original regression coefficient from which data is drawn): Can be 1 or 0 (no effect). - **Sample size**: From 20 to 100 by steps of 10. - **Error**: Gaussian noise applied to the predictor with SD uniformly spread between 0.33 and 6.66 (with 1000 different values). We generated a dataset for each combination of these characteristics, resulting in a total of `2 * 2 * 9 * 1000 = 36000` Bayesian and frequentist models. The code used for generation is avaible [here](https://easystats.github.io/circus/articles/bayesian_indices.html) (please note that it takes usually several days/weeks to complete). 
```{r message=FALSE, warning=FALSE} library(ggplot2) library(dplyr) library(tidyr) library(see) df <- read.csv("https://raw.github.com/easystats/circus/master/data/bayesSim_study1.csv") ``` ### Results #### Sensitivity to Noise ```{r, message=FALSE, warning=FALSE} df %>% select(error, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>% gather(estimate, value, -error, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(error, 10, labels = FALSE))) %>% group_by(temp) %>% mutate(error_group = round(mean(error), 1)) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = error_group, y = value, fill = estimate, group = interaction(estimate, error_group))) + # geom_hline(yintercept = 0) + # geom_point(alpha=0.05, size=2, stroke = 0, shape=16) + # geom_smooth(method="loess") + geom_boxplot(outlier.shape=NA) + theme_modern() + scale_fill_manual(values = c("Coefficient" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + ylab("Point-estimate") + xlab("Noise") + facet_wrap(~ outcome_type * true_effect, scales="free") ``` #### Sensitivity to Sample Size ```{r, message=FALSE, warning=FALSE} df %>% select(sample_size, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>% gather(estimate, value, -sample_size, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(sample_size, 10, labels = FALSE))) %>% group_by(temp) %>% mutate(size_group = round(mean(sample_size))) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = size_group, y = value, fill = estimate, group = interaction(estimate, size_group))) + # geom_hline(yintercept = 0) + # geom_point(alpha=0.05, size=2, stroke = 0, shape=16) + # geom_smooth(method="loess") + geom_boxplot(outlier.shape=NA) + theme_modern() + scale_fill_manual(values = c("Coefficient" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + ylab("Point-estimate") + xlab("Sample size") + facet_wrap(~ outcome_type * true_effect, scales="free") 
``` #### Statistical Modelling We fitted a (frequentist) multiple linear regression to statistically test the the predict the presence or absence of effect with the estimates as well as their interaction with noise and sample size. ```{r, message=FALSE, warning=FALSE} df %>% select(sample_size, error, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>% gather(estimate, value, -sample_size, -error, -true_effect, -outcome_type) %>% glm(true_effect ~ outcome_type / estimate / value, data=., family="binomial") %>% broom::tidy() %>% select(term, estimate, p=p.value) %>% filter(stringr::str_detect(term, 'outcome_type'), stringr::str_detect(term, ':value')) %>% arrange(desc(estimate)) %>% knitr::kable(digits=2) ``` This suggests that, in order to delineate between the presence and the absence of an effect, compared to the frequentist's beta: - For linear models, the **Mean** was the better predictor, closely followed by the **Median**, the **MAP** and the frequentist **Coefficient**. - For logistic models, the **MAP** was the better predictor, followed by the **Median**, the **Mean** and, behind, the frequentist **Coefficient**. Overall, the **median** seems to be appears as a safe and approriate choice, maintaining a a high performance accross different types of models. ## Experiment 2: Relationship with Sampling Characteristics ### Methods The simulation aimed at modulating the following characteristics: - **Model type**: linear or logistic. - **"True" effect** (original regression coefficient from which data is drawn): Can be 1 or 0 (no effect). - **draws**: from 10 to 5000 by step of 5 (1000 iterations). - **warmup**: Ratio of warmup iterations. from 1/10 to 9/10 by step of 0.1 (9 iterations). We generated 3 datasets for each combination of these characteristics, resulting in a total of `2 * 2 * 8 * 40 * 9 * 3 = 34560` Bayesian and frequentist models. 
The code used for generation is avaible [here](https://easystats.github.io/circus/articles/bayesian_indices.html) (please note that it takes usually several days/weeks to complete). ```{r message=FALSE, warning=FALSE} df <- read.csv("https://raw.github.com/easystats/circus/master/data/bayesSim_study2.csv") ``` ### Results #### Sensitivity to number of iterations ```{r, message=FALSE, warning=FALSE} df %>% select(iterations, true_effect, outcome_type, beta, Median, Mean, MAP) %>% gather(estimate, value, -iterations, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(iterations, 5, labels = FALSE))) %>% group_by(temp) %>% mutate(iterations_group = round(mean(iterations), 1)) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = iterations_group, y = value, fill = estimate, group = interaction(estimate, iterations_group))) + geom_boxplot(outlier.shape=NA) + theme_classic() + scale_fill_manual(values = c("beta" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + ylab("Point-estimate of the true value 0\n") + xlab("\nNumber of Iterations") + facet_wrap(~ outcome_type * true_effect, scales="free") ``` #### Sensitivity to warmup ratio ```{r, message=FALSE, warning=FALSE} df %>% mutate(warmup = warmup / iterations) %>% select(warmup, true_effect, outcome_type, beta, Median, Mean, MAP) %>% gather(estimate, value, -warmup, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(warmup, 3, labels = FALSE))) %>% group_by(temp) %>% mutate(warmup_group = round(mean(warmup), 1)) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = warmup_group, y = value, fill = estimate, group = interaction(estimate, warmup_group))) + geom_boxplot(outlier.shape=NA) + theme_classic() + scale_fill_manual(values = c("beta" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + ylab("Point-estimate of the true value 0\n") + xlab("\nNumber of Iterations") + facet_wrap(~ outcome_type * true_effect, 
scales="free") ``` ## Experiment 3: Relationship with Priors Specification ## Discussion Conclusions can be found in the [guidelines section](https://easystats.github.io/bayestestR/articles/guidelines.html). bayestestR/inst/doc/credible_interval.Rmd0000644000176200001440000002066313620150172020246 0ustar liggesusers--- title: "Credible Intervals (CI)" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test, ci, credible interval] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Credible Intervals (CI)} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) if (!requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } set.seed(333) ``` # What is a *Credible* Interval? Credible intervals are an important concept in Bayesian statistics. Its core purpose is to describe and summarise **the uncertainty** related to your parameters. In this regards, it could appear as quite similar to the frequentist **Confidence Intervals**. However, while their goal is similar, **their statistical definition annd meaning is very different**. Indeed, while the latter is obtained through a complex algorithm full of rarely-tested assumptions and approximations, the credible intervals are fairly straightforward to compute. 
As the Bayesian inference returns a distribution of possible effect values (the posterior), the credible interval is just the range containing a particular percentage of probable values. For instance, the 95\% credible interval is simply the central portion of the posterior distribution that contains 95\% of the values. Note that this drastically improve the interpretability of the Bayesian interval compared to the frequentist one. Indeed, the Bayesian framework allows us to say *"given the observed data, the effect has 95% probability of falling within this range"*, while the frequentist less straightforward alternative (the 95\% ***Confidence* Interval**) would be "*there is a 95\% probability that when computing a confidence interval from data of this sort, the effect falls within this range*". # Why is the default 89\%? Naturally, when it came about choosing the CI level to report by default, **people started using 95\%**, the arbitrary convention used in the **frequentist** world. However, some authors suggested that 95\% might not be the most apppropriate for Bayesian posterior distributions, potentially lacking stability if not enough posterior samples are drawn [@kruschke2014doing]. The proposition was to use 90\% instead of 95\%. However, recently, McElreath (2014, 2018) suggested that if we were to use arbitrary tresholds in the first place, why not use 89\% as this value has the additional argument of being a prime number. Thus, by default, the CIs are computed with 89\% intervals (`ci = 0.89`), deemed to be more stable than, for instance, 95\% intervals [@kruschke2014doing]. An effective sample size (ESS; see [here](https://easystats.github.io/bayestestR/reference/diagnostic_posterior.html)) of at least 10.000 is recommended if 95\% intervals should be computed (Kruschke, 2014, p. 183ff). Moreover, 89 is the highest **prime number** that does not exceed the already unstable 95\% threshold. What does it have to do with anything? 
*Nothing*, but it reminds us of the total arbitrarity of any of these conventions [@mcelreath2018statistical]. # Different types of CIs The reader might notice that `bayestestR` provides **two methods** to compute credible intervals, the **Highest Density Interval (HDI)** (`hdi()`) and the **Equal-tailed Interval (ETI)** (`eti()`). These methods can also be changed via the `method` argument of the `ci()` function. What is the difference? Let's see: ```{r warning=FALSE, message=FALSE} library(bayestestR) library(dplyr) library(ggplot2) # Generate a normal distribution posterior <- distribution_normal(1000) # Compute HDI and ETI ci_hdi <- ci(posterior, method = "HDI") ci_eti <- ci(posterior, method = "ETI") # Plot the distribution and add the limits of the two CIs posterior %>% estimate_density(extend=TRUE) %>% ggplot(aes(x=x, y=y)) + geom_area(fill="orange") + theme_classic() + # HDI in blue geom_vline(xintercept=ci_hdi$CI_low, color="royalblue", size=3) + geom_vline(xintercept=ci_hdi$CI_high, color="royalblue", size=3) + # Quantile in red geom_vline(xintercept=ci_eti$CI_low, color="red", size=1) + geom_vline(xintercept=ci_eti$CI_high, color="red", size=1) ``` > **These are exactly the same...** But is it also the case for other types of distributions? 
```{r warning=FALSE, message=FALSE} library(bayestestR) library(dplyr) library(ggplot2) # Generate a beta distribution posterior <- distribution_beta(1000, 6, 2) # Compute HDI and Quantile CI ci_hdi <- ci(posterior, method = "HDI") ci_eti <- ci(posterior, method = "ETI") # Plot the distribution and add the limits of the two CIs posterior %>% estimate_density(extend=TRUE) %>% ggplot(aes(x=x, y=y)) + geom_area(fill="orange") + theme_classic() + # HDI in blue geom_vline(xintercept=ci_hdi$CI_low, color="royalblue", size=3) + geom_vline(xintercept=ci_hdi$CI_high, color="royalblue", size=3) + # Quantile in red geom_vline(xintercept=ci_eti$CI_low, color="red", size=1) + geom_vline(xintercept=ci_eti$CI_high, color="red", size=1) ``` > **The difference is strong with this one.** Contrary to the **HDI**, for which all points within the interval have a higher probability density than points outside the interval, the **ETI** is **equal-tailed**. This means that a 90\% interval has 5\% of the distribution on either side of its limits. It indicates the 5th percentile and the 95th percentile. In symmetric distributions, the two methods of computing credible intervals, the ETI and the HDI, return similar results. This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution. On the other hand, the ETI range does change when transformations are applied to the distribution (for instance, for a log odds scale to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI. 
# The Support Interval Unlike the HDI and the ETI, which look at the posterior distribution, the **Support Interval (SI)** provides information regarding the change in the credability of values from the prior to the posterior - in other words, it indicates which values of a parameter are have gained support by the observed data by some factor greater or equal to *k* [@wagenmakers2018SI]. ```{r warning=FALSE, message=FALSE} prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) si_1 <- si(posterior, prior, BF = 1) si_3 <- si(posterior, prior, BF = 3) ggplot(mapping = aes(x=x, y=y)) + theme_classic() + # The posterior geom_area(fill = "orange", data = estimate_density(posterior, extend = TRUE)) + # The prior geom_area(color = "black", fill = NA, size = 1, linetype = "dashed", data = estimate_density(prior, extend = TRUE)) + # BF = 1 SI in blue geom_vline(xintercept=si_1$CI_low, color="royalblue", size=1) + geom_vline(xintercept=si_1$CI_high, color="royalblue", size=1) + # BF = 3 SI in red geom_vline(xintercept=si_3$CI_low, color="red", size=1) + geom_vline(xintercept=si_3$CI_high, color="red", size=1) ``` Between the blue lines are values the recived *some* support by the data (this is a BF = 1 SI), which received at least "moderate" support from the data. From the presepctive of the Savage-Dickey Bayes factor, testing against a point null hypothesis for any value within the support interval will yeild a Bayes factor smaller than 1/`BF`. # References bayestestR/inst/doc/credible_interval.html0000644000176200001440000015615713620150402020474 0ustar liggesusers

Credible Intervals (CI)

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

What is a Credible Interval?

Credible intervals are an important concept in Bayesian statistics. Their core purpose is to describe and summarise the uncertainty related to your parameters. In this regard, they could appear as quite similar to the frequentist Confidence Intervals. However, while their goal is similar, their statistical definition and meaning are very different. Indeed, while the latter is obtained through a complex algorithm full of rarely-tested assumptions and approximations, the credible intervals are fairly straightforward to compute.

As the Bayesian inference returns a distribution of possible effect values (the posterior), the credible interval is just the range containing a particular percentage of probable values. For instance, the 95% credible interval is simply the central portion of the posterior distribution that contains 95% of the values.

Note that this drastically improves the interpretability of the Bayesian interval compared to the frequentist one. Indeed, the Bayesian framework allows us to say “given the observed data, the effect has 95% probability of falling within this range”, while the frequentist less straightforward alternative (the 95% Confidence Interval) would be “there is a 95% probability that when computing a confidence interval from data of this sort, the effect falls within this range”.

Why is the default 89%?

Naturally, when it came about choosing the CI level to report by default, people started using 95%, the arbitrary convention used in the frequentist world. However, some authors suggested that 95% might not be the most appropriate for Bayesian posterior distributions, potentially lacking stability if not enough posterior samples are drawn (Kruschke, 2014).

The proposition was to use 90% instead of 95%. However, recently, McElreath (2014, 2018) suggested that if we were to use arbitrary thresholds in the first place, why not use 89% as this value has the additional argument of being a prime number.

Thus, by default, the CIs are computed with 89% intervals (ci = 0.89), deemed to be more stable than, for instance, 95% intervals (Kruschke, 2014). An effective sample size (ESS; see here) of at least 10.000 is recommended if 95% intervals should be computed (Kruschke, 2014, p. 183ff). Moreover, 89 is the highest prime number that does not exceed the already unstable 95% threshold. What does it have to do with anything? Nothing, but it reminds us of the total arbitrariness of any of these conventions (McElreath, 2018).

Different types of CIs

The reader might notice that bayestestR provides two methods to compute credible intervals, the Highest Density Interval (HDI) (hdi()) and the Equal-tailed Interval (ETI) (eti()). These methods can also be changed via the method argument of the ci() function. What is the difference? Let’s see:

These are exactly the same…

But is it also the case for other types of distributions?

The difference is strong with this one.

Contrary to the HDI, for which all points within the interval have a higher probability density than points outside the interval, the ETI is equal-tailed. This means that a 90% interval has 5% of the distribution on either side of its limits. It indicates the 5th percentile and the 95th percentile. In symmetric distributions, the two methods of computing credible intervals, the ETI and the HDI, return similar results.

This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution.

On the other hand, the ETI range does change when transformations are applied to the distribution (for instance, for a log odds scale to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI.

The Support Interval

Unlike the HDI and the ETI, which look at the posterior distribution, the Support Interval (SI) provides information regarding the change in the credibility of values from the prior to the posterior - in other words, it indicates which values of a parameter have gained support by the observed data by some factor greater or equal to k (Wagenmakers, Gronau, Dablander, & Etz, 2018).

Between the blue lines are values that received some support from the data (this is a BF = 1 SI); between the red lines are values that received at least “moderate” support from the data (a BF = 3 SI).

From the perspective of the Savage-Dickey Bayes factor, testing against a point null hypothesis for any value within the support interval will yield a Bayes factor smaller than 1/BF.

References

Kruschke, J. (2014). Doing bayesian data analysis: A tutorial with r, jags, and stan. Academic Press.

McElreath, R. (2018). Statistical rethinking: A bayesian course with examples in r and stan. Chapman; Hall/CRC.

Wagenmakers, E.-J., Gronau, Q. F., Dablander, F., & Etz, A. (2018). The support interval. https://doi.org/10.31234/osf.io/zwnxb

bayestestR/inst/doc/guidelines.R0000644000176200001440000000065213620150416016375 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) ## ----echo=FALSE, fig.cap="Reviewer 2 (circa a long time ago in a galaxy far away).", fig.align='center', out.width="60%"---- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/watto.jpg") bayestestR/inst/doc/bayestestR.html0000644000176200001440000045334613620150377017157 0ustar liggesusers

Get Started with Bayesian Analysis

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

Why use the Bayesian Framework?

The Bayesian framework for statistics is quickly gaining in popularity among scientists, associated with the general shift towards open and honest science. Reasons to prefer this approach are reliability, accuracy (in noisy data and small samples), the possibility of introducing prior knowledge into the analysis and, critically, results intuitiveness and their straightforward interpretation (Andrews & Baguley, 2013; Etz & Vandekerckhove, 2016; Kruschke, 2010; Kruschke, Aguinis, & Joo, 2012; Wagenmakers et al., 2018).

In general, the frequentist approach has been associated with the focus on null hypothesis testing, and the misuse of p-values has been shown to critically contribute to the reproducibility crisis of psychological science (Chambers, Feredoes, Muthukumaraswamy, & Etchells, 2014; Szucs & Ioannidis, 2016). There is a general agreement that the generalization of the Bayesian approach is one way of overcoming these issues (Benjamin et al., 2018; Etz & Vandekerckhove, 2016).

Once we agreed that the Bayesian framework is the right way to go, you might wonder what is the Bayesian framework.

What’s all the fuss about?

What is the Bayesian Framework?

Adopting the Bayesian framework is more of a shift in the paradigm than a change in the methodology. Indeed, all the common statistical procedures (t-tests, correlations, ANOVAs, regressions, …) can be achieved using the Bayesian framework. One of the core difference is that in the frequentist view (the “classic” statistics, with p and t values, as well as some weird degrees of freedom), the effects are fixed (but unknown) and data are random. On the other hand, in the Bayesian inference process, instead of having estimates of the “true effect”, the probability of different effects given the observed data is computed, resulting in a distribution of possible values for the parameters, called the posterior distribution.

The uncertainty in Bayesian inference can be summarized, for instance, by the median of the distribution, as well as a range of values of the posterior distribution that includes the 95% most probable values (the 95% credible interval). Cum grano salis, these are considered the counterparts to the point-estimate and confidence interval in a frequentist framework. To illustrate the difference of interpretation, the Bayesian framework allows to say “given the observed data, the effect has 95% probability of falling within this range”, while the frequentist less straightforward alternative would be “when repeatedly computing confidence intervals from data of this sort, there is a 95% probability that the effect falls within a given range”. In essence, the Bayesian sampling algorithms (such as MCMC sampling) return a probability distribution (the posterior) of an effect that is compatible with the observed data. Thus, an effect can be described by characterizing its posterior distribution in relation to its centrality (point-estimates), uncertainty, as well as existence and significance

In other words, omitting the maths behind it, we can say that:

  • The frequentist bloke tries to estimate “the real effect”. For instance, the “real” value of the correlation between x and y. Hence, frequentist models return a “point-estimate” (i.e., a single value) of the “real” correlation (e.g., r = 0.42) estimated under a number of obscure assumptions (at a minimum, considering that the data is sampled at random from a “parent”, usually normal distribution).
  • The Bayesian master assumes no such thing. The data are what they are. Based on this observed data (and a prior belief about the result), the Bayesian sampling algorithm (sometimes referred to for example as MCMC sampling) returns a probability distribution (called the posterior) of the effect that is compatible with the observed data. For the correlation between x and y, it will return a distribution that says, for example, “the most probable effect is 0.42, but this data is also compatible with correlations of 0.12 and 0.74”.
  • To characterize our effects, no need of p values or other cryptic indices. We simply describe the posterior distribution of the effect. For example, we can report the median, the 89% Credible Interval or other indices.
Accurate depiction of a regular Bayesian user estimating a credible interval.

Accurate depiction of a regular Bayesian user estimating a credible interval.

Note: Although the very purpose of this package is to advocate for the use of Bayesian statistics, please note that there are serious arguments supporting frequentist indices (see for instance this thread). As always, the world is not black and white (p < .001).

So… how does it work?

A simple example

BayestestR Installation

You can install bayestestR along with the whole easystats suite by running the following:

Let’s also install and load the rstanarm, that allows fitting Bayesian models, as well as bayestestR, to describe them.

Traditional linear regression

Let’s start by fitting a simple frequentist linear regression (the lm() function stands for linear model) between two numeric variables, Sepal.Length and Petal.Length from the famous iris dataset, included by default in R.


Call:
lm(formula = Sepal.Length ~ Petal.Length, data = iris)

Residuals:
    Min      1Q  Median      3Q     Max 
-1.2468 -0.2966 -0.0152  0.2768  1.0027 

Coefficients:
             Estimate Std. Error t value Pr(>|t|)    
(Intercept)    4.3066     0.0784    54.9   <2e-16 ***
Petal.Length   0.4089     0.0189    21.6   <2e-16 ***
---
Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Residual standard error: 0.41 on 148 degrees of freedom
Multiple R-squared:  0.76,  Adjusted R-squared:  0.758 
F-statistic:  469 on 1 and 148 DF,  p-value: <2e-16

This analysis suggests that there is a significant (whatever that means) and positive (with a coefficient of 0.41) linear relationship between the two variables.

Fitting and interpreting frequentist models is so easy that it is obvious that people use it instead of the Bayesian framework… right?

Not anymore.

Bayesian linear regression

Parameter Median CI CI_low CI_high pd ROPE_CI ROPE_low ROPE_high ROPE_Percentage Rhat ESS
(Intercept) 4.31 89 4.18 4.43 1 89 -0.08 0.08 0 1 4056
Petal.Length 0.41 89 0.38 0.44 1 89 -0.08 0.08 0 1 4311

That’s it! You fitted a Bayesian version of the model by simply using stan_glm() instead of lm() and described the posterior distributions of the parameters. The conclusions that we can draw, for this example, are very similar. The effect (the median of the effect’s posterior distribution) is about 0.41, and it can also be considered as significant in the Bayesian sense (more on that later).

So, ready to learn more? Check out the next tutorial!

References

Andrews, M., & Baguley, T. (2013). Prior approval: The growth of bayesian methods in psychology. British Journal of Mathematical and Statistical Psychology, 66(1), 1–7.

Benjamin, D. J., Berger, J. O., Johannesson, M., Nosek, B. A., Wagenmakers, E.-J., Berk, R., … others. (2018). Redefine statistical significance. Nature Human Behaviour, 2(1), 6.

Chambers, C. D., Feredoes, E., Muthukumaraswamy, S. D., & Etchells, P. (2014). Instead of ’playing the game’ it is time to change the rules: Registered reports at aims neuroscience and beyond. AIMS Neuroscience, 1(1), 4–17.

Etz, A., & Vandekerckhove, J. (2016). A bayesian perspective on the reproducibility project: Psychology. PloS One, 11(2), e0149794.

Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. Trends in Cognitive Sciences, 14(7), 293–300.

Kruschke, J. K., Aguinis, H., & Joo, H. (2012). The time has come: Bayesian methods for data analysis in the organizational sciences. Organizational Research Methods, 15(4), 722–752.

Szucs, D., & Ioannidis, J. P. (2016). Empirical assessment of published effect sizes and power in the recent cognitive neuroscience and psychology literature. BioRxiv, 071530.

Wagenmakers, E.-J., Marsman, M., Jamil, T., Ly, A., Verhagen, J., Love, J., … others. (2018). Bayesian inference for psychology. Part i: Theoretical advantages and practical ramifications. Psychonomic Bulletin & Review, 25(1), 35–57.

bayestestR/inst/doc/example2.Rmd0000644000176200001440000002654413620150172016312 0ustar liggesusers--- title: "2. Confirmation of Bayesian skills" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Example 2: Confirmation of Bayesian skills} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} if (!requireNamespace("see", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("performance", quietly = TRUE) || !requireNamespace("BayesFactor", quietly = TRUE) || !requireNamespace("rstanarm", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } data(iris) library(knitr) library(bayestestR) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") knitr::opts_chunk$set(dpi=150) options(digits=2) set.seed(333) ``` Now that [**describing and understanding posterior distributions**](https://easystats.github.io/bayestestR/articles/example1.html) of linear regressions has no secrets for you, we will take one step back and study some simpler models: **correlations** and ***t*-tests**. 
But before we do that, let us take a moment to remind ourselves and appreciate the fact that **all basic statistical procedures** such as correlations, *t*-tests, ANOVAs or Chi-square tests ***are* linear regressions** (we strongly recommend [this excellent demonstration](https://lindeloev.github.io/tests-as-linear/)). Nevertheless, these simple models will be the occasion to introduce a more complex index, such as the **Bayes factor**. ## Correlations ### Frequentist version Let us start, again, with a **frequentist correlation** between two continuous variables, the **width** and the **length** of the sepals of some flowers. The data is available in R as the `iris` dataset (the same that was used in the [previous tutorial](https://easystats.github.io/bayestestR/articles/example1.html)). We will compute a Pearson's correlation test, store the results in an object called `result`, then display it: ```{r message=FALSE, warning=FALSE} result <- cor.test(iris$Sepal.Width, iris$Sepal.Length) result ``` As you can see in the output, the test that we did actually compared two hypotheses: the **null hypothesis** (*h0*; no correlation) with the **alternative hypothesis** (*h1*; a non-null correlation). Based on the *p*-value, the null hypothesis cannot be rejected: the correlation between the two variables is **negative but not significant** (r = -.12, p > .05). ### Bayesian correlation To compute a Bayesian correlation test, we will need the [`BayesFactor`](https://richarddmorey.github.io/BayesFactor/) package (you can install it by running `install.packages("BayesFactor")`). We can then load this package, compute the correlation using the `correlationBF()` function and store the results in a similar fashion. 
```{r message=FALSE, warning=FALSE, results='hide'} library(BayesFactor) result <- correlationBF(iris$Sepal.Width, iris$Sepal.Length) ``` Now, let us run our `describe_posterior()` function on that: ```{r message=FALSE, warning=FALSE, eval=FALSE} describe_posterior(result) ``` ```{r echo=FALSE} structure(list(Parameter = "rho", Median = -0.114149129692488, CI = 89, CI_low = -0.240766308855643, CI_high = 0.00794997655649642, pd = 91.6, ROPE_CI = 89, ROPE_low = -0.1, ROPE_high = 0.1, ROPE_Percentage = 42.0949171581017, BF = 0.509017511647702, Prior_Distribution = "cauchy", Prior_Location = 0, Prior_Scale = 0.333333333333333), row.names = 1L, class = "data.frame") ``` We see again many things here, but the important indices for now are the **median** of the posterior distribution, `-.11`. This is (again) quite close to the frequentist correlation. We could, as previously, describe the [**credible interval**](https://easystats.github.io/bayestestR/articles/credible_interval.html), the [**pd**](https://easystats.github.io/bayestestR/articles/probability_of_direction.html) or the [**ROPE percentage**](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html), but we will focus here on another index provided by the Bayesian framework, the **Bayes factor (BF)**. ### Bayes factor (BF) We said previously that a correlation test actually compares two hypotheses, a null (absence of effect) with an alternative one (presence of an effect). The [**Bayes factor (BF)**](https://easystats.github.io/bayestestR/articles/bayes_factors.html) allows the same comparison and determines **under which of two models the observed data are more probable**: a model with the effect of interest, and a null model without the effect of interest. We can use `bayesfactor()` to specifically compute the Bayes factor comparing those models: ```{r message=FALSE, warning=FALSE} bayesfactor(result) ``` We got a *BF* of `0.51`. What does it mean? 
Bayes factors are **continuous measures of relative evidence**, with a Bayes factor greater than 1 giving evidence in favour of one of the models (often referred to as *the numerator*), and a Bayes factor smaller than 1 giving evidence in favour of the other model (*the denominator*). > **Yes, you heard things right, evidence in favour of the null!** That's one of the reasons why the Bayesian framework is sometimes considered as superior to the frequentist framework. Remember from your stats lessons, that the ***p*-value can only be used to reject *h0***, but not *accept* it. With the **Bayes factor**, you can measure **evidence against - and in favour of - the null**. BFs representing evidence for the alternative against the null can be reversed using $BF_{01}=1/BF_{10}$ (the *01* and *10* correspond to *h0* against *h1* and *h1* against *h0*, respectively) to provide evidence of the null against the alternative. This improves human readability in cases where the BF of the alternative against the null is smaller than 1 (i.e., in support of the null). In our case, `BF = 1/0.51 = 2`, indicates that the data are **2 times more probable under the null compared to the alternative hypothesis**, which, though favouring the null, is considered only [anecdotal evidence against the null](https://easystats.github.io/report/articles/interpret_metrics.html#bayes-factor-bf). We can thus conclude that there is **anecdotal evidence in favour of an absence of correlation between the two variables (rmedian = -0.11, BF = 0.51)**, which is a much more informative statement than what we can do with frequentist statistics. **And that's not all!** ### Visualise the Bayes factor In general, **pie charts are an absolute no-go in data visualisation**, as our brain's perceptive system heavily distorts the information presented in such way. Nevertheless, there is one exception: pizza charts. 
It is an intuitive way of interpreting the strength of evidence provided by BFs as an amount of surprise. ```{r echo=FALSE, fig.cap="Wagenmakers' pizza poking analogy. From the great 'www.bayesianspectacles.org' blog.", fig.align='center', out.width="80%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/LetsPokeAPizza.jpg") ``` Such "pizza plots" can be directly created through the [`see`](https://github.com/easystats/see) visualisation companion package for easystats (you can install it by running `install.packages("see")`): ```{r message=FALSE, warning=FALSE} library(see) plot(bayesfactor(result)) + scale_fill_pizza() ``` So, after seeing this pizza, how much would you be surprised by the outcome of a blinded poke? ## *t*-tests ***"I know that I know nothing, and especially not if *versicolor* and *virginica* differ in terms of Sepal.Width"*, famously said Socrates**. Time to finally answer this crucial question! ### Versicolor *vs.* virginica Bayesian *t*-tests can be performed in a very similar way to correlations. As we are particularly interested in two levels of the `Species` factor, *versicolor* and *virginica*, we will start by filtering out from `iris` the non-relevant observations corresponding to the *setosa* species, and we will then visualise the observations and the distribution of the `Sepal.Width` variable. ```{r message=FALSE, warning=FALSE} library(dplyr) library(ggplot2) # Select only two relevant species data <- iris %>% filter(Species != "setosa") %>% droplevels() # Visualise distributions and observations data %>% ggplot(aes(x = Species, y = Sepal.Width, fill = Species)) + geom_violindot(fill_dots = "black", size_dots = 1) + scale_fill_material() + theme_modern() ``` It *seems* (visually) that *virginica* flowers have, on average, a slightly higher width of sepals. Let's assess this difference statistically by using the `ttestBF` in the `BayesFactor` package. 
### Compute the Bayesian *t*-test ```{r message=FALSE, warning=FALSE} result <- BayesFactor::ttestBF(formula = Sepal.Width ~ Species, data = data) describe_posterior(result) ``` From the indices, we can say that the difference of `Sepal.Width` between *virginica* and *versicolor* has a probability of **100% of being negative** [*from the pd and the sign of the median*] (median = -0.19, 89% CI [-0.29, -0.092]). The data provides a **strong evidence against the null hypothesis** (BF = 18). Keep that in mind as we will see another way of investigating this question. ## Logistic Model A hypothesis for which one uses a *t*-test can also be tested using a binomial model (*e.g.*, a **logistic model**). Indeed, it is possible to reformulate the following hypothesis, "*there is an important difference in this variable between the two groups*" by "*this variable is able to discriminate between (or classify) the two groups*". However, these models are much more powerful than a regular *t*-test. In the case of the difference of `Sepal.Width` between *virginica* and *versicolor*, the question becomes, *how well can we classify the two species using only* `Sepal.Width`. ### Fit the model ```{r message=FALSE, warning=FALSE, eval=FALSE} library(rstanarm) model <- stan_glm(Species ~ Sepal.Width, data = data, family = "binomial") ``` ```{r message=FALSE, warning=FALSE, echo=FALSE} library(rstanarm) model <- stan_glm(Species ~ Sepal.Width, data = data, family = "binomial", refresh = 0) ``` ### Visualise the model Using the [`estimate`](https://github.com/easystats/estimate) package. **Wait until estimate is on CRAN**. ### Performance and Parameters TO DO. ```{r message=FALSE, warning=FALSE} library(performance) model_performance(model) ``` ```{r message=FALSE, warning=FALSE} describe_posterior(model, test = c("pd", "ROPE", "BF")) ``` ### Visualise the indices TO DO. 
```{r message=FALSE, warning=FALSE} # plot(rope(result)) ``` ### Diagnostic Indices About diagnostic indices such as Rhat and ESS. bayestestR/inst/doc/region_of_practical_equivalence.R0000644000176200001440000000360313620150635022621 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ if (!requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("see", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) set.seed(333) ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # library(rstanarm) # library(bayestestR) # library(see) # # data <- iris # Use the iris data # model <- stan_glm(Sepal.Length ~ Sepal.Width, data=data) # Fit model ## ----echo=FALSE, message=FALSE, warning=FALSE, comment=">"-------------------- library(rstanarm) library(bayestestR) library(see) set.seed(333) data <- iris # Use the iris data model <- stan_glm(Sepal.Length ~ Sepal.Width, data=data, refresh = 0) ## ----echo=TRUE, message=FALSE, warning=FALSE, comment=">"--------------------- # Compute indices pd <- p_direction(model) percentage_in_rope <- rope(model, ci=1) # Visualise the pd plot(pd) pd # Visualise the percentage in ROPE plot(percentage_in_rope) percentage_in_rope ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # data$Sepal.Width_scaled <- data$Sepal.Width / 100 # Divide predictor by 100 # model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data=data) # Fit model ## ----echo=FALSE, message=FALSE, warning=FALSE, comment=">"-------------------- set.seed(333) data$Sepal.Width_scaled <- data$Sepal.Width / 100 model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data=data, refresh = 0) ## ----echo=TRUE, message=FALSE, warning=FALSE, comment=">"--------------------- # Compute indices pd <- p_direction(model) percentage_in_rope <- rope(model, ci=1) # Visualise the pd plot(pd) 
pd # Visualise the percentage in ROPE plot(percentage_in_rope) percentage_in_rope bayestestR/inst/doc/bayes_factors.R0000644000176200001440000004413213620150367017077 0ustar liggesusers## ----setup, include=FALSE----------------------------------------------------- if (!requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("BayesFactor", quietly = TRUE) || !requireNamespace("emmeans", quietly = TRUE) || !requireNamespace("logspline", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("see", quietly = TRUE) ) { knitr::opts_chunk$set(eval = FALSE) } else { library(knitr) library(insight) library(bayestestR) library(rstanarm) library(BayesFactor) library(emmeans) library(ggplot2) library(see) options(knitr.kable.NA = '', digits = 2) opts_chunk$set(echo = TRUE, comment = ">", message = FALSE, warning = FALSE, dpi = 150) theme_set(theme_modern()) set.seed(4) } ## ----deathsticks_fig, echo=FALSE, fig.cap="Bayesian analysis of the Students' (1908) Sleep data set.", fig.align='center', out.width="80%"---- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/deathsticks.jpg") ## ----sleep_boxplot, echo=FALSE------------------------------------------------ ggplot(sleep, aes(x = group, y = extra, fill= group)) + geom_boxplot() + theme_classic() ## ----rstanarm_model, eval = FALSE--------------------------------------------- # library(rstanarm) # # model <- stan_glm(extra ~ group, data = sleep, # prior = normal(0, 3, autoscale = FALSE)) ## ---- echo=FALSE-------------------------------------------------------------- model <- stan_glm(extra ~ group, data = sleep, prior = normal(0, 3, autoscale = FALSE), refresh = 0) ## ---- echo=FALSE-------------------------------------------------------------- null <- c(-1,1) xrange <- c(-10,10) x_vals <- seq(xrange[1], xrange[2], length.out = 400) d_vals <- dnorm(x_vals, sd = 3) in_null <- null[1] < x_vals & 
x_vals < null[2] range_groups <- rep(0, length(x_vals)) range_groups[!in_null & x_vals < 0] <- -1 range_groups[!in_null & x_vals > 0] <- 1 ggplot(mapping = aes(x_vals, d_vals, fill = in_null, group = range_groups)) + geom_area(color = "black", size = 1) + scale_fill_flat(name = "", labels = c("Alternative", "Null")) + labs(x = "Drug effect", y = "Density") + theme_modern() + theme(legend.position = c(0.2, 0.8)) pnull <- diff(pnorm(null, sd = 2.5)) prior_odds <- (1 - pnull) / pnull ## ----rstanarm_fit, echo=FALSE------------------------------------------------- model_prior <- bayestestR:::.update_to_priors.stanreg(model) posterior <- insight::get_parameters(model)$group2 prior <- insight::get_parameters(model_prior)$group2 f_post <- logspline::logspline(posterior) d_vals_post <- logspline::dlogspline(x_vals,f_post) ggplot(mapping = aes(x_vals, d_vals_post, fill = in_null, group = range_groups)) + geom_area(color = "black", size = 1) + scale_fill_flat(name = "", labels = c("Alternative", "Null")) + labs(x = "Drug effect", y = "Density") + theme_modern() + theme(legend.position = c(0.2, 0.8)) My_first_BF <- bayesfactor_parameters(model, null = c(-1,1), prior = model_prior) BF <- My_first_BF$BF[2] post_odds <- prior_odds * BF med_post <- point_estimate(posterior)$Median ## ---- eval=FALSE-------------------------------------------------------------- # My_first_BF <- bayesfactor_parameters(model, null = c(-1, 1)) # My_first_BF ## ---- echo=FALSE-------------------------------------------------------------- print(My_first_BF) ## ----------------------------------------------------------------------------- library(see) plot(My_first_BF) ## ---- eval=FALSE-------------------------------------------------------------- # My_second_BF <- bayesfactor_parameters(model, null = 0) # My_second_BF ## ---- echo=FALSE-------------------------------------------------------------- My_second_BF <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = 
0) print(My_second_BF) ## ----------------------------------------------------------------------------- plot(My_second_BF) ## ----savagedickey_one_sided, eval=FALSE--------------------------------------- # test_group2_right <- bayesfactor_parameters(model, direction = ">") # test_group2_right ## ----prior_n_post_plot_one_sided, echo=FALSE---------------------------------- test_group2_right <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = 0, direction = ">" ) BF <- test_group2_right$BF print(test_group2_right) ## ----------------------------------------------------------------------------- plot(test_group2_right) ## ---- eval=FALSE-------------------------------------------------------------- # my_first_si <- si(model, BF = 1) # my_first_si ## ---- echo=FALSE-------------------------------------------------------------- my_first_si <- si(data.frame(group2 = posterior), data.frame(group2 = prior), BF = 1) print(my_first_si) ## ----------------------------------------------------------------------------- plot(my_first_si) ## ----brms_disp, eval=FALSE---------------------------------------------------- # library(brms) # # m0 <- brm(Sepal.Length ~ 1, # intercept only model # data = iris, save_all_pars = TRUE) # m1 <- brm(Sepal.Length ~ Petal.Length, # data = iris, save_all_pars = TRUE) # m2 <- brm(Sepal.Length ~ Species, # data = iris, save_all_pars = TRUE) # m3 <- brm(Sepal.Length ~ Species + Petal.Length, # data = iris, save_all_pars = TRUE) # m4 <- brm(Sepal.Length ~ Species * Petal.Length, # data = iris, save_all_pars = TRUE) ## ----brms_models_disp, eval=FALSE--------------------------------------------- # library(bayestestR) # comparison <- bayesfactor_models(m1, m2, m3, m4, denominator = m0) # comparison ## ----brms_models_print, echo=FALSE-------------------------------------------- comparison <- structure( list( Model = c( "Petal.Length", "Species", "Species + Petal.Length", "Species * Petal.Length", "1" ), BF = 
c(3.44736e+44, 5.628679e+29, 7.121386e+55, 9.149948e+55, 1) ), class = c("bayesfactor_models", "see_bayesfactor_models", "data.frame"), row.names = c(NA, -5L), denominator = 5L, BF_method = "marginal likelihoods (bridgesampling)", unsupported_models = FALSE ) print(comparison) ## ----update_models1----------------------------------------------------------- update(comparison, reference = 3) ## ----update_models2----------------------------------------------------------- update(comparison, reference = 2) ## ----lme4_models, eval=FALSE-------------------------------------------------- # library(lme4) # # m0 <- lmer(Sepal.Length ~ (1 | Species), data = iris) # m1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) # m2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) # m3 <- lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris) # m4 <- lmer(Sepal.Length ~ Petal.Length * Petal.Width + (Petal.Length | Species), data = iris) # # bayesfactor_models(m1, m2, m3, m4, denominator = m0) ## ---- echo=FALSE-------------------------------------------------------------- structure(list(Model = c( "Petal.Length + (1 | Species)", "Petal.Length + (Petal.Length | Species)", "Petal.Length + Petal.Width + (Petal.Length | Species)", "Petal.Length * Petal.Width + (Petal.Length | Species)", "1 + (1 | Species)"), BF = c(8.24027869011648e+24, 4.7677519818206e+23, 1.52492156042604e+22, 5.93045520305254e+20, 1)), class = c("bayesfactor_models", "see_bayesfactor_models", "data.frame"), row.names = c(NA, -5L), denominator = 5L, BF_method = "BIC approximation", unsupported_models = FALSE) ## ---- eval=FALSE-------------------------------------------------------------- # iris_model <- stan_glm(Sepal.Length ~ Species + Petal.Length, # data = iris, # prior = normal(0, c(2, 1.2, 1.2), autoscale = FALSE)) ## ---- echo=FALSE-------------------------------------------------------------- iris_model <- stan_glm(Sepal.Length ~ 
Species + Petal.Length, data = iris, prior = normal(0, c(2, 1.2, 1.2), autoscale = FALSE), refresh = 0) ## ----------------------------------------------------------------------------- botanist_hypotheses <- c( "Petal.Length > 0", "(Speciesversicolor > 0) & (Speciesvirginica > 0)" ) ## ---- eval=FALSE-------------------------------------------------------------- # botanist_BFs <- bayesfactor_restricted(iris_model, hypothesis = botanist_hypotheses) # botanist_BFs ## ---- echo=FALSE-------------------------------------------------------------- model_prior <- bayestestR:::.update_to_priors.stanreg(iris_model) botanist_BFs <- bayesfactor_restricted(iris_model, prior = model_prior, hypothesis = botanist_hypotheses) print(botanist_BFs) ## ----plot_iris, echo=FALSE---------------------------------------------------- ggplot(iris, aes(Petal.Length, Sepal.Length, color = Species)) + geom_point() + scale_color_flat() + theme(legend.position = c(0.2, 0.8)) ## ----inclusion_brms----------------------------------------------------------- bayesfactor_inclusion(comparison) ## ----inclusion_brms2---------------------------------------------------------- bayesfactor_inclusion(comparison, match_models = TRUE) ## ----JASP_all----------------------------------------------------------------- library(BayesFactor) data(ToothGrowth) ToothGrowth$dose <- as.factor(ToothGrowth$dose) BF_ToothGrowth <- anovaBF(len ~ dose*supp, ToothGrowth, progress = FALSE) bayesfactor_inclusion(BF_ToothGrowth) ## ----JASP_all_fig, echo=FALSE------------------------------------------------- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP1.PNG") ## ----JASP_matched------------------------------------------------------------- bayesfactor_inclusion(BF_ToothGrowth, match_models = TRUE) ## ----JASP_matched_fig, echo=FALSE--------------------------------------------- 
knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP2.PNG") ## ----JASP_Nuisance------------------------------------------------------------ BF_ToothGrowth_against_dose <- BF_ToothGrowth[3:4]/BF_ToothGrowth[2] # OR: # update(bayesfactor_models(BF_ToothGrowth), # subset = c(4, 5), # reference = 3) BF_ToothGrowth_against_dose bayesfactor_inclusion(BF_ToothGrowth_against_dose) ## ----JASP_Nuisance_fig, echo=FALSE-------------------------------------------- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP3.PNG") ## ---- eval=FALSE-------------------------------------------------------------- # mod <- stan_glm(mpg ~ wt + am, # data = mtcars, # prior = normal(0, c(10,10), autoscale = FALSE), # diagnostic_file = file.path(tempdir(), "df1.csv")) # # mod_carb <- stan_glm(mpg ~ wt + am + carb, # data = mtcars, # prior = normal(0, c(10,10,20), autoscale = FALSE), # diagnostic_file = file.path(tempdir(), "df0.csv")) # # bayesfactor_models(mod_carb, denominator = mod) ## ---- echo=FALSE-------------------------------------------------------------- mod <- stan_glm(mpg ~ wt + am, data = mtcars, prior = normal(0, c(10,10), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df1.csv"), refresh = 0) mod_carb <- stan_glm(mpg ~ wt + am + carb, data = mtcars, prior = normal(0, c(10,10,20), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df0.csv"), refresh = 0) BF_carb <- bayesfactor_models(mod_carb, denominator = mod, verbose = FALSE) BF <- BF_carb$BF[1] print(BF_carb) ## ----------------------------------------------------------------------------- hdi(mod_carb, ci = .95) ## ----------------------------------------------------------------------------- BMA_draws <- weighted_posteriors(mod, mod_carb) BMA_hdi <- hdi(BMA_draws, ci = .95) BMA_hdi plot(BMA_hdi) ## ---- echo=FALSE-------------------------------------------------------------- set.seed(1) ## 
----------------------------------------------------------------------------- library(emmeans) groups <- emmeans(model, ~ group) group_diff <- pairs(groups) (groups_all <- rbind(groups, group_diff)) # pass the original model via prior bayesfactor_parameters(groups_all, prior = model) ## ---- echo=FALSE-------------------------------------------------------------- set.seed(1) ## ----------------------------------------------------------------------------- library(modelbased) estimate_contrasts(model, test = "bf") ## ---- eval=FALSE-------------------------------------------------------------- # contrasts(iris$Species) <- contr.sum # # fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, # prior = normal(0, c(1, 1), autoscale = FALSE), # prior_PD = TRUE, # sample priors # family = gaussian()) # # pairs_sum <- pairs(emmeans(fit_sum, ~ Species)) # pairs_sum ## ---- echo=FALSE-------------------------------------------------------------- contrasts(iris$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian(), refresh = 0) pairs_sum <- pairs(emmeans(fit_sum, ~ Species)) em_pairs_samples <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(pairs_sum, names = FALSE))) print(pairs_sum) ggplot(stack(em_pairs_samples), aes(x = values, fill = ind)) + geom_density(size = 1) + facet_grid(ind ~ .) 
+ labs(x = "prior difference values") + theme(legend.position = "none") ## ----------------------------------------------------------------------------- contrasts(iris$Species) <- contr.bayes ## ---- eval=FALSE-------------------------------------------------------------- # options(contrasts = c('contr.bayes', 'contr.poly')) ## ---- eval=FALSE-------------------------------------------------------------- # contrasts(iris$Species) <- contr.bayes # # fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, # prior = normal(0, c(1, 1), autoscale = FALSE), # prior_PD = TRUE, # sample priors # family = gaussian()) # # pairs_bayes <- pairs(emmeans(fit_bayes, ~ Species)) # pairs_bayes ## ---- echo=FALSE-------------------------------------------------------------- contrasts(iris$Species) <- contr.bayes fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian(), refresh = 0) pairs_bayes <- pairs(emmeans(fit_bayes, ~ Species)) em_pairs_samples <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(pairs_bayes, names = FALSE))) print(pairs_bayes) ggplot(stack(em_pairs_samples), aes(x = values, fill = ind)) + geom_density(size = 1) + facet_grid(ind ~ .) 
+ labs(x = "prior difference values") + theme(legend.position = "none") ## ----------------------------------------------------------------------------- hyp <- c( # comparing 2 levels "setosa < versicolor", "setosa < virginica", "versicolor < virginica", # comparing 3 (or more) levels "setosa < virginica & virginica < versicolor", "virginica < setosa & setosa < versicolor", "setosa < versicolor & versicolor < virginica" ) ## ---- eval=FALSE-------------------------------------------------------------- # contrasts(iris$Species) <- contr.sum # # fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, # prior = normal(0, c(1, 1), autoscale = FALSE), # family = gaussian()) # # em_sum <- emmeans(fit_sum, ~ Species) # the posterior marginal means # # bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp) ## ---- echo=FALSE-------------------------------------------------------------- contrasts(iris$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian(), refresh = 0) em_sum <- emmeans(fit_sum, ~ Species) # the posterior marginal means bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp) ## ---- eval=FALSE-------------------------------------------------------------- # contrasts(iris$Species) <- contr.bayes # # fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, # prior = normal(0, c(1, 1), autoscale = FALSE), # family = gaussian()) # # em_bayes <- emmeans(fit_sum, ~ Species) # the posterior marginal means # # bayesfactor_restricted(em_bayes, fit_sum, hypothesis = hyp) ## ---- echo=FALSE-------------------------------------------------------------- contrasts(iris$Species) <- contr.bayes fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian(), refresh = 0) em_bayes <- emmeans(fit_bayes, ~ Species) # the posterior marginal means bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp) 
bayestestR/inst/doc/indicesEstimationComparison.R0000644000176200001440000001221013620150454021746 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ if (!requireNamespace("see", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("tidyr", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) ## ----message=FALSE, warning=FALSE--------------------------------------------- library(ggplot2) library(dplyr) library(tidyr) library(see) df <- read.csv("https://raw.github.com/easystats/circus/master/data/bayesSim_study1.csv") ## ---- message=FALSE, warning=FALSE-------------------------------------------- df %>% select(error, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>% gather(estimate, value, -error, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(error, 10, labels = FALSE))) %>% group_by(temp) %>% mutate(error_group = round(mean(error), 1)) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = error_group, y = value, fill = estimate, group = interaction(estimate, error_group))) + # geom_hline(yintercept = 0) + # geom_point(alpha=0.05, size=2, stroke = 0, shape=16) + # geom_smooth(method="loess") + geom_boxplot(outlier.shape=NA) + theme_modern() + scale_fill_manual(values = c("Coefficient" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + ylab("Point-estimate") + xlab("Noise") + facet_wrap(~ outcome_type * true_effect, scales="free") ## ---- message=FALSE, warning=FALSE-------------------------------------------- df %>% select(sample_size, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>% gather(estimate, value, -sample_size, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(sample_size, 10, labels = FALSE))) %>% group_by(temp) %>% mutate(size_group = 
round(mean(sample_size))) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = size_group, y = value, fill = estimate, group = interaction(estimate, size_group))) + # geom_hline(yintercept = 0) + # geom_point(alpha=0.05, size=2, stroke = 0, shape=16) + # geom_smooth(method="loess") + geom_boxplot(outlier.shape=NA) + theme_modern() + scale_fill_manual(values = c("Coefficient" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + ylab("Point-estimate") + xlab("Sample size") + facet_wrap(~ outcome_type * true_effect, scales="free") ## ---- message=FALSE, warning=FALSE-------------------------------------------- df %>% select(sample_size, error, true_effect, outcome_type, Coefficient, Median, Mean, MAP) %>% gather(estimate, value, -sample_size, -error, -true_effect, -outcome_type) %>% glm(true_effect ~ outcome_type / estimate / value, data=., family="binomial") %>% broom::tidy() %>% select(term, estimate, p=p.value) %>% filter(stringr::str_detect(term, 'outcome_type'), stringr::str_detect(term, ':value')) %>% arrange(desc(estimate)) %>% knitr::kable(digits=2) ## ----message=FALSE, warning=FALSE--------------------------------------------- df <- read.csv("https://raw.github.com/easystats/circus/master/data/bayesSim_study2.csv") ## ---- message=FALSE, warning=FALSE-------------------------------------------- df %>% select(iterations, true_effect, outcome_type, beta, Median, Mean, MAP) %>% gather(estimate, value, -iterations, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(iterations, 5, labels = FALSE))) %>% group_by(temp) %>% mutate(iterations_group = round(mean(iterations), 1)) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = iterations_group, y = value, fill = estimate, group = interaction(estimate, iterations_group))) + geom_boxplot(outlier.shape=NA) + theme_classic() + scale_fill_manual(values = c("beta" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + 
ylab("Point-estimate of the true value 0\n") + xlab("\nNumber of Iterations") + facet_wrap(~ outcome_type * true_effect, scales="free") ## ---- message=FALSE, warning=FALSE-------------------------------------------- df %>% mutate(warmup = warmup / iterations) %>% select(warmup, true_effect, outcome_type, beta, Median, Mean, MAP) %>% gather(estimate, value, -warmup, -true_effect, -outcome_type) %>% mutate(temp = as.factor(cut(warmup, 3, labels = FALSE))) %>% group_by(temp) %>% mutate(warmup_group = round(mean(warmup), 1)) %>% ungroup() %>% filter(value < 6) %>% ggplot(aes(x = warmup_group, y = value, fill = estimate, group = interaction(estimate, warmup_group))) + geom_boxplot(outlier.shape=NA) + theme_classic() + scale_fill_manual(values = c("beta" = "#607D8B", "MAP" = "#795548", "Mean" = "#FF9800", "Median" = "#FFEB3B"), name = "Index") + ylab("Point-estimate of the true value 0\n") + xlab("\nNumber of Iterations") + facet_wrap(~ outcome_type * true_effect, scales="free") bayestestR/inst/doc/bayestestR.R0000644000176200001440000000313113620150376016372 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ if (!requireNamespace("rstanarm", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) ## ----echo=FALSE, fig.cap="Accurate depiction of a regular Bayesian user estimating a credible interval.", fig.align='center', out.width="50%"---- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/bayesianMaster.jpg") ## ----eval=FALSE, message=FALSE, warning=FALSE--------------------------------- # install.packages("devtools") # devtools::install_github("easystats/easystats") ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # install.packages("rstanarm") # library(rstanarm) ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- 
# model <- lm(Sepal.Length ~ Petal.Length, data=iris) # summary(model) ## ----echo=FALSE, message=FALSE, warning=FALSE, comment=NA--------------------- library(dplyr) lm(Sepal.Length ~ Petal.Length, data=iris) %>% summary() ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris) # describe_posterior(model) ## ----echo=FALSE, message=FALSE, warning=FALSE, comment=NA--------------------- library(rstanarm) library(bayestestR) set.seed(333) model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris, refresh = 0) knitr::kable(describe_posterior(model), digits=2) bayestestR/inst/doc/bayes_factors.Rmd0000644000176200001440000012420413620150172017411 0ustar liggesusers--- title: "Bayes Factors" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true toc_depth: 2 fig_width: 10.08 fig_height: 6 tags: [r, bayesian, bayes factors] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Bayes Factors} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the following: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. 
Retrieved from [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) --- ```{r setup, include=FALSE} if (!requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("BayesFactor", quietly = TRUE) || !requireNamespace("emmeans", quietly = TRUE) || !requireNamespace("logspline", quietly = TRUE) || !requireNamespace("lme4", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("see", quietly = TRUE) ) { knitr::opts_chunk$set(eval = FALSE) } else { library(knitr) library(insight) library(bayestestR) library(rstanarm) library(BayesFactor) library(emmeans) library(ggplot2) library(see) options(knitr.kable.NA = '', digits = 2) opts_chunk$set(echo = TRUE, comment = ">", message = FALSE, warning = FALSE, dpi = 150) theme_set(theme_modern()) set.seed(4) } ``` The adoption of the Bayesian framework for applied statistics, especially in the social and psychological sciences, seems to be developing in two distinct directions. One of the key topics marking their separation is their opinion about **the Bayes factor**. In short, some authors (e.g., the "Amsterdam school", led by [Wagenmakers](https://www.bayesianspectacles.org/)) advocate its use and emphasize its qualities as a statistical index, while others point to its limits and prefer, instead, the precise description of posterior distributions (using [CIs](https://easystats.github.io/bayestestR/reference/hdi.html), [ROPEs](https://easystats.github.io/bayestestR/reference/rope.html), etc.). **bayestestR** does not take a side in this debate, rather offering tools to help you in whatever analysis you want to achieve. Instead, it strongly supports the notion of an *informed choice:* **discover the methods, try them, understand them, learn about them, and decide for yourself**. 
Having said that, here's an introduction to Bayes factors :) # The Bayes Factor **Bayes factors (BFs) are indices of *relative* evidence of one "model" over another**, which can be used in the Bayesian framework as alternatives to classical (frequentist) hypothesis testing indices (such as $p-values$). According to Bayes' theorem, we can update prior probabilities of some model $M$ ($P(M)$) to posterior probabilities ($P(M|D)$) after observing some datum $D$ by accounting for the probability of observing that datum given the model ($P(D|M)$, also known as the *likelihood*): $$ P(M|D) = \frac{P(D|M)\times P(M)}{P(D)} $$ Using this equation, We can compare the probability-odds of two models: $$ \frac{P(M_1|D)}{P(M_2|D)} = \frac{P(D|M_1)}{P(D|M_2)} \times \frac{P(M_1)}{P(M_2)} $$ Where the left-most term are the *posterior odds*, the right-most term are the *prior odds*, and the middle term is the *Bayes factor*: $$ BF_{12}=\frac{P(D|M_1)}{P(D|M_2)} $$ Thus, Bayes factors can be seen either as a ratio quantifying ***the relative probability of some observed data by two models*** as they can be computed by comparing the marginal likelihoods of the two models, or as ***the degree by which some prior beliefs about the relative credibility of two models are to be updated*** as they can be computed by dividing posterior odds by prior odds, as we will soon demonstrate. Here we provide functions for computing Bayes factors in two different applications: **testing single parameters (coefficients) within a model** and **comparing statistical models themselves**. 
## Testing Models' Parameters with Bayes Factors {#bayesfactor_parameters} A ***Bayes factor for a single parameter*** can be used to answer the question: > **Given the observed data, has the null hypothesis of an absence of an effect become more, or less credible?** ```{r deathsticks_fig, echo=FALSE, fig.cap="Bayesian analysis of the Students' (1908) Sleep data set.", fig.align='center', out.width="80%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/deathsticks.jpg") ``` Let's use the Students' (1908) Sleep data set (`data("sleep")`), in which **people took some drug** and where the researchers measured the **extra hours of sleep** that they slept afterwards. We will try answering the following question: *given the observed data, has the hypothesis that the drug (the effect of `group`) **has no effect** on the numbers of hours of **extra sleep** (variable `extra`) become more or less credible?* ```{r sleep_boxplot, echo=FALSE} ggplot(sleep, aes(x = group, y = extra, fill= group)) + geom_boxplot() + theme_classic() ``` The **boxplot** suggests that the second group has a higher number of hours of extra sleep. *By how much?* Let's fit a simple [Bayesian linear model](https://easystats.github.io/bayestestR/articles/example1_GLM.html), with a prior of $b_{group} \sim N(0, 3)$: ```{r rstanarm_model, eval = FALSE} library(rstanarm) model <- stan_glm(extra ~ group, data = sleep, prior = normal(0, 3, autoscale = FALSE)) ``` ```{r, echo=FALSE} model <- stan_glm(extra ~ group, data = sleep, prior = normal(0, 3, autoscale = FALSE), refresh = 0) ``` ### Testing against a null-*region* One way of operationalizing the null-hypothesis is by setting a null region, such that an effect that falls within this interval would be practically equivalent to the null [@kruschke2010believe]. In our case, that means defining a range of effects we would consider equal to the drug having no effect at all. 
We can then compute the prior probability of the drug's effect falling *within this null-region*, and the prior probability of the drug's effect falling *outside the null-region* to get our *prior odds*. Say any effect smaller than an hour of extra sleep is practically equivalent to being no effect at all, we would define our prior odds as: $$ \frac {P(b_{drug} \in [-1, 1])} {P(b_{drug} \notin [-1, 1])} $$ Given our prior has a normal distribution centered at 0 hours with a scale (an SD) of 2.5 hours, our priors would look like this: ```{r, echo=FALSE} null <- c(-1,1) xrange <- c(-10,10) x_vals <- seq(xrange[1], xrange[2], length.out = 400) d_vals <- dnorm(x_vals, sd = 3) in_null <- null[1] < x_vals & x_vals < null[2] range_groups <- rep(0, length(x_vals)) range_groups[!in_null & x_vals < 0] <- -1 range_groups[!in_null & x_vals > 0] <- 1 ggplot(mapping = aes(x_vals, d_vals, fill = in_null, group = range_groups)) + geom_area(color = "black", size = 1) + scale_fill_flat(name = "", labels = c("Alternative", "Null")) + labs(x = "Drug effect", y = "Density") + theme_modern() + theme(legend.position = c(0.2, 0.8)) pnull <- diff(pnorm(null, sd = 2.5)) prior_odds <- (1 - pnull) / pnull ``` and the prior odds would be 2.2. 
By looking at the posterior distribution, can now compute the posterior probability of the drug's effect falling *within the null-region*, and the posterior probability of the drug's effect falling *outside the null-region* to get our *posterior odds*: $$ \frac {P(b_{drug} \in [-1,1] | Data)} {P(b_{drug} \notin [-1,1] | Data)} $$ ```{r rstanarm_fit, echo=FALSE} model_prior <- bayestestR:::.update_to_priors.stanreg(model) posterior <- insight::get_parameters(model)$group2 prior <- insight::get_parameters(model_prior)$group2 f_post <- logspline::logspline(posterior) d_vals_post <- logspline::dlogspline(x_vals,f_post) ggplot(mapping = aes(x_vals, d_vals_post, fill = in_null, group = range_groups)) + geom_area(color = "black", size = 1) + scale_fill_flat(name = "", labels = c("Alternative", "Null")) + labs(x = "Drug effect", y = "Density") + theme_modern() + theme(legend.position = c(0.2, 0.8)) My_first_BF <- bayesfactor_parameters(model, null = c(-1,1), prior = model_prior) BF <- My_first_BF$BF[2] post_odds <- prior_odds * BF med_post <- point_estimate(posterior)$Median ``` We can see that the center of the posterior distribution has shifted away from 0 (to ~1.5). Likewise, the posterior odds are 2 - which seems to favor **the effect being non-null**, but... *does this mean the data support the alternative over the null?* Hard to say, since even before the data were observed, the priors already favored the alternative - so we need to take our priors into account here! Let's compute the Bayes factor as the change from the prior odds to the posterior odds: $BF_{10} = Odds_{posterior} / Odds_{prior} = 0.9$! This BF indicates that the data provide 1/0.9 = 1.1 times more evidence for the effect of the drug being practically nothing than it does for the drug having some clinically significant effect. 
Thus, although the center of the distribution has shifted away from 0, and the posterior distribution seems to favor a non-null effect of the drug, it seems that given the observed data, the probability mass has *overall* shifted closer to the null interval, making the values in the null interval more probable! [see *Non-overlapping Hypotheses* in @morey2011bayesinterval] Note that **interpretation guides** for Bayes factors can be found [**here**](https://easystats.github.io/report/articles/interpret_metrics.html#bayes-factor-bf). All of this can be achieved with the function `bayesfactor_parameters()`, which computes a Bayes factor for each of the model's parameters: ```{r, eval=FALSE} My_first_BF <- bayesfactor_parameters(model, null = c(-1, 1)) My_first_BF ``` ```{r, echo=FALSE} print(My_first_BF) ``` We can also plot using the `see` package: ```{r} library(see) plot(My_first_BF) ``` ### Testing against the *point*-null (0) **What if we don't know what region would be practically equivalent to 0?** Or if we just want the null to be exactly zero? Not a problem - as the width of null region shrinks to a point, the change from the prior probability to the posterior probability of the null can be estimated by comparing the density of the null value between the two distributions.^[Note that as the width of null interval shrinks to zero, the prior probability and posterior probability of the alternative tends towards 1.00.] This ratio is called the **Savage-Dickey ratio**, and has the added benefit of also being an approximation of a Bayes factor comparing the estimated model against a model in which the parameter of interest has been restricted to a point-null: > "[...] the Bayes factor for $H_0$ versus $H_1$ could be obtained by analytically integrating out the model parameter $\theta$. 
However, the Bayes factor may likewise be obtained by only considering $H_1$, and dividing the height of the posterior for $\theta$ by the height of the prior for $\theta$, at the point of interest." [@wagenmakers2010bayesian] ```{r, eval=FALSE} My_second_BF <- bayesfactor_parameters(model, null = 0) My_second_BF ``` ```{r, echo=FALSE} My_second_BF <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = 0) print(My_second_BF) ``` ```{r} plot(My_second_BF) ``` ### One-sided tests We can also conduct a directional test (a "one sided" or "one tailed" test) if we have a prior hypothesis about the direction of the effect. This can be done by setting an order restriction on the prior distribution (and thus also on the posterior distribution) of the alternative [@morey2014simple]. For example, if we have a prior hypothesis that the effect of the drug is an *increase* in the number of sleep hours, the alternative will be restricted to the region to the right of the null (point or interval): ```{r savagedickey_one_sided, eval=FALSE} test_group2_right <- bayesfactor_parameters(model, direction = ">") test_group2_right ``` ```{r prior_n_post_plot_one_sided, echo=FALSE} test_group2_right <- bayesfactor_parameters( data.frame(group2 = posterior), data.frame(group2 = prior), null = 0, direction = ">" ) BF <- test_group2_right$BF print(test_group2_right) ``` ```{r} plot(test_group2_right) ``` As we can see, given that we have an *a priori* assumption about the direction of the effect (*that the effect is positive*), **the presence of an effect is 2.8 times more likely than the absence of an effect**, given the observed data (or that the data are 2.8 times more probable under $H_1$ than $H_0$). 
This indicates that, given the observed data, and a priori hypothesis, the posterior mass has shifted away from the null value, giving some evidence against the null (note that a Bayes factor of 2.8 is still considered quite [weak evidence](https://easystats.github.io/report/articles/interpret_metrics.html#bayes-factor-bf)). **NOTE**: See the *Testing Contrasts* appendix below. ### Support intervals {#si} So far we've seen that Bayes factors quantify relative support between competing hypotheses. However, we can also ask: > **Upon observing the data, the credibility of which of the parameter’s values has increased (or decreased)?** For example, we've seen that the point null has become somewhat less credible after observing the data, but we might also ask *which values have gained some credibility given the observed data?*. The resulting range of values is called **the support interval** as it indicates which values are supported by the data [@wagenmakers2018SI]. We can do this by once again comparing the prior and posterior distributions and checking where the posterior densities are higher than the prior densities. This can be achieved with the `si()` function: ```{r, eval=FALSE} my_first_si <- si(model, BF = 1) my_first_si ``` ```{r, echo=FALSE} my_first_si <- si(data.frame(group2 = posterior), data.frame(group2 = prior), BF = 1) print(my_first_si) ``` The argument `BF = 1` indicates that we want the interval to contain values that have gained support by a factor of at least 1 (that is, any support at all). Visually, we can see that the credibility of all the values within this interval has increased (and likewise the credibility of all the values outside this interval has decreased): ```{r} plot(my_first_si) ``` We can also see that this support interval (just barely) excludes the point null (0) - whose credibility we've already seen has decreased by the observed data. 
This emphasizes the relationship between the support interval and the Bayes factor: > "The interpretation of such intervals would be analogous to how a frequentist confidence interval contains all the parameter values that would not have been rejected if tested at level $\alpha$. For instance, a BF = 1/3 support interval encloses all values of theta for which the updating factor is not stronger than 3 against." [@wagenmakers2018SI] Thus, the choice of BF (the level of support the interval should indicate) depends on what we want our interval to represent: - A $BF = 1$ contains values whose credibility has merely not decreased by observing the data. - A $BF > 1$ contains values that received more impressive support from the data. - A $BF < 1$ contains values whose credibility has *not* been impressively decreased by observing the data. Testing against values outside this interval will produce a Bayes factor larger than $1/BF$ in support of the alternative. ## Comparing Models using Bayes Factors {#bayesfactor_models} Bayes factors can also be used to compare statistical models, for which they answer the question: > **Under which model are the observed data more probable?** In other words, which model is more likely to have produced the observed data? This is usually done by comparing the marginal likelihoods of two models. In such a case, the Bayes factor is a measure of the *relative* evidence of one of the compared models over the other. Let's use Bayes factors for model comparison to find a model that best describes the length of an iris' sepal using the `iris` data set. ### For Bayesian models (`brms` and `rstanarm`) **Note: In order to compute Bayes factors for Bayesian models, non-default arguments must be added upon fitting:** - `brmsfit` models **must** have been fitted with `save_all_pars = TRUE` - `stanreg` models **must** have been fitted with a defined `diagnostic_file`. 
Let's first fit 5 Bayesian regressions with `brms` to predict `Sepal.Length`: ```{r brms_disp, eval=FALSE} library(brms) m0 <- brm(Sepal.Length ~ 1, # intercept only model data = iris, save_all_pars = TRUE) m1 <- brm(Sepal.Length ~ Petal.Length, data = iris, save_all_pars = TRUE) m2 <- brm(Sepal.Length ~ Species, data = iris, save_all_pars = TRUE) m3 <- brm(Sepal.Length ~ Species + Petal.Length, data = iris, save_all_pars = TRUE) m4 <- brm(Sepal.Length ~ Species * Petal.Length, data = iris, save_all_pars = TRUE) ``` We can now compare these models with the `bayesfactor_models()` function, using the `denominator` argument to specify which model all models will be compared against (in this case, the intercept-only model): ```{r brms_models_disp, eval=FALSE} library(bayestestR) comparison <- bayesfactor_models(m1, m2, m3, m4, denominator = m0) comparison ``` ```{r brms_models_print, echo=FALSE} comparison <- structure( list( Model = c( "Petal.Length", "Species", "Species + Petal.Length", "Species * Petal.Length", "1" ), BF = c(3.44736e+44, 5.628679e+29, 7.121386e+55, 9.149948e+55, 1) ), class = c("bayesfactor_models", "see_bayesfactor_models", "data.frame"), row.names = c(NA, -5L), denominator = 5L, BF_method = "marginal likelihoods (bridgesampling)", unsupported_models = FALSE ) print(comparison) ``` We can see that the full model is the best model - with $BF_{\text{m0}}=9\times 10^{55}$ compared to the null (intercept only). Due to the transitive property of Bayes factors, we can easily change the reference model to the main effects model: ```{r update_models1} update(comparison, reference = 3) ``` As we can see, though the full model is the best, there is hardly any evidence that it is preferable to the main effects model. 
We can also change the reference model to the `Species` model: ```{r update_models2} update(comparison, reference = 2) ``` Notice that in the Bayesian framework the compared models *do not* need to be nested models, as happened here when we compared the `Petal.Length`-only model to the `Species`-only model (something that cannot be done in the frequentists framework, where compared models must be nested in one another). **NOTE:** In order to correctly and precisely estimate Bayes Factors, you always need the 4 P's: **P**roper **P**riors ^[[Robert, 2016](https://doi.org/10.1016/j.jmp.2015.08.002); [Kass & Raftery, 1993](https://doi.org/10.1080/01621459.1995.10476572); [Fernández, Ley, & Steel, 2001](https://doi.org/10.1016/S0304-4076(00)00076-2)], and a **P**lentiful **P**osterior ^[[Gronau, Wagenmakers, Heck, & Matzke, 2019](https://doi.org/10.1007/s11336-018-9648-3)]. ### For Frequentist models via the BIC approximation It is also possible to compute Bayes factors for the comparison of frequentist models. This is done by comparing BIC measures, allowing a Bayesian comparison of non-nested frequentist models [@wagenmakers2007practical]. 
Let's try it out on some **linear mixed models**: ```{r lme4_models, eval=FALSE} library(lme4) m0 <- lmer(Sepal.Length ~ (1 | Species), data = iris) m1 <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) m2 <- lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) m3 <- lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris) m4 <- lmer(Sepal.Length ~ Petal.Length * Petal.Width + (Petal.Length | Species), data = iris) bayesfactor_models(m1, m2, m3, m4, denominator = m0) ``` ```{r, echo=FALSE} structure(list(Model = c( "Petal.Length + (1 | Species)", "Petal.Length + (Petal.Length | Species)", "Petal.Length + Petal.Width + (Petal.Length | Species)", "Petal.Length * Petal.Width + (Petal.Length | Species)", "1 + (1 | Species)"), BF = c(8.24027869011648e+24, 4.7677519818206e+23, 1.52492156042604e+22, 5.93045520305254e+20, 1)), class = c("bayesfactor_models", "see_bayesfactor_models", "data.frame"), row.names = c(NA, -5L), denominator = 5L, BF_method = "BIC approximation", unsupported_models = FALSE) ``` ### Order restricted models {#bayesfactor_restricted} As stated above when discussing one-sided hypothesis tests, we can create new models by imposing order restrictions on a given model. 
For example, consider the following model, in which we predict the length of an iris' sepal from the length of its petal, as well as from its species, with a prior of $b_{petal} \sim N(0,2)$ $b_{versicolors}\ \&\ b_{virginica} \sim N(0,1.2)$: ```{r, eval=FALSE} iris_model <- stan_glm(Sepal.Length ~ Species + Petal.Length, data = iris, prior = normal(0, c(2, 1.2, 1.2), autoscale = FALSE)) ``` ```{r, echo=FALSE} iris_model <- stan_glm(Sepal.Length ~ Species + Petal.Length, data = iris, prior = normal(0, c(2, 1.2, 1.2), autoscale = FALSE), refresh = 0) ``` These priors are **unrestricted** - that is, all values between $-\infty$ and $\infty$ of all parameters in the model have some non-zero credibility (no matter how small; this is true for both the prior and posterior distribution). Subsequently, *a priori* the ordering of the parameters relating to the iris species can have any ordering, such that (a priori) setosa can have larger sepals than virginica, but it is also possible for virginica to have larger sepals than setosa! Does it make sense to let our priors cover all of these possibilities? That depends on our *prior* knowledge or hypotheses. For example, even a novice botanist will assume that it is unlikely that petal length will be *negatively* associated with sepal length - an iris with longer petals is likely larger, and thus will also have a longer sepal. And an expert botanist will perhaps assume that setosas have smaller sepals than both versicolors and virginica. These priors can be formulated as **restricted** priors [@morey_2015_blog; @morey2011bayesinterval]: 1. The novice botanist: $b_{petal} > 0$ 2. The expert botanist: $b_{versicolors} > 0\ \&\ b_{virginica} > 0$ By testing these restrictions on prior and posterior samples, we can see how the probabilities of the restricted distributions change after observing the data. 
This can be achieved with `bayesfactor_restricted()`, that compute a Bayes factor for these restricted model vs the unrestricted model. Let's first specify these restrictions as logical conditions: ```{r} botanist_hypotheses <- c( "Petal.Length > 0", "(Speciesversicolor > 0) & (Speciesvirginica > 0)" ) ``` Let's test these hypotheses: ```{r, eval=FALSE} botanist_BFs <- bayesfactor_restricted(iris_model, hypothesis = botanist_hypotheses) botanist_BFs ``` ```{r, echo=FALSE} model_prior <- bayestestR:::.update_to_priors.stanreg(iris_model) botanist_BFs <- bayesfactor_restricted(iris_model, prior = model_prior, hypothesis = botanist_hypotheses) print(botanist_BFs) ``` We can see that the novice botanist's hypothesis gets a Bayes factor of ~2, indicating the data provides twice as much evidence for a model in which petal length is restricted to be positively associated with sepal length than for a model with not such restriction. What about our expert botanist? He seems to have failed miserably, with a BF favoring the *unrestricted* model many many times over ($BF\gg1,000$). How is this possible? It seems that when *controlling for petal length*, versicolor and virginica actually have shorter sepals! ```{r plot_iris, echo=FALSE} ggplot(iris, aes(Petal.Length, Sepal.Length, color = Species)) + geom_point() + scale_color_flat() + theme(legend.position = c(0.2, 0.8)) ``` Note that these Bayes factors compare the restricted model to the unrestricted model. 
If we wanted to compare the restricted model to the null model, we could use the transitive property of Bayes factors like so: $$ BF_{restricted / NULL} = \frac {BF_{restricted / un-restricted}} {BF_{un-restricted / NULL}} $$ **Because these restrictions are on the prior distribution, they are only appropriate for testing pre-planned (*a priori*) hypotheses, and should not be used for any post hoc comparisons [@morey_2015_blog].** **NOTE**: See the *Specifying Correct Priors for Factors with More Than 2 Levels* appendix below. ## Bayesian Model Averaging In the previous section we discussed the direct comparison of two models to determine if an effect is supported by the data. However, in many cases there are too many models to consider or perhaps it is not straightforward which models we should compare to determine if an effect is supported by the data. For such cases we can use Bayesian model averaging (BMA) to determine the support provided by the data for a parameter or term across many models. ### Inclusion Bayes factors {#bayesfactor_inclusion} Inclusion Bayes factors answer the question: > **Are the observed data more probable under models with a particular predictor, than they are under models without that particular predictor?** In other words, on average - are models with predictor $X$ more likely to have produced the observed data than models without predictor $X$?^[A model without predictor $X$ can be thought of as a model in which the parameter(s) of the predictor have been restricted to a null-point of 0.] Since each model has a prior probability, it is possible to sum the prior probability of all models that include a predictor of interest (the *prior inclusion probability*), and of all models that do not include that predictor (the *prior exclusion probability*). 
After the data are observed, and each model is assigned a posterior probability, we can similarly consider the sums of the posterior models' probabilities to obtain the *posterior inclusion probability* and the *posterior exclusion probability*. Once again, the change from prior inclusion odds to the posterior inclusion odds is the Inclusion Bayes factor ["$BF_{Inclusion}$"; @clyde2011bayesian]. Lets use the `brms` example from above: ```{r inclusion_brms} bayesfactor_inclusion(comparison) ``` If we examine the interaction term's inclusion Bayes factor, we can see that across all 5 models, a model with the interaction term (`Species:Petal.Length`) is *on average* 5 times more likely than a model without the interaction term. **Note** that `Species`, a factor represented in the model with several parameters, gets a single Bayes factor - inclusion Bayes factors are given per predictor! We can also compare only matched models - such that averaging is done only across models that (1) do not include any interactions with the predictor of interest; (2) for interaction predictors, averaging is done only across models that contain the main effect from which the interaction predictor is comprised (see explanation for why you might want to do this [here](https://www.cogsci.nl/blog/interpreting-bayesian-repeated-measures-in-jasp)). ```{r inclusion_brms2} bayesfactor_inclusion(comparison, match_models = TRUE) ``` #### Comparison with JASP `bayesfactor_inclusion()` is meant to provide Bayes Factors per predictor, similar to JASP's *Effects* option. Let's compare the two: 1. Across all models: ```{r JASP_all} library(BayesFactor) data(ToothGrowth) ToothGrowth$dose <- as.factor(ToothGrowth$dose) BF_ToothGrowth <- anovaBF(len ~ dose*supp, ToothGrowth, progress = FALSE) bayesfactor_inclusion(BF_ToothGrowth) ``` ```{r JASP_all_fig, echo=FALSE} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP1.PNG") ``` 2. 
Across matched models: ```{r JASP_matched} bayesfactor_inclusion(BF_ToothGrowth, match_models = TRUE) ``` ```{r JASP_matched_fig, echo=FALSE} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP2.PNG") ``` 3. With Nuisance Effects: We'll add `dose` to the null model in JASP, and do the same in `R`: ```{r JASP_Nuisance} BF_ToothGrowth_against_dose <- BF_ToothGrowth[3:4]/BF_ToothGrowth[2] # OR: # update(bayesfactor_models(BF_ToothGrowth), # subset = c(4, 5), # reference = 3) BF_ToothGrowth_against_dose bayesfactor_inclusion(BF_ToothGrowth_against_dose) ``` ```{r JASP_Nuisance_fig, echo=FALSE} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/JASP3.PNG") ``` ### Averaging posteriors {#weighted_posteriors} Similar to how we can average evidence for a predictor across models, we can also average the posterior estimate across models. One situation in which this is useful in **situations where Bayes factors seem to support a null effect, yet the *HDI* of the alternative excludes the null value** (also see `si()` described above). For example, looking at Motor *Trend Car Road Tests* (`data(mtcars)`), we would naturally predict miles/gallon (`mpg`) from transition type (`am`) and weight (`wt`), but what about number of carburetors (`carb`)? Is this a good predictor? 
We can determine this by comparing the following models: ```{r, eval=FALSE} mod <- stan_glm(mpg ~ wt + am, data = mtcars, prior = normal(0, c(10,10), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df1.csv")) mod_carb <- stan_glm(mpg ~ wt + am + carb, data = mtcars, prior = normal(0, c(10,10,20), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df0.csv")) bayesfactor_models(mod_carb, denominator = mod) ``` ```{r, echo=FALSE} mod <- stan_glm(mpg ~ wt + am, data = mtcars, prior = normal(0, c(10,10), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df1.csv"), refresh = 0) mod_carb <- stan_glm(mpg ~ wt + am + carb, data = mtcars, prior = normal(0, c(10,10,20), autoscale = FALSE), diagnostic_file = file.path(tempdir(), "df0.csv"), refresh = 0) BF_carb <- bayesfactor_models(mod_carb, denominator = mod, verbose = FALSE) BF <- BF_carb$BF[1] print(BF_carb) ``` It seems that the model without `carb` as a predictor is $1/BF=1.2$ times more likely than the model *with* `carb` as a predictor. We might then assume that in the latter model, the HDI will include the point-null value of 0 effect, to also indicate the credibility of the null in the posterior. However, this is not the case: ```{r} hdi(mod_carb, ci = .95) ``` How can this be? By estimating the HDI of the effect for `carb` in the full model, we are acting under the assumption that that model is correct. However, as we've just seen, both models are practically tied, and in fact it was the no-`carb` model, in which the effect for `carb` is fixed at 0, that was slightly more supported by the data. If this is the case **why limit our estimation of the effect just to one model?** [@van2019cautionary]. Using Bayesian model averaging, we can combine the posteriors samples from several models, weighted by the models' marginal likelihood (done via the `bayesfactor_models()` function). 
If some parameter is part of some of the models but is missing from others, it is assumed to be fixed a 0 (which can also be seen as a method of applying shrinkage to our estimates). This results in a posterior distribution across several models, which we can now treat like any posterior distribution, and estimate the HDI. We can do this with the `weighted_posteriors()` function: ```{r} BMA_draws <- weighted_posteriors(mod, mod_carb) BMA_hdi <- hdi(BMA_draws, ci = .95) BMA_hdi plot(BMA_hdi) ``` We can see that across both models under consideration, the posterior of the `carb` effect is almost equally weighted between the alternative model and the null model - as represented by about half of the posterior mass concentrated at 0 - which makes sense as both models were almost equally supported by the data. We can also see that across both models, that now **the HDI does contain 0**. Thus we have resolved the conflict between the Bayes factor and the HDI [@rouder2018bayesian]! **Note** that parameters might play different roles across different models; For example, the parameter `A` plays a different role in the model `Y ~ A + B` (where it is a main effect) than it does in the model `Y ~ A + B + A:B` (where it is a simple effect). In many cases centering of predictors (mean subtracting for continuous variables, and effects coding via `contr.sum` or orthonormal coding via [`contr.bayes`](https://easystats.github.io/bayestestR/reference/contr.bayes.html) for factors) can in some cases reduce this issue. ## Appendices ### Testing contrasts (with `emmeans` / `modelbased`) Besides testing parameter `bayesfactor_parameters()` can be used to test any estimate based on the prior and posterior distribution of the estimate. One way to achieve this is with a mix of `bayesfactor_parameters()` + [**`emmeans`**](https://cran.r-project.org/package=emmeans) to [test Bayesian contrasts](https://easystats.github.io/blog/posts/bayestestr_emmeans/). 
For example, in the `sleep` example from above, we can estimate the group means and the difference between them: ```{r, echo=FALSE} set.seed(1) ``` ```{r} library(emmeans) groups <- emmeans(model, ~ group) group_diff <- pairs(groups) (groups_all <- rbind(groups, group_diff)) # pass the original model via prior bayesfactor_parameters(groups_all, prior = model) ``` That is strong evidence for the mean of group 1 being 0, and for group 2 for not being 0, but hardly any evidence for the difference between them being not 0. Conflict? Uncertainty? That is the Bayesian way! We can also use the `easystats`' [**`modelbased`**](https://cran.r-project.org/package=modelbased) package to compute Bayes factors for contrasts: ```{r, echo=FALSE} set.seed(1) ``` ```{r} library(modelbased) estimate_contrasts(model, test = "bf") ``` **NOTE**: See the *Specifying Correct Priors for Factors with More Than 2 Levels* section below. ### Specifying correct priors for factors {#contr_bayes} This section introduces the biased priors obtained when using the common *effects* factor coding (`contr.sum`) or dummy factor coding (`contr.treatment`), and the solution of using orthonormal factor coding (`contr.bayes`) [as outlined in @rouder2012default, section 7.2]. Specifically, ***special care should be taken when working with factors which have 3 or more levels***. #### Contrasts (and marginal means) The *effects* factor coding commonly used in factorial analysis carries a hidden bias when it is applies to Bayesian priors. For example, if we want to test all pairwise differences between 3 levels of the same factor, we would expect all *a priori* differences to have the same distribution, but... For our example, we will be test all ***prior*** pairwise differences between the 3 species in the `iris` data-set. 
```{r, eval=FALSE} contrasts(iris$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian()) pairs_sum <- pairs(emmeans(fit_sum, ~ Species)) pairs_sum ``` ```{r, echo=FALSE} contrasts(iris$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian(), refresh = 0) pairs_sum <- pairs(emmeans(fit_sum, ~ Species)) em_pairs_samples <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(pairs_sum, names = FALSE))) print(pairs_sum) ggplot(stack(em_pairs_samples), aes(x = values, fill = ind)) + geom_density(size = 1) + facet_grid(ind ~ .) + labs(x = "prior difference values") + theme(legend.position = "none") ``` We can see that the though the prior estimate for all 3 pairwise contrasts is ~0, the scale / HDI is much more narrow for the prior of the `setosa - versicolor` contrast! ***What happened???*** This is caused by an inherent bias in the priors introduced by the *effects* coding (it's even worse with the default treatment coding, because the prior for the intercept is usually drastically different from the effect's parameters). 
**And since it affects the priors, this bias will also bias the the Bayes factors over / understating evidence for some contrasts over others!** The solution is to use *orthonormal* factor coding, a-la `contr.bayes`, which can either specify this factor coding per-factor: ```{r} contrasts(iris$Species) <- contr.bayes ``` Or you can set it globally: ```{r, eval=FALSE} options(contrasts = c('contr.bayes', 'contr.poly')) ``` Let's again estimate the ***prior*** differences: ```{r, eval=FALSE} contrasts(iris$Species) <- contr.bayes fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian()) pairs_bayes <- pairs(emmeans(fit_bayes, ~ Species)) pairs_bayes ``` ```{r, echo=FALSE} contrasts(iris$Species) <- contr.bayes fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), prior_PD = TRUE, # sample priors family = gaussian(), refresh = 0) pairs_bayes <- pairs(emmeans(fit_bayes, ~ Species)) em_pairs_samples <- as.data.frame(as.matrix(emmeans::as.mcmc.emmGrid(pairs_bayes, names = FALSE))) print(pairs_bayes) ggplot(stack(em_pairs_samples), aes(x = values, fill = ind)) + geom_density(size = 1) + facet_grid(ind ~ .) + labs(x = "prior difference values") + theme(legend.position = "none") ``` We can see that using this coding scheme, we have equal priors on all pairwise contrasts. #### Order restrictions This bias also affect order restrictions involving 3 or more levels. For example, if we want to test an order restriction among A, B, and C, the *a priori* probability of obtaining the order A > C > B is 1/6 (reach back to *intro to stats* year 1), but... 
For our example, we will be interested in the following order restrictions in the `iris` data-set (each line is a separate restriction): ```{r} hyp <- c( # comparing 2 levels "setosa < versicolor", "setosa < virginica", "versicolor < virginica", # comparing 3 (or more) levels "setosa < virginica & virginica < versicolor", "virginica < setosa & setosa < versicolor", "setosa < versicolor & versicolor < virginica" ) ``` With the default factor coding, this looks like this: ```{r, eval=FALSE} contrasts(iris$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian()) em_sum <- emmeans(fit_sum, ~ Species) # the posterior marginal means bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp) ``` ```{r, echo=FALSE} contrasts(iris$Species) <- contr.sum fit_sum <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian(), refresh = 0) em_sum <- emmeans(fit_sum, ~ Species) # the posterior marginal means bayesfactor_restricted(em_sum, fit_sum, hypothesis = hyp) ``` ***What happened???*** 1. The comparison of 2 levels all have a prior of ~0.5, as expected. 2. The comparison of 3 levels has different priors, depending on the order restriction - i.e. **some orders are *a priori* more likely than others!!!** Again, this is solved by using the *orthonormal* factor coding (from above). 
```{r, eval=FALSE} contrasts(iris$Species) <- contr.bayes fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian()) em_bayes <- emmeans(fit_bayes, ~ Species) # the posterior marginal means bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp) ``` ```{r, echo=FALSE} contrasts(iris$Species) <- contr.bayes fit_bayes <- stan_glm(Sepal.Length ~ Species, data = iris, prior = normal(0, c(1, 1), autoscale = FALSE), family = gaussian(), refresh = 0) em_bayes <- emmeans(fit_bayes, ~ Species) # the posterior marginal means bayesfactor_restricted(em_bayes, fit_bayes, hypothesis = hyp) ``` #### Conclusion When comparing the results from the two factor coding schemes, we find: 1. In both cases, the estimated (posterior) means are quite similar (if not identical). 2. The priors and Bayes factors differ between the two schemes. 3. Only with `contr.bayes`, the prior distribution of the difference or the order of 3 (or more) means is balanced. # ReferencesbayestestR/inst/doc/example3.Rmd0000644000176200001440000000355213610210142016276 0ustar liggesusers--- title: "3. Become a Bayesian master" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Example 3: Become a Bayesian master} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. 
https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") knitr::opts_chunk$set(dpi=150) options(digits=2) set.seed(333) ``` ```{r echo=FALSE, fig.cap="Yoda Bayes (896 BBY - 4 ABY).", fig.align='center', out.width="80%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/YodaBayes.jpg") ``` ## Mixed Models TO BE CONTINUED. ### Priors TO BE CONTINUED. ## What's next? The journey to become a true Bayesian master is not over. It is merely the beginning. It is now time to leave the `bayestestR` universe and apply the Bayesian framework in a variety of other statistical contexts: - [**Marginal means**](https://easystats.github.io/modelbased/articles/marginal_means.html) - [**Contrast analysis**](https://easystats.github.io/modelbased/articles/contrast_analysis.html) - [**Testing Contrasts from Bayesian Models with 'emmeans' and 'bayestestR'**](https://easystats.github.io/blog/posts/bayestestr_emmeans/) bayestestR/inst/doc/indicesEstimationComparison.html0000644000176200001440000027475513620150455022542 0ustar liggesusers

In-Depth 1: Comparison of Point-Estimates

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

Effect Point-Estimates in the Bayesian Framework

Introduction

One of the main differences between the Bayesian and the frequentist frameworks is that the former returns a probability distribution of each effect (i.e., parameter of interest of a model, such as a regression slope) instead of a single value. However, there is still a need and demand, for reporting or use in further analysis, for a single value (point-estimate) that best characterises the underlying posterior distribution.

There are three main indices used in the literature for effect estimation: the mean, the median or the MAP (Maximum A Posteriori) estimate (roughly corresponding to the mode (the “peak”) of the distribution). Unfortunately, there is no consensus about which one to use, as no systematic comparison has ever been done.

In the present work, we will compare these three point-estimates of effect between themselves, as well as with the widely known beta, extracted from a comparable frequentist model. With this comparison, we expect to draw bridges and relationships between the two frameworks, helping and easing the transition for the public.

Experiment 1: Relationship with Error (Noise) and Sample Size

Methods

The simulation aimed at modulating the following characteristics:

  • Model type: linear or logistic.
  • “True” effect (original regression coefficient from which data is drawn): Can be 1 or 0 (no effect).
  • Sample size: From 20 to 100 by steps of 10.
  • Error: Gaussian noise applied to the predictor with SD uniformly spread between 0.33 and 6.66 (with 1000 different values).

We generated a dataset for each combination of these characteristics, resulting in a total of 2 * 2 * 9 * 1000 = 36000 Bayesian and frequentist models. The code used for generation is available here (please note that it usually takes several days/weeks to complete).

Results

Sensitivity to Noise

Sensitivity to Sample Size

Statistical Modelling

We fitted a (frequentist) multiple linear regression to statistically predict the presence or absence of an effect with the estimates, as well as their interaction with noise and sample size.

term estimate p
outcome_typelinear:estimateMean:value 10.8 0
outcome_typelinear:estimateMedian:value 10.8 0
outcome_typelinear:estimateMAP:value 10.7 0
outcome_typelinear:estimateCoefficient:value 10.5 0
outcome_typebinary:estimateMAP:value 4.4 0
outcome_typebinary:estimateMedian:value 4.3 0
outcome_typebinary:estimateMean:value 4.2 0
outcome_typebinary:estimateCoefficient:value 3.9 0

This suggests that, in order to delineate between the presence and the absence of an effect, compared to the frequentist’s beta:

  • For linear models, the Mean was the better predictor, closely followed by the Median, the MAP and the frequentist Coefficient.
  • For logistic models, the MAP was the better predictor, followed by the Median, the Mean and, behind, the frequentist Coefficient.

Overall, the median appears to be a safe and appropriate choice, maintaining a high performance across different types of models.

Experiment 2: Relationship with Sampling Characteristics

Methods

The simulation aimed at modulating the following characteristics:

  • Model type: linear or logistic.
  • “True” effect (original regression coefficient from which data is drawn): Can be 1 or 0 (no effect).
  • draws: from 10 to 5000 by step of 5 (1000 iterations).
  • warmup: Ratio of warmup iterations. from 1/10 to 9/10 by step of 0.1 (9 iterations).

We generated 3 datasets for each combination of these characteristics, resulting in a total of 2 * 2 * 8 * 40 * 9 * 3 = 34560 Bayesian and frequentist models. The code used for generation is available here (please note that it usually takes several days/weeks to complete).

Results

Sensitivity to number of iterations

Sensitivity to warmup ratio

Experiment 3: Relationship with Priors Specification

Discussion

Conclusions can be found in the guidelines section.

bayestestR/inst/doc/example2.R0000644000176200001440000000664613620150414015771 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ if (!requireNamespace("see", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("performance", quietly = TRUE) || !requireNamespace("BayesFactor", quietly = TRUE) || !requireNamespace("rstanarm", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } data(iris) library(knitr) library(bayestestR) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") knitr::opts_chunk$set(dpi=150) options(digits=2) set.seed(333) ## ----message=FALSE, warning=FALSE--------------------------------------------- result <- cor.test(iris$Sepal.Width, iris$Sepal.Length) result ## ----message=FALSE, warning=FALSE, results='hide'----------------------------- library(BayesFactor) result <- correlationBF(iris$Sepal.Width, iris$Sepal.Length) ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # describe_posterior(result) ## ----echo=FALSE--------------------------------------------------------------- structure(list(Parameter = "rho", Median = -0.114149129692488, CI = 89, CI_low = -0.240766308855643, CI_high = 0.00794997655649642, pd = 91.6, ROPE_CI = 89, ROPE_low = -0.1, ROPE_high = 0.1, ROPE_Percentage = 42.0949171581017, BF = 0.509017511647702, Prior_Distribution = "cauchy", Prior_Location = 0, Prior_Scale = 0.333333333333333), row.names = 1L, class = "data.frame") ## ----message=FALSE, warning=FALSE--------------------------------------------- bayesfactor(result) ## ----echo=FALSE, fig.cap="Wagenmakers' pizza poking analogy. 
From the great 'www.bayesianspectacles.org' blog.", fig.align='center', out.width="80%"---- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/LetsPokeAPizza.jpg") ## ----message=FALSE, warning=FALSE--------------------------------------------- library(see) plot(bayesfactor(result)) + scale_fill_pizza() ## ----message=FALSE, warning=FALSE--------------------------------------------- library(dplyr) library(ggplot2) # Select only two relevant species data <- iris %>% filter(Species != "setosa") %>% droplevels() # Visualise distributions and observations data %>% ggplot(aes(x = Species, y = Sepal.Width, fill = Species)) + geom_violindot(fill_dots = "black", size_dots = 1) + scale_fill_material() + theme_modern() ## ----message=FALSE, warning=FALSE--------------------------------------------- result <- BayesFactor::ttestBF(formula = Sepal.Width ~ Species, data = data) describe_posterior(result) ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # library(rstanarm) # # model <- stan_glm(Species ~ Sepal.Width, data = data, family = "binomial") ## ----message=FALSE, warning=FALSE, echo=FALSE--------------------------------- library(rstanarm) model <- stan_glm(Species ~ Sepal.Width, data = data, family = "binomial", refresh = 0) ## ----message=FALSE, warning=FALSE--------------------------------------------- library(performance) model_performance(model) ## ----message=FALSE, warning=FALSE--------------------------------------------- describe_posterior(model, test = c("pd", "ROPE", "BF")) ## ----message=FALSE, warning=FALSE--------------------------------------------- # plot(rope(result)) bayestestR/inst/doc/probability_of_direction.Rmd0000644000176200001440000002676113620150172021642 0ustar liggesusers--- title: "Probability of Direction (pd)" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, 
posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Probability of Direction (pd)} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) --- ```{r message=FALSE, warning=FALSE, include=FALSE} if (!requireNamespace("see", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("tidyr", quietly = TRUE) || !requireNamespace("logspline", quietly = TRUE) || !requireNamespace("KernSmooth", quietly = TRUE) || !requireNamespace("tidyr", quietly = TRUE) || !requireNamespace("GGally", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) set.seed(333) ``` # What is the *pd?* The **Probability of Direction (pd)** is an index of **effect existence**, ranging from 50\% to 100\%, representing the certainty with which an effect goes in a particular direction (*i.e.*, is positive or negative). Beyond its **simplicity of interpretation, understanding and computation**, this index also presents other interesting properties: - It is **independent from the model**: It is solely based on the posterior distributions and does not require any additional information from the data or the model. 
- It is **robust** to the scale of both the response variable and the predictors. - It is strongly correlated with the frequentist ***p*-value**, and can thus be used to draw parallels and give some reference to readers non-familiar with Bayesian statistics. However, this index is not relevant to assess the magnitude and importance of an effect (the meaning of "significance"), which is better achieved through other indices such as the [ROPE percentage](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html). In fact, indices of significance and existence are totally independent. You can have an effect with a *pd* of **99.99\%**, for which the whole posterior distribution is concentrated within the `[0.0001, 0.0002]` range. In this case, the effect is **positive with a high certainty**, but also **not significant** (*i.e.*, very small). Indices of effect existence, such as the *pd*, are particularly useful in exploratory research or clinical studies, for which the focus is to make sure that the effect of interest is not in the opposite direction (for clinical studies, that a treatment is not harmful). However, once the effect's direction is confirmed, the focus should shift toward its significance, including a precise estimation of its magnitude, relevance and importance. # Relationship with the *p*-value In most cases, it seems that the *pd* has a direct correspondence with the frequentist **one-sided *p*-value** through the formula: $$p_{one-sided} = 1-p_d$$ Similarly, the **two-sided *p*-value** (the most commonly reported one) is equivalent through the formula: $$p_{two-sided} = 2*(1-p_d)$$ Thus, the two-sided *p*-value of respectively **.1**, **.05**, **.01** and **.001** would correspond approximately to a *pd* of **95\%**, **97.5\%**, **99.5\%** and **99.95\%** . 
```{r message=FALSE, warning=FALSE, echo=FALSE, fig.cap="Correlation between the frequentist p-value and the probability of direction (pd)", fig.align='center'} library(dplyr) library(tidyr) library(ggplot2) library(see) read.csv("https://raw.github.com/easystats/easystats/master/publications/makowski_2019_bayesian/data/data.csv") %>% mutate(effect_existence = ifelse(true_effect == 1, "Presence of true effect", "Absence of true effect"), p_direction = p_direction * 100) %>% ggplot(aes(x=p_direction, y=p_value, color=effect_existence)) + geom_point2(alpha=0.1) + geom_segment(aes(x=95, y=Inf, xend=95, yend=0.1), color="black", linetype="longdash") + geom_segment(aes(x=-Inf, y=0.1, xend=95, yend=0.1), color="black", linetype="longdash") + geom_segment(aes(x=97.5, y=Inf, xend=97.5, yend=0.05), color="black", linetype="dashed") + geom_segment(aes(x=-Inf, y=0.05, xend=97.5, yend=0.05), color="black", linetype="dashed") + theme_modern() + scale_y_reverse(breaks = c(0.05, round(seq(0, 1, length.out = 11), digits=2))) + scale_x_continuous(breaks = c(95, 97.5, round(seq(50, 100, length.out = 6)))) + scale_color_manual(values=c("Presence of true effect"="green", "Absence of true effect"="red")) + theme(legend.title = element_blank()) + guides(colour = guide_legend(override.aes = list(alpha = 1))) + xlab("Probability of Direction (pd)") + ylab("Frequentist p-value") ``` > **But if it's like the *p*-value, it must be bad because the *p*-value is bad [*insert reference to the reproducibility crisis*].** In fact, this aspect of the reproducibility crisis might have been misunderstood. Indeed, it is not that the *p*-value is an intrinsically bad or wrong. Instead, it is its **misuse**, **misunderstanding** and **misinterpretation** that fuels the decay of the situation. For instance, the fact that the **pd** is highly correlated with the *p*-value suggests that the latter is more an index of effect *existence* than *significance* (*i.e.*, "worth of interest"). 
The Bayesian version, the **pd**, has an intuitive meaning and makes obvious the fact that **all thresholds are arbitrary**. Additionally, the **mathematical and interpretative transparency** of the **pd**, and its reconceptualisation as an index of effect existence, offers a valuable insight into the characterization of Bayesian results. Moreover, its concomitant proximity with the frequentist *p*-value makes it a perfect metric to ease the transition of psychological research into the adoption of the Bayesian framework. # Methods of computation The most **simple and direct** way to compute the **pd** is to 1) look at the median's sign, 2) select the portion of the posterior of the same sign and 3) compute the percentage that this portion represents. This "simple" method is the most straightforward, but its precision is directly tied to the number of posterior draws. The second approach relies on [**density estimation**](https://easystats.github.io/bayestestR/reference/estimate_density.html). It starts by estimating the density function (for which many methods are available), and then computing the [**area under the curve**](https://easystats.github.io/bayestestR/reference/area_under_curve.html) (AUC) of the density curve on the other side of 0. The density-based method could hypothetically be considered as more precise, but strongly depends on the method used to estimate the density function. # Methods comparison Let's compare the 4 available methods, the **direct** method and 3 **density-based** methods differing by their density estimation algorithm (see [`estimate_density`](https://easystats.github.io/bayestestR/reference/estimate_density.html)). ## Correlation Let's start by testing the proximity and similarity of the results obtained by different methods. 
```{r message=FALSE, warning=FALSE, fig.align='center'} library(bayestestR) library(logspline) library(KernSmooth) # Compute the correlations data <- data.frame() for(the_mean in runif(25, 0, 4)){ for(the_sd in runif(25, 0.5, 4)){ x <- rnorm(100, the_mean, abs(the_sd)) data <- rbind(data, data.frame("direct" = pd(x), "kernel" = pd(x, method="kernel"), "logspline" = pd(x, method="logspline"), "KernSmooth" = pd(x, method="KernSmooth") )) } } data <- as.data.frame(sapply(data, as.numeric)) # Visualize the correlations library(ggplot2) library(GGally) GGally::ggpairs(data) + theme_classic() ``` All methods give are highly correlated and give very similar results. That means that the method choice is not a drastic game changer and cannot be used to tweak the results too much. ## Accuracy To test the accuracy of each methods, we will start by computing the **direct *pd*** from a very dense distribution (with a large amount of observations). This will be our baseline, or "true" *pd*. Then, we will iteratively draw smaller samples from this parent distribution, and we will compute the *pd* with different methods. The closer this estimate is from the reference one, the better. 
```{r message=FALSE, warning=FALSE} data <- data.frame() for(i in 1:25){ the_mean <- runif(1, 0, 4) the_sd <- abs(runif(1, 0.5, 4)) parent_distribution <- rnorm(100000, the_mean, the_sd) true_pd <- pd(parent_distribution) for(j in 1:25){ sample_size <- round(runif(1, 25, 5000)) subsample <- sample(parent_distribution, sample_size) data <- rbind(data, data.frame("sample_size" = sample_size, "true" = true_pd, "direct" = pd(subsample) - true_pd, "kernel" = pd(subsample, method="kernel")- true_pd, "logspline" = pd(subsample, method="logspline") - true_pd, "KernSmooth" = pd(subsample, method="KernSmooth") - true_pd )) } } data <- as.data.frame(sapply(data, as.numeric)) ``` ```{r message=FALSE, warning=FALSE, fig.align='center'} library(tidyr) library(dplyr) data %>% tidyr::gather(Method, Distance, -sample_size, -true) %>% ggplot(aes(x=sample_size, y = Distance, color = Method, fill= Method)) + geom_point(alpha=0.3, stroke=0, shape=16) + geom_smooth(alpha=0.2) + geom_hline(yintercept=0) + theme_classic() + xlab("\nDistribution Size") ``` The "Kernel" based density methods seems to consistently underestimate the *pd*. Interestingly, the "direct" method appears as being the more reliable, even in the case of small number of posterior draws. ## Can the pd be 100\%? `p = 0.000` is coined as one of the term to avoid when reporting results [@lilienfeld2015fifty], even if often displayed by statistical software. The rationale is that for every probability distribution, there is no value with a probability of exactly 0. There is always some infinitesimal probability associated with each data point, and the `p = 0.000` returned by software is due to approximations related, among other, to finite memory hardware. One could apply this rationale for the *pd*: since all data points have a non-null probability density, then the *pd* (a particular portion of the probability density) can *never* be 100\%. 
While this is an entirely valid point, people using the *direct* method might argue that their *pd* is based on the posterior draws, rather than on the theoretical, hidden, true posterior distribution (which is only approximated by the posterior draws). These posterior draws represent a finite sample for which `pd = 100%` is a valid statement. bayestestR/inst/doc/credible_interval.R0000644000176200001440000000551013620150401017712 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) if (!requireNamespace("ggplot2", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } set.seed(333) ## ----warning=FALSE, message=FALSE--------------------------------------------- library(bayestestR) library(dplyr) library(ggplot2) # Generate a normal distribution posterior <- distribution_normal(1000) # Compute HDI and ETI ci_hdi <- ci(posterior, method = "HDI") ci_eti <- ci(posterior, method = "ETI") # Plot the distribution and add the limits of the two CIs posterior %>% estimate_density(extend=TRUE) %>% ggplot(aes(x=x, y=y)) + geom_area(fill="orange") + theme_classic() + # HDI in blue geom_vline(xintercept=ci_hdi$CI_low, color="royalblue", size=3) + geom_vline(xintercept=ci_hdi$CI_high, color="royalblue", size=3) + # Quantile in red geom_vline(xintercept=ci_eti$CI_low, color="red", size=1) + geom_vline(xintercept=ci_eti$CI_high, color="red", size=1) ## ----warning=FALSE, message=FALSE--------------------------------------------- library(bayestestR) library(dplyr) library(ggplot2) # Generate a beta distribution posterior <- distribution_beta(1000, 6, 2) # Compute HDI and Quantile CI ci_hdi <- ci(posterior, method = "HDI") ci_eti <- ci(posterior, method = "ETI") # Plot the distribution and add the limits of the two CIs posterior %>% estimate_density(extend=TRUE) %>% ggplot(aes(x=x, y=y)) + 
geom_area(fill="orange") + theme_classic() + # HDI in blue geom_vline(xintercept=ci_hdi$CI_low, color="royalblue", size=3) + geom_vline(xintercept=ci_hdi$CI_high, color="royalblue", size=3) + # Quantile in red geom_vline(xintercept=ci_eti$CI_low, color="red", size=1) + geom_vline(xintercept=ci_eti$CI_high, color="red", size=1) ## ----warning=FALSE, message=FALSE--------------------------------------------- prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) si_1 <- si(posterior, prior, BF = 1) si_3 <- si(posterior, prior, BF = 3) ggplot(mapping = aes(x=x, y=y)) + theme_classic() + # The posterior geom_area(fill = "orange", data = estimate_density(posterior, extend = TRUE)) + # The prior geom_area(color = "black", fill = NA, size = 1, linetype = "dashed", data = estimate_density(prior, extend = TRUE)) + # BF = 1 SI in blue geom_vline(xintercept=si_1$CI_low, color="royalblue", size=1) + geom_vline(xintercept=si_1$CI_high, color="royalblue", size=1) + # BF = 3 SI in red geom_vline(xintercept=si_3$CI_low, color="red", size=1) + geom_vline(xintercept=si_3$CI_high, color="red", size=1) bayestestR/inst/doc/probability_of_direction.html0000644000176200001440000060770313620150633022067 0ustar liggesusers

Probability of Direction (pd)

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541
  • Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. 10.3389/fpsyg.2019.02767

What is the pd?

The Probability of Direction (pd) is an index of effect existence, ranging from 50% to 100%, representing the certainty with which an effect goes in a particular direction (i.e., is positive or negative).

Beyond its simplicity of interpretation, understanding and computation, this index also presents other interesting properties:

  • It is independent from the model: It is solely based on the posterior distributions and does not require any additional information from the data or the model.
  • It is robust to the scale of both the response variable and the predictors.
  • It is strongly correlated with the frequentist p-value, and can thus be used to draw parallels and give some reference to readers non-familiar with Bayesian statistics.

However, this index is not relevant to assess the magnitude and importance of an effect (the meaning of “significance”), which is better achieved through other indices such as the ROPE percentage. In fact, indices of significance and existence are totally independent. You can have an effect with a pd of 99.99%, for which the whole posterior distribution is concentrated within the [0.0001, 0.0002] range. In this case, the effect is positive with a high certainty, but also not significant (i.e., very small).

Indices of effect existence, such as the pd, are particularly useful in exploratory research or clinical studies, for which the focus is to make sure that the effect of interest is not in the opposite direction (for clinical studies, that a treatment is not harmful). However, once the effect’s direction is confirmed, the focus should shift toward its significance, including a precise estimation of its magnitude, relevance and importance.

Relationship with the p-value

In most cases, it seems that the pd has a direct correspondence with the frequentist one-sided p-value through the formula: [p_{one-sided} = 1-p_d] Similarly, the two-sided p-value (the most commonly reported one) is equivalent through the formula: [p_{two-sided} = 2*(1-p_d)] Thus, the two-sided p-value of respectively .1, .05, .01 and .001 would correspond approximately to a pd of 95%, 97.5%, 99.5% and 99.95% .

Correlation between the frequentist p-value and the probability of direction (pd)

Correlation between the frequentist p-value and the probability of direction (pd)

But if it’s like the p-value, it must be bad because the p-value is bad [insert reference to the reproducibility crisis].

In fact, this aspect of the reproducibility crisis might have been misunderstood. Indeed, it is not that the p-value is intrinsically bad or wrong. Instead, it is its misuse, misunderstanding and misinterpretation that fuels the decay of the situation. For instance, the fact that the pd is highly correlated with the p-value suggests that the latter is more an index of effect existence than significance (i.e., “worth of interest”). The Bayesian version, the pd, has an intuitive meaning and makes obvious the fact that all thresholds are arbitrary. Additionally, the mathematical and interpretative transparency of the pd, and its reconceptualisation as an index of effect existence, offers a valuable insight into the characterization of Bayesian results. Moreover, its concomitant proximity with the frequentist p-value makes it a perfect metric to ease the transition of psychological research into the adoption of the Bayesian framework.

Methods of computation

The most simple and direct way to compute the pd is to 1) look at the median’s sign, 2) select the portion of the posterior of the same sign and 3) compute the percentage that this portion represents. This “simple” method is the most straightforward, but its precision is directly tied to the number of posterior draws.

The second approach relies on density estimation. It starts by estimating the density function (for which many methods are available), and then computing the area under the curve (AUC) of the density curve on the other side of 0. The density-based method could hypothetically be considered as more precise, but strongly depends on the method used to estimate the density function.

Methods comparison

Let’s compare the 4 available methods, the direct method and 3 density-based methods differing by their density estimation algorithm (see estimate_density).

Correlation

Let’s start by testing the proximity and similarity of the results obtained by different methods.

All methods are highly correlated and give very similar results. That means that the method choice is not a drastic game changer and cannot be used to tweak the results too much.

Accuracy

To test the accuracy of each method, we will start by computing the direct pd from a very dense distribution (with a large number of observations). This will be our baseline, or “true” pd. Then, we will iteratively draw smaller samples from this parent distribution, and we will compute the pd with different methods. The closer this estimate is to the reference one, the better.

The “Kernel” based density methods seem to consistently underestimate the pd. Interestingly, the “direct” method appears to be the most reliable, even in the case of a small number of posterior draws.

Can the pd be 100%?

p = 0.000 is coined as one of the terms to avoid when reporting results (Lilienfeld et al., 2015), even if it is often displayed by statistical software. The rationale is that for every probability distribution, there is no value with a probability of exactly 0. There is always some infinitesimal probability associated with each data point, and the p = 0.000 returned by software is due to approximations related, among others, to finite memory hardware.

One could apply this rationale for the pd: since all data points have a non-null probability density, then the pd (a particular portion of the probability density) can never be 100%. While this is an entirely valid point, people using the direct method might argue that their pd is based on the posterior draws, rather than on the theoretical, hidden, true posterior distribution (which is only approximated by the posterior draws). These posterior draws represent a finite sample for which pd = 100% is a valid statement.

Lilienfeld, S. O., Sauvigné, K. C., Lynn, S. J., Cautin, R. L., Latzman, R. D., & Waldman, I. D. (2015). Fifty psychological and psychiatric terms to avoid: A list of inaccurate, misleading, misused, ambiguous, and logically confused words and phrases. Frontiers in Psychology, 6, 1100. https://doi.org/10.3389/fpsyg.2015.01100

bayestestR/inst/doc/region_of_practical_equivalence.Rmd0000644000176200001440000002404213620150172023136 0ustar liggesusers--- title: "Region of Practical Equivalence (ROPE)" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test, rope, equivalence test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Region of Practical Equivalence (ROPE)} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} if (!requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("see", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) set.seed(333) ``` # What is the *ROPE?* Unlike a frequentist approach, Bayesian inference is not based on statistical significance, where effects are tested against "zero". Indeed, the Bayesian framework offers a probabilistic view of the parameters, allowing assessment of the uncertainty related to them. Thus, rather than concluding that an effect is present when it simply differs from zero, we would conclude that the probability of being outside a specific range that can be considered as **"practically no effect"** (*i.e.*, a negligible magnitude) is sufficient. This range is called the **region of practical equivalence (ROPE)**. 
Indeed, statistically, the probability of a posterior distribution being different from 0 does not make much sense (the probability of it being different from a single point being infinite). Therefore, the idea underlying ROPE is to let the user define an area around the null value enclosing values that are **equivalent to the null** value for practical purposes [@kruschke2010believe; @kruschke2012time; @kruschke2014doing]. # Equivalence Test The ROPE, being a region corresponding to a "null" hypothesis, is used for the **equivalence test**, to test whether a parameter is **significant** (in the sense of *important* enough to be cared about). This test is usually based on the **"HDI+ROPE decision rule"** [@kruschke2014doing; @kruschke2018bayesian] to check whether parameter values should be accepted or rejected against an explicitly formulated "null hypothesis" (*i.e.*, a ROPE). In other words, it checks the percentage of Credible Interval (CI) that is in the null region (the ROPE). If this percentage is sufficiently low, the null hypothesis is rejected. If this percentage is sufficiently high, the null hypothesis is accepted. # Credible interval in ROPE *vs* full posterior in ROPE Using the ROPE and the HDI as Credible Interval, Kruschke (2018) suggests using the percentage of the 95\% HDI that falls within the ROPE as a decision rule. However, as the 89\% HDI [is considered a better choice](https://easystats.github.io/bayestestR/articles/credible_interval.html) [@kruschke2014doing; @mcelreath2014rethinking; @mcelreath2018statistical], `bayestestR` provides by default the percentage of the 89\% HDI that falls within the ROPE. However, [*simulation studies data*](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full) suggest that using the percentage of the full posterior distribution, instead of a CI, might be more sensitive (especially to delineate highly significant effects). 
Thus, we recommend that the user considers using the ***full* ROPE** percentage (by setting `ci = 1`), which will return the portion of the entire posterior distribution in the ROPE. # What percentage in ROPE to accept or to reject? If the HDI is completely outside the ROPE, the "null hypothesis" for this parameter is "rejected". If the ROPE completely covers the HDI, *i.e.*, all most credible values of a parameter are inside the region of practical equivalence, the null hypothesis is accepted. Else, it’s unclear whether the null hypothesis should be accepted or rejected. If the **full ROPE** is used (*i.e.*, 100\% of the HDI), then the null hypothesis is rejected or accepted if the percentage of the posterior within the ROPE is smaller than 2.5\% or greater than 97.5\%. Desirable results are low proportions inside the ROPE (the closer to zero the better). # How to define the ROPE range? Kruschke (2018) suggests that the ROPE could be set, by default, to a range from `-0.1` to `0.1` of a standardized parameter (negligible effect size according to Cohen, 1988). - For **linear models (lm)**, this can be generalised to: $$[-0.1*SD_{y}, 0.1*SD_{y}]$$. - For **logistic models**, the parameters expressed in log odds ratio can be converted to standardized difference through the formula: $$\sqrt{3}/\pi$$, resulting in a range of `-0.055` to `0.055`. For other models with binary outcome, it is strongly recommended to manually specify the rope argument. Currently, the same default is applied as for logistic models. - For **t-tests**, the standard deviation of the response is used, similarly to linear models (see above). - For **correlations**, `-0.05, 0.05` is used, *i.e.*, half the value of a negligible correlation as suggested by Cohen's (1988) rules of thumb. - For all other models, `-0.1, 0.1` is used to determine the ROPE limits, but it is strongly advised to specify it manually. 
# Sensitivity to parameter's scale It is important to consider **the unit (*i.e.*, the scale) of the predictors** when using an index based on the ROPE, as the correct interpretation of the ROPE as representing a region of practical equivalence to zero is dependent on the scale of the predictors. Indeed, unlike other indices (such as the [`pd`](https://easystats.github.io/bayestestR/articles/probability_of_direction.html)), the percentage in **ROPE** depend on the unit of its parameter. In other words, as the ROPE represents a fixed portion of the response's scale, its proximity with a coefficient depends on the scale of the coefficient itself. For instance, if we consider a simple regression `growth ~ time`, modelling the development of **Wookies babies**, a negligible change (the ROPE) is less than **54 cm**. If our `time` variable is **expressed in days**, we will find that the coefficient (representing the growth **by day**) is of about **10 cm** (*the median of the posterior of the coefficient is 10*). Which we would consider as **negligible**. However, if we decide to express the `time` variable **in years**, the coefficient will be scaled by this transformation (as it will now represent the growth **by year**). The coefficient will now be around **3550** cm (`10 * 355`), which we would now consider as **significant**. 
```{r message=FALSE, warning=FALSE, eval=FALSE} library(rstanarm) library(bayestestR) library(see) data <- iris # Use the iris data model <- stan_glm(Sepal.Length ~ Sepal.Width, data=data) # Fit model ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=">"} library(rstanarm) library(bayestestR) library(see) set.seed(333) data <- iris # Use the iris data model <- stan_glm(Sepal.Length ~ Sepal.Width, data=data, refresh = 0) ``` ```{r echo=TRUE, message=FALSE, warning=FALSE, comment=">"} # Compute indices pd <- p_direction(model) percentage_in_rope <- rope(model, ci=1) # Visualise the pd plot(pd) pd # Visualise the percentage in ROPE plot(percentage_in_rope) percentage_in_rope ``` We can see that the *pd* and the percentage in ROPE of the linear relationship between **Sepal.Length** and **Sepal.Width** are respectively of about `92.95%` and `15.95%`, corresponding to an **uncertain** and **not significant** effect. What happen if we scale our predictor? ```{r message=FALSE, warning=FALSE, eval=FALSE} data$Sepal.Width_scaled <- data$Sepal.Width / 100 # Divide predictor by 100 model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data=data) # Fit model ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=">"} set.seed(333) data$Sepal.Width_scaled <- data$Sepal.Width / 100 model <- stan_glm(Sepal.Length ~ Sepal.Width_scaled, data=data, refresh = 0) ``` ```{r echo=TRUE, message=FALSE, warning=FALSE, comment=">"} # Compute indices pd <- p_direction(model) percentage_in_rope <- rope(model, ci=1) # Visualise the pd plot(pd) pd # Visualise the percentage in ROPE plot(percentage_in_rope) percentage_in_rope ``` As you can see, by simply dividing the predictor by 100, we **drastically** changed the conclusion related to the **percentage in ROPE** (which became very close to `0`): the effect could now be **interpreted as being significant**. 
Thus, we recommend paying close attention to the unit of the predictors when selecting the ROPE range (*e.g.*, what coefficient would correspond to a small effect?), and when reporting or reading ROPE results. # Multicollinearity: Non-independent covariates When **parameters show strong correlations**, *i.e.*, when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are parameters that only have partial overlap with the ROPE region. In case of collinearity, the (joint) distributions of these parameters may either get an increased or decreased ROPE, which means that inferences based on ROPE are inappropriate [@kruschke2014doing]. The `equivalence_test()` and `rope()` functions perform a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. An even more sophisticated check is the projection predictive variable selection [@piironen2017comparison]. 
bayestestR/inst/doc/indicesExistenceComparison.R0000644000176200001440000000030113620150455021560 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">", dpi=75) options(digits=2) bayestestR/inst/doc/bayestestR.Rmd0000644000176200001440000002401213613227664016723 0ustar liggesusers--- title: "Get Started with Bayesian Analysis" output: github_document: toc: true fig_width: 10.08 fig_height: 6 rmarkdown::html_vignette: toc: true fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{Get Started with Bayesian Analysis} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- This vignette can be referred to by citing the package: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 --- ```{r message=FALSE, warning=FALSE, include=FALSE} if (!requireNamespace("rstanarm", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) ``` ## Why use the Bayesian Framework? The Bayesian framework for statistics is quickly gaining in popularity among scientists, associated with the general shift towards **open and honest science**. Reasons to prefer this approach are **reliability**, **accuracy** (in noisy data and small samples), the possibility of introducing **prior knowledge** into the analysis and, critically, **results intuitiveness** and their **straightforward interpretation** [@andrews2013prior; @etz2016bayesian; @kruschke2010believe; @kruschke2012time; @wagenmakers2018bayesian]. 
In general, the frequentist approach has been associated with the focus on null hypothesis testing, and the misuse of *p*-values has been shown to critically contribute to the reproducibility crisis of psychological science [@chambers2014instead; @szucs2016empirical]. There is a general agreement that the generalization of the Bayesian approach is one way of overcoming these issues [@benjamin2018redefine; @etz2016bayesian]. Once we agreed that the Bayesian framework is the right way to go, you might wonder *what* is the Bayesian framework. **What's all the fuss about?** ## What is the Bayesian Framework? Adopting the Bayesian framework is more of a shift in the paradigm than a change in the methodology. Indeed, all the common statistical procedures (t-tests, correlations, ANOVAs, regressions, ...) can be achieved using the Bayesian framework. One of the core difference is that in the **frequentist view** (the "classic" statistics, with *p* and *t* values, as well as some weird *degrees of freedom*), **the effects are fixed** (but unknown) and **data are random**. On the other hand, in the Bayesian inference process, instead of having estimates of the "true effect", the probability of different effects *given the observed data* is computed, resulting in a distribution of possible values for the parameters, called the ***posterior distribution***. The uncertainty in Bayesian inference can be summarized, for instance, by the **median** of the distribution, as well as a range of values of the posterior distribution that includes the 95\% most probable values (the 95\% ***credible* interval**). *Cum grano salis*, these are considered the counterparts to the point-estimate and confidence interval in a frequentist framework. 
To illustrate the difference of interpretation, the Bayesian framework allows to say *"given the observed data, the effect has 95\% probability of falling within this range"*, while the frequentist less straightforward alternative would be *"when repeatedly computing confidence intervals from data of this sort, there is a 95\% probability that the effect falls within a given range"*. In essence, the Bayesian sampling algorithms (such as MCMC sampling) return a probability distribution (*the posterior*) of an effect that is compatible with the observed data. Thus, an effect can be described by [characterizing its posterior distribution](https://easystats.github.io/bayestestR/articles/guidelines.html) in relation to its centrality (point-estimates), uncertainty, as well as existence and significance In other words, omitting the maths behind it, we can say that: - The frequentist bloke tries to estimate "the **real effect**". For instance, the "real" value of the correlation between *x* and *y*. Hence, frequentist models return a "**point-estimate**" (*i.e.*, a single value) of the "real" correlation (*e.g.*, r = 0.42) estimated under a number of obscure assumptions (at a minimum, considering that the data is sampled at random from a "parent", usually normal distribution). - **The Bayesian master assumes no such thing**. The data are what they are. Based on this observed data (and a **prior** belief about the result), the Bayesian sampling algorithm (sometimes referred to for example as **MCMC** sampling) returns a probability distribution (called **the posterior**) of the effect that is compatible with the observed data. For the correlation between *x* and *y*, it will return a distribution that says, for example, "the most probable effect is 0.42, but this data is also compatible with correlations of 0.12 and 0.74". - To characterize our effects, **no need of *p* values** or other cryptic indices. We simply describe the posterior distribution of the effect. 
For example, we can report the median, the [89% *Credible* Interval](https://easystats.github.io/bayestestR/articles/credible_interval.html) or [other indices](https://easystats.github.io/bayestestR/articles/guidelines.html). ```{r echo=FALSE, fig.cap="Accurate depiction of a regular Bayesian user estimating a credible interval.", fig.align='center', out.width="50%"} knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/bayesianMaster.jpg") ``` *Note: Although the very purpose of this package is to advocate for the use of Bayesian statistics, please note that there are serious arguments supporting frequentist indices (see for instance [this thread](https://discourse.datamethods.org/t/language-for-communicating-frequentist-results-about-treatment-effects/934/16)). As always, the world is not black and white (p \< .001).* **So... how does it work?** ## A simple example ### BayestestR Installation You can install `bayestestR` along with the whole [**easystats**](https://github.com/easystats/easystats) suite by running the following: ```{r eval=FALSE, message=FALSE, warning=FALSE} install.packages("devtools") devtools::install_github("easystats/easystats") ``` Let's also install and load the [`rstanarm`](https://mc-stan.org/rstanarm/) package, which allows fitting Bayesian models, as well as [`bayestestR`](https://github.com/easystats/bayestestR), to describe them. ```{r message=FALSE, warning=FALSE, eval=FALSE} install.packages("rstanarm") library(rstanarm) ``` ### Traditional linear regression Let's start by fitting a simple frequentist linear regression (the `lm()` function stands for *linear model*) between two numeric variables, `Sepal.Length` and `Petal.Length` from the famous [`iris`](https://en.wikipedia.org/wiki/Iris_flower_data_set) dataset, included by default in R. 
```{r message=FALSE, warning=FALSE, eval=FALSE} model <- lm(Sepal.Length ~ Petal.Length, data=iris) summary(model) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA} library(dplyr) lm(Sepal.Length ~ Petal.Length, data=iris) %>% summary() ``` This analysis suggests that there is a **significant** (*whatever that means*) and **positive** (with a coefficient of `0.41`) linear relationship between the two variables. *Fitting and interpreting **frequentist models is so easy** that it is obvious that people use it instead of the Bayesian framework... right?* **Not anymore.** ### Bayesian linear regression ```{r message=FALSE, warning=FALSE, eval=FALSE} model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris) describe_posterior(model) ``` ```{r echo=FALSE, message=FALSE, warning=FALSE, comment=NA} library(rstanarm) library(bayestestR) set.seed(333) model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris, refresh = 0) knitr::kable(describe_posterior(model), digits=2) ``` **That's it!** You fitted a Bayesian version of the model by simply using [`stan_glm()`](https://mc-stan.org/rstanarm/reference/stan_glm.html) instead of `lm()` and described the posterior distributions of the parameters. The conclusions that we can draw, for this example, are very similar. The effect (*the median of the effect's posterior distribution*) is about `0.41`, and it can also be considered as *significant* in the Bayesian sense (more on that later). **So, ready to learn more?** Check out the [**next tutorial**](https://easystats.github.io/bayestestR/articles/example1.html)! ## References bayestestR/inst/doc/example2.html0000644000176200001440000070067613620150415016541 0ustar liggesusers

2. Confirmation of Bayesian skills

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

Now that describing and understanding posterior distributions of linear regressions has no secrets for you, we will take one step back and study some simpler models: correlations and t-tests.

But before we do that, let us take a moment to remind ourselves and appreciate the fact that all basic statistical procedures such as correlations, t-tests, ANOVAs or Chi-square tests are linear regressions (we strongly recommend this excellent demonstration). Nevertheless, these simple models will be the occasion to introduce a more complex index, such as the Bayes factor.

Correlations

Frequentist version

Let us start, again, with a frequentist correlation between two continuous variables, the width and the length of the sepals of some flowers. The data is available in R as the iris dataset (the same that was used in the previous tutorial).

We will compute a Pearson’s correlation test, store the results in an object called result, then display it:

> 
>   Pearson's product-moment correlation
> 
> data:  iris$Sepal.Width and iris$Sepal.Length
> t = -1, df = 148, p-value = 0.2
> alternative hypothesis: true correlation is not equal to 0
> 95 percent confidence interval:
>  -0.273  0.044
> sample estimates:
>   cor 
> -0.12

As you can see in the output, the test that we did actually compared two hypotheses: the null hypothesis (h0; no correlation) with the alternative hypothesis (h1; a non-null correlation). Based on the p-value, the null hypothesis cannot be rejected: the correlation between the two variables is negative but not significant (r = -.12, p > .05).

Bayesian correlation

To compute a Bayesian correlation test, we will need the BayesFactor package (you can install it by running install.packages("BayesFactor")). We can then load this package, compute the correlation using the correlationBF() function and store the results in a similar fashion.

Now, let us run our describe_posterior() function on that:

>   Parameter Median CI CI_low CI_high pd ROPE_CI ROPE_low ROPE_high
> 1       rho  -0.11 89  -0.24  0.0079 92      89     -0.1       0.1
>   ROPE_Percentage   BF Prior_Distribution Prior_Location Prior_Scale
> 1              42 0.51             cauchy              0        0.33

We see again many things here, but the important indices for now are the median of the posterior distribution, -.11. This is (again) quite close to the frequentist correlation. We could, as previously, describe the credible interval, the pd or the ROPE percentage, but we will focus here on another index provided by the Bayesian framework, the Bayes factor (BF).

Bayes factor (BF)

We said previously that a correlation test actually compares two hypotheses, a null (absence of effect) with an alternative one (presence of an effect). The Bayes factor (BF) allows the same comparison and determines under which of two models the observed data are more probable: a model with the effect of interest, and a null model without the effect of interest. We can use bayesfactor() to specifically compute the Bayes factor comparing those models:

> # Bayes Factors for Model Comparison
> 
>   Model               BF
>   [2] Alt., r=0.333 0.51
> 
> * Against Denominator: [1] Null, rho = 0
> *   Bayes Factor Type: JZS (BayesFactor)

We got a BF of 0.51. What does it mean?

Bayes factors are continuous measures of relative evidence, with a Bayes factor greater than 1 giving evidence in favour of one of the models (often referred to as the numerator), and a Bayes factor smaller than 1 giving evidence in favour of the other model (the denominator).

Yes, you heard things right, evidence in favour of the null!

That’s one of the reasons why the Bayesian framework is sometimes considered as superior to the frequentist framework. Remember from your stats lessons that the p-value can only be used to reject h0, but not accept it. With the Bayes factor, you can measure evidence against - and in favour of - the null.

BFs representing evidence for the alternative against the null can be reversed using (BF_{01}=1/BF_{10}) (the 01 and 10 correspond to h0 against h1 and h1 against h0, respectively) to provide evidence of the null against the alternative. This improves human readability in cases where the BF of the alternative against the null is smaller than 1 (i.e., in support of the null).

In our case, BF = 1/0.51 = 2, indicates that the data are 2 times more probable under the null compared to the alternative hypothesis, which, though favouring the null, is considered only anecdotal evidence against the null.

We can thus conclude that there is anecdotal evidence in favour of an absence of correlation between the two variables (rmedian = -0.11, BF = 0.51), which is a much more informative statement than what we can do with frequentist statistics.

And that’s not all!

Visualise the Bayes factor

In general, pie charts are an absolute no-go in data visualisation, as our brain’s perceptive system heavily distorts the information presented in such way. Nevertheless, there is one exception: pizza charts.

It is an intuitive way of interpreting the strength of evidence provided by BFs as an amount of surprise.

Wagenmakers' pizza poking analogy. From the great 'www.bayesianspectacles.org' blog.

Wagenmakers’ pizza poking analogy. From the great ‘www.bayesianspectacles.org’ blog.

Such “pizza plots” can be directly created through the see visualisation companion package for easystats (you can install it by running install.packages("see")):

So, after seeing this pizza, how much would you be surprised by the outcome of a blinded poke?

t-tests

“I know that I know nothing, and especially not if versicolor and virginica differ in terms of Sepal.Width”, famously said Socrates. Time to finally answer this crucial question!

Versicolor vs. virginica

Bayesian t-tests can be performed in a very similar way to correlations. As we are particularly interested in two levels of the Species factor, versicolor and virginica, we will start by filtering out from iris the non-relevant observations corresponding to the setosa species, and we will then visualise the observations and the distribution of the Sepal.Width variable.

It seems (visually) that virginica flowers have, on average, a slightly higher width of sepals. Let’s assess this difference statistically by using the ttestBF in the BayesFactor package.

Compute the Bayesian t-test

>    Parameter Median CI CI_low CI_high pd ROPE_CI ROPE_low ROPE_high
> 1 Difference  -0.19 89  -0.29  -0.089  1      89     -0.1       0.1
>   ROPE_Percentage BF Prior_Distribution Prior_Location Prior_Scale
> 1           0.028 18             cauchy              0        0.71

From the indices, we can say that the difference of Sepal.Width between virginica and versicolor has a probability of 100% of being negative [from the pd and the sign of the median] (median = -0.19, 89% CI [-0.29, -0.089]). The data provides strong evidence against the null hypothesis (BF = 18).

Keep that in mind as we will see another way of investigating this question.

Logistic Model

A hypothesis for which one uses a t-test can also be tested using a binomial model (e.g., a logistic model). Indeed, it is possible to reformulate the following hypothesis, “there is an important difference in this variable between the two groups” as “this variable is able to discriminate between (or classify) the two groups”. However, these models are much more powerful than a regular t-test.

In the case of the difference of Sepal.Width between virginica and versicolor, the question becomes, how well can we classify the two species using only Sepal.Width?

Fit the model

Visualise the model

Using the estimate package. Wait until estimate is on CRAN.

Performance and Parameters

TO DO.

> Can't calculate log-loss.

> # Indices of model performance
> 
>  ELPD ELPD_SE LOOIC LOOIC_SE WAIC    R2 RMSE LOGLOSS SCORE_LOG SCORE_SPHERICAL
>   -66     3.1   133      6.1  133 0.097 0.48      NA      -106            0.01
> # Description of Posterior Distributions
> 
> Parameter   | Median | CI | CI_low | CI_high |    pd | ROPE_CI | ROPE_low | ROPE_high | ROPE_Percentage |     BF |  Rhat |  ESS
> -------------------------------------------------------------------------------------------------------------------------------
> (Intercept) | -6.169 | 89 | -9.519 |  -3.028 | 0.999 |      89 |   -0.181 |     0.181 |               0 | 10.831 | 0.999 | 2747
> Sepal.Width |  2.148 | 89 |  1.100 |   3.342 | 1.000 |      89 |   -0.181 |     0.181 |               0 | 10.209 | 0.999 | 2770

Visualise the indices

TO DO.

Diagnostic Indices

About diagnostic indices such as Rhat and ESS.

bayestestR/inst/doc/indicesExistenceComparison.Rmd0000644000176200001440000000333713603574330022120 0ustar liggesusers--- title: "In-Depth 2: Comparison of Indices of Effect Existence and Significance" output: github_document: toc: true toc_depth: 3 fig_width: 10.08 fig_height: 6 word_document: toc: true toc_depth: 3 fig_width: 10.08 fig_height: 6 df_print: "kable" highlight: "pygments" reference_docx: templates/Template_Frontiers.docx rmarkdown::html_vignette: toc: true toc_depth: 3 fig_width: 10.08 fig_height: 6 tags: [r, bayesian, posterior, test] vignette: > \usepackage[utf8]{inputenc} %\VignetteIndexEntry{In-Depth 2: Comparison of Indices of Effect Existence} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console bibliography: bibliography.bib csl: apa.csl --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">", dpi=75) options(digits=2) ``` This vignette can be referred to by citing the following: - Makowski, D., Ben-Shachar, M. S., \& Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541 - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., \& Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. [10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) --- # Indices of Effect *Existence* and *Significance* in the Bayesian Framework A comparison of different Bayesian indices (*pd*, *BFs*, ROPE etc.) is accessible [**here**](https://doi.org/10.3389/fpsyg.2019.02767).bayestestR/inst/doc/example3.html0000644000176200001440000061235613620150416016540 0ustar liggesusers

3. Become a Bayesian master

This vignette can be referred to by citing the package:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541

Yoda Bayes (896 BBY - 4 ABY).

Yoda Bayes (896 BBY - 4 ABY).

Mixed Models

TO BE CONTINUED.

Priors

TO BE CONTINUED.

What’s next?

The journey to become a true Bayesian master is not over. It is merely the beginning. It is now time to leave the bayestestR universe and apply the Bayesian framework in a variety of other statistical contexts:

bayestestR/inst/doc/bayes_factors.html0000644000176200001440000310770713620150375017654 0ustar liggesusers

Bayes Factors

This vignette can be referred to by citing the following:

  • Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. https://doi.org/10.21105/joss.01541
  • Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Retrieved from 10.3389/fpsyg.2019.02767

The adoption of the Bayesian framework for applied statistics, especially in the social and psychological sciences, seems to be developing in two distinct directions. One of the key topics marking their separation is their opinion about the Bayes factor. In short, some authors (e.g., the “Amsterdam school”, led by Wagenmakers) advocate its use and emphasize its qualities as a statistical index, while others point to its limits and prefer, instead, the precise description of posterior distributions (using CIs, ROPEs, etc.).

bayestestR does not take a side in this debate, rather offering tools to help you in whatever analysis you want to achieve. Instead, it strongly supports the notion of an informed choice: discover the methods, try them, understand them, learn about them, and decide for yourself.

Having said that, here’s an introduction to Bayes factors :)

The Bayes Factor

Bayes factors (BFs) are indices of relative evidence of one “model” over another, which can be used in the Bayesian framework as alternatives to classical (frequentist) hypothesis testing indices (such as (p-values)).

According to Bayes’ theorem, we can update prior probabilities of some model (M) ((P(M))) to posterior probabilities ((P(M|D))) after observing some datum (D) by accounting for the probability of observing that datum given the model ((P(D|M)), also known as the likelihood):

[ P(M|D) = \frac{P(D|M)\times P(M)}{P(D)} ]

Using this equation, we can compare the probability-odds of two models:

[ \frac{P(M_1|D)}{P(M_2|D)} = \frac{P(D|M_1)}{P(D|M_2)} \times \frac{P(M_1)}{P(M_2)} ] Where the left-most term are the posterior odds, the right-most term are the prior odds, and the middle term is the Bayes factor:

[ BF_{12}=\frac{P(D|M_1)}{P(D|M_2)} ]

Thus, Bayes factors can be seen either as a ratio quantifying the relative probability of some observed data by two models as they can be computed by comparing the marginal likelihoods of the two models, or as the degree by which some prior beliefs about the relative credibility of two models are to be updated as they can be computed by dividing posterior odds by prior odds, as we will soon demonstrate.

Here we provide functions for computing Bayes factors in two different applications: testing single parameters (coefficients) within a model and comparing statistical models themselves.

Testing Models’ Parameters with Bayes Factors

A Bayes factor for a single parameter can be used to answer the question:

Given the observed data, has the null hypothesis of an absence of an effect become more, or less credible?

Bayesian analysis of the Students' (1908) Sleep data set.

Bayesian analysis of the Students’ (1908) Sleep data set.

Let’s use the Students’ (1908) Sleep data set (data("sleep")), in which people took some drug and where the researchers measured the extra hours of sleep that they slept afterwards. We will try answering the following question:

given the observed data, has the hypothesis that the drug (the effect of group) has no effect on the number of hours of extra sleep (variable extra) become more or less credible?

The boxplot suggests that the second group has a higher number of hours of extra sleep. By how much? Let’s fit a simple Bayesian linear model, with a prior of (b_{group} \sim N(0, 3)):

Testing against a null-region

One way of operationalizing the null-hypothesis is by setting a null region, such that an effect that falls within this interval would be practically equivalent to the null (Kruschke, 2010). In our case, that means defining a range of effects we would consider equal to the drug having no effect at all. We can then compute the prior probability of the drug’s effect falling within this null-region, and the prior probability of the drug’s effect falling outside the null-region to get our prior odds. Say any effect smaller than an hour of extra sleep is practically equivalent to being no effect at all, we would define our prior odds as:

[ \frac {P(b_{drug} \in [-1, 1])} {P(b_{drug} \notin [-1, 1])} ]

Given our prior has a normal distribution centered at 0 hours with a scale (an SD) of 2.5 hours, our priors would look like this:

and the prior odds would be 2.2.

By looking at the posterior distribution, we can now compute the posterior probability of the drug’s effect falling within the null-region, and the posterior probability of the drug’s effect falling outside the null-region to get our posterior odds:

[ \frac {P(b_{drug} \in [-1,1] | Data)} {P(b_{drug} \notin [-1,1] | Data)} ]

We can see that the center of the posterior distribution has shifted away from 0 (to ~1.5). Likewise, the posterior odds are 2 - which seems to favor the effect being non-null, but… does this mean the data support the alternative over the null? Hard to say, since even before the data were observed, the priors already favored the alternative - so we need to take our priors into account here!

Let’s compute the Bayes factor as the change from the prior odds to the posterior odds: (BF_{10} = Odds_{posterior} / Odds_{prior} = 0.9)! This BF indicates that the data provide 1/0.9 = 1.1 times more evidence for the effect of the drug being practically nothing than it does for the drug having some clinically significant effect. Thus, although the center of distribution has shifted away from 0, and the posterior distribution seems to favor a non-null effect of the drug, it seems that given the observed data, the probability mass has overall shifted closer to the null interval, making the values in the null interval more probable! (see Non-overlapping Hypotheses in Morey & Rouder, 2011)

Note that interpretation guides for Bayes factors can be found here.

All of this can be achieved with the function bayesfactor_parameters(), which computes a Bayes factor for each of the model’s parameters:

> # Bayes Factor (Null-Interval)
> 
> Parameter   |   BF
> ------------------
> (Intercept) | 0.02
> group2      |  0.9
> 
> * Evidence Against The Null: [-1, 1]

We can also plot using the see package:

Testing against the point-null (0)

What if we don’t know what region would be practically equivalent to 0? Or if we just want the null to be exactly zero? Not a problem - as the width of null region shrinks to a point, the change from the prior probability to the posterior probability of the null can be estimated by comparing the density of the null value between the two distributions.[1] This ratio is called the Savage-Dickey ratio, and has the added benefit of also being an approximation of a Bayes factor comparing the estimated model against a model in which the parameter of interest has been restricted to a point-null:

“[…] the Bayes factor for (H_0) versus (H_1) could be obtained by analytically integrating out the model parameter (\theta). However, the Bayes factor may likewise be obtained by only considering (H_1), and dividing the height of the posterior for (\theta) by the height of the prior for (\theta), at the point of interest.” (Wagenmakers, Lodewyckx, Kuriyal, & Grasman, 2010)

> # Bayes Factor (Savage-Dickey density ratio)
> 
> Parameter |   BF
> ----------------
> group2    | 1.49
> 
> * Evidence Against The Null: [0]

One-sided tests

We can also conduct a directional test (a “one sided” or “one tailed” test) if we have a prior hypothesis about the direction of the effect. This can be done by setting an order restriction on the prior distribution (and thus also on the posterior distribution) of the alternative (Morey & Wagenmakers, 2014). For example, if we have a prior hypothesis that the effect of the drug is an increase in the number of sleep hours, the alternative will be restricted to the region to the right of the null (point or interval):

> # Bayes Factor (Savage-Dickey density ratio)
> 
> Parameter |   BF
> ----------------
> group2    | 2.77
> 
> * Evidence Against The Null: [0]
> *                 Direction: Right-Sided test

As we can see, given that we have an a priori assumption about the direction of the effect (that the effect is positive), the presence of an effect is 2.8 times more likely than the absence of an effect, given the observed data (or that the data are 2.8 times more probable under (H_1) than (H_0)). This indicates that, given the observed data, and a priori hypothesis, the posterior mass has shifted away from the null value, giving some evidence against the null (note that a Bayes factor of 2.8 is still considered quite weak evidence).

NOTE: See the Testing Contrasts appendix below.

Support intervals

So far we’ve seen that Bayes factors quantify relative support between competing hypotheses. However, we can also ask:

Upon observing the data, the credibility of which of the parameter’s values has increased (or decreased)?

For example, we’ve seen that the point null has become somewhat less credible after observing the data, but we might also ask: which values have gained some credibility given the observed data? The resulting range of values is called the support interval as it indicates which values are supported by the data (Wagenmakers, Gronau, Dablander, & Etz, 2018). We can do this by once again comparing the prior and posterior distributions and checking where the posterior densities are higher than the prior densities. This can be achieved with the si() function:

> # Support Interval
> 
> Parameter |    BF = 1 SI
> ------------------------
> group2    | [0.25, 2.94]

The argument BF = 1 indicates that we want the interval to contain values that have gained support by a factor of at least 1 (that is, any support at all).

Visually, we can see that the credibility of all the values within this interval has increased (and likewise the credibility of all the values outside this interval has decreased):

We can also see that this support interval (just barely) excludes the point null (0) - whose credibility we’ve already seen has decreased by the observed data. This emphasizes the relationship between the support interval and the Bayes factor:

“The interpretation of such intervals would be analogous to how a frequentist confidence interval contains all the parameter values that would not have been rejected if tested at level (\alpha). For instance, a BF = 1/3 support interval encloses all values of theta for which the updating factor is not stronger than 3 against.” (Wagenmakers et al., 2018)

Thus, the choice of BF (the level of support the interval should indicate) depends on what we want our interval to represent:

  • A (BF = 1) contains values whose credibility has merely not decreased by observing the data.
  • A (BF > 1) contains values who received more impressive support from the data.
  • A (BF < 1) contains values whose credibility has not been impressively decreased by observing the data. Testing against values outside this interval will produce a Bayes factor larger than (1/BF) in support of the alternative.

Comparing Models using Bayes Factors

Bayes factors can also be used to compare statistical models, for which they answer the question:

Under which model are the observed data more probable?

In other words, which model is more likely to have produced the observed data? This is usually done by comparing the marginal likelihoods of two models. In such a case, the Bayes factor is a measure of the relative evidence of one of the compared models over the other.

Let’s use Bayes factors for model comparison to find a model that best describes the length of an iris’ sepal using the iris data set.

For Bayesian models (brms and rstanarm)

Note: In order to compute Bayes factors for Bayesian models, non-default arguments must be added upon fitting:

  • brmsfit models must have been fitted with save_all_pars = TRUE
  • stanreg models must have been fitted with a defined diagnostic_file.

Let’s first fit 5 Bayesian regressions with brms to predict Sepal.Length:

We can now compare these models with the bayesfactor_models() function, using the denominator argument to specify which model all models will be compared against (in this case, the intercept-only model):

> # Bayes Factors for Model Comparison
> 
>   Model                            BF
>   [1] Petal.Length           3.45e+44
>   [2] Species                5.63e+29
>   [3] Species + Petal.Length 7.12e+55
>   [4] Species * Petal.Length 9.15e+55
> 
> * Against Denominator: [5] (Intercept only)
> *   Bayes Factor Type: marginal likelihoods (bridgesampling)

We can see that the full model is the best model - with (BF_{\text{m0}}=9\times 10^{55}) compared to the null (intercept only).

Due to the transitive property of Bayes factors, we can easily change the reference model to the main effects model:

> # Bayes Factors for Model Comparison
> 
>   Model                            BF
>   [1] Petal.Length           4.84e-12
>   [2] Species                7.90e-27
>   [4] Species * Petal.Length     1.28
>   [5] (Intercept only)       1.40e-56
> 
> * Against Denominator: [3] Species + Petal.Length
> *   Bayes Factor Type: marginal likelihoods (bridgesampling)

As we can see, though the full model is the best, there is hardly any evidence that it is preferable to the main effects model.

We can also change the reference model to the Species model:

> # Bayes Factors for Model Comparison
> 
>   Model                            BF
>   [1] Petal.Length           6.12e+14
>   [3] Species + Petal.Length 1.27e+26
>   [4] Species * Petal.Length 1.63e+26
>   [5] (Intercept only)       1.78e-30
> 
> * Against Denominator: [2] Species
> *   Bayes Factor Type: marginal likelihoods (bridgesampling)

Notice that in the Bayesian framework the compared models do not need to be nested models, as happened here when we compared the Petal.Length-only model to the Species-only model (something that cannot be done in the frequentist framework, where compared models must be nested in one another).

NOTE: In order to correctly and precisely estimate Bayes Factors, you always need the 4 P’s: Proper Priors,[2] and a Plentiful Posterior.[3]

For Frequentist models via the BIC approximation

It is also possible to compute Bayes factors for the comparison of frequentist models. This is done by comparing BIC measures, allowing a Bayesian comparison of non-nested frequentist models (Wagenmakers, 2007). Let’s try it out on some linear mixed models:

> # Bayes Factors for Model Comparison
> 
>   Model                                                           BF
>   [1] Petal.Length + (1 | Species)                          8.24e+24
>   [2] Petal.Length + (Petal.Length | Species)               4.77e+23
>   [3] Petal.Length + Petal.Width + (Petal.Length | Species) 1.52e+22
>   [4] Petal.Length * Petal.Width + (Petal.Length | Species) 5.93e+20
> 
> * Against Denominator: [5] 1 + (1 | Species)
> *   Bayes Factor Type: BIC approximation

Order restricted models

As stated above when discussing one-sided hypothesis tests, we can create new models by imposing order restrictions on a given model. For example, consider the following model, in which we predict the length of an iris’ sepal from the length of its petal, as well as from its species, with priors of (b_{petal} \sim N(0,2)) and (b_{versicolors}\ &\ b_{virginica} \sim N(0,1.2)):

These priors are unrestricted - that is, all values between (-\infty) and (\infty) of all parameters in the model have some non-zero credibility (no matter how small; this is true for both the prior and posterior distribution). Subsequently, a priori the parameters relating to the iris species can have any ordering, such that (a priori) setosa can have larger sepals than virginica, but it is also possible for virginica to have larger sepals than setosa!

Does it make sense to let our priors cover all of these possibilities? That depends on our prior knowledge or hypotheses. For example, even a novice botanist will assume that it is unlikely that petal length will be negatively associated with sepal length - an iris with longer petals is likely larger, and thus will also have a longer sepal. And an expert botanist will perhaps assume that setosas have smaller sepals than both versicolors and virginica. These priors can be formulated as restricted priors (Morey, 2015; Morey & Rouder, 2011):

  1. The novice botanist: (b_{petal} > 0)
  2. The expert botanist: (b_{versicolor} > 0\ &\ b_{virginica} > 0)

By testing these restrictions on prior and posterior samples, we can see how the probabilities of the restricted distributions change after observing the data. This can be achieved with bayesfactor_restricted(), which computes a Bayes factor for each restricted model vs. the unrestricted model. Let’s first specify these restrictions as logical conditions:

Let’s test these hypotheses:

> # Bayes Factor (Order-Restriction)
> 
>                                        Hypothesis P(Prior) P(Posterior)
>                                  Petal.Length > 0     0.51            1
>  (Speciesversicolor > 0) & (Speciesvirginica > 0)     0.25            0
>        BF
>      1.96
>  0.00e+00
> 
> * Bayes factors for the restricted model vs. the un-restricted model.

We can see that the novice botanist’s hypothesis gets a Bayes factor of ~2, indicating the data provides twice as much evidence for a model in which petal length is restricted to be positively associated with sepal length than for a model without such a restriction.

What about our expert botanist? He seems to have failed miserably, with a BF favoring the unrestricted model many many times over ((BF\gg1,000)). How is this possible? It seems that when controlling for petal length, versicolor and virginica actually have shorter sepals!

Note that these Bayes factors compare the restricted model to the unrestricted model. If we wanted to compare the restricted model to the null model, we could use the transitive property of Bayes factors like so:

[ BF_{restricted / NULL} = \frac {BF_{restricted / un-restricted}} {BF_{un-restricted / NULL}} ]

Because these restrictions are on the prior distribution, they are only appropriate for testing pre-planned (a priori) hypotheses, and should not be used for any post hoc comparisons (Morey, 2015).

NOTE: See the Specifying Correct Priors for Factors with More Than 2 Levels appendix below.

Bayesian Model Averaging

In the previous section we discussed the direct comparison of two models to determine if an effect is supported by the data. However, in many cases there are too many models to consider or perhaps it is not straightforward which models we should compare to determine if an effect is supported by the data. For such cases we can use Bayesian model averaging (BMA) to determine the support provided by the data for a parameter or term across many models.

Inclusion Bayes factors

Inclusion Bayes factors answer the question:

Are the observed data more probable under models with a particular predictor, than they are under models without that particular predictor?

In other words, on average - are models with predictor (X) more likely to have produced the observed data than models without predictor (X)?[4]

Since each model has a prior probability, it is possible to sum the prior probability of all models that include a predictor of interest (the prior inclusion probability), and of all models that do not include that predictor (the prior exclusion probability). After the data are observed, and each model is assigned a posterior probability, we can similarly consider the sums of the posterior models’ probabilities to obtain the posterior inclusion probability and the posterior exclusion probability. Once again, the change from prior inclusion odds to the posterior inclusion odds is the Inclusion Bayes factor (“(BF_{Inclusion})”; Clyde, Ghosh, & Littman, 2011).

Lets use the brms example from above:

> # Inclusion Bayes Factors (Model Averaged)
> 
>                      Pr(prior) Pr(posterior) Inclusion BF
> Petal.Length               0.6          1.00     1.93e+26
> Species                    0.6          1.00     3.15e+11
> Petal.Length:Species       0.2          0.56         5.14
> 
> * Compared among: all models
> *    Priors odds: uniform-equal

If we examine the interaction term’s inclusion Bayes factor, we can see that across all 5 models, a model with the interaction term (Species:Petal.Length) is on average 5 times more likely than a model without the interaction term. Note that Species, a factor represented in the model with several parameters, gets a single Bayes factor - inclusion Bayes factors are given per predictor!

We can also compare only matched models - such that averaging is done only across models that (1) do not include any interactions with the predictor of interest; (2) for interaction predictors, averaging is done only across models that contain the main effect from which the interaction predictor is comprised (see explanation for why you might want to do this here).

> # Inclusion Bayes Factors (Model Averaged)
> 
>                      Pr(prior) Pr(posterior) Inclusion BF
> Petal.Length               0.4          0.44     1.27e+26
> Species                    0.4          0.44     2.07e+11
> Petal.Length:Species       0.2          0.56         1.28
> 
> * Compared among: matched models only
> *    Priors odds: uniform-equal

Comparison with JASP

bayesfactor_inclusion() is meant to provide Bayes Factors per predictor, similar to JASP’s Effects option. Let’s compare the two:

  1. Across all models:
> # Inclusion Bayes Factors (Model Averaged)
> 
>           Pr(prior) Pr(posterior) Inclusion BF
> supp            0.6          1.00       140.99
> dose            0.6          1.00     3.21e+14
> dose:supp       0.2          0.72        10.12
> 
> * Compared among: all models
> *    Priors odds: uniform-equal

  1. Across matched models:
> # Inclusion Bayes Factors (Model Averaged)
> 
>           Pr(prior) Pr(posterior) Inclusion BF
> supp            0.4          0.28        59.19
> dose            0.4          0.28     1.36e+14
> dose:supp       0.2          0.72         2.57
> 
> * Compared among: matched models only
> *    Priors odds: uniform-equal

  1. With Nuisance Effects:

We’ll add dose to the null model in JASP, and do the same in R:

> Bayes factor analysis
> --------------
> [1] supp + dose             : 59  ±4.5%
> [2] supp + dose + supp:dose : 152 ±1.5%
> 
> Against denominator:
>   len ~ dose 
> ---
> Bayes factor type: BFlinearModel, JZS
> # Inclusion Bayes Factors (Model Averaged)
> 
>           Pr(prior) Pr(posterior) Inclusion BF
> dose           1.00          1.00          NaN
> supp           0.67          1.00       105.74
> dose:supp      0.33          0.72         5.06
> 
> * Compared among: all models
> *    Priors odds: uniform-equal

Averaging posteriors

Similar to how we can average evidence for a predictor across models, we can also average the posterior estimate across models. This is useful, for example, in situations where Bayes factors seem to support a null effect, yet the HDI of the alternative excludes the null value (also see si() described above). For example, looking at Motor Trend Car Road Tests (data(mtcars)), we would naturally predict miles/gallon (mpg) from transmission type (am) and weight (wt), but what about number of carburetors (carb)? Is this a good predictor?

We can determine this by comparing the following models:

> # Bayes Factors for Model Comparison
> 
>   Model                BF
>   [1] wt + am + carb 0.81
> 
> * Against Denominator: [2] wt + am
> *   Bayes Factor Type: marginal likelihoods (bridgesampling)

It seems that the model without carb as a predictor is (1/BF=1.2) times more likely than the model with carb as a predictor. We might then assume that in the latter model, the HDI will include the point-null value of 0 effect, to also indicate the credibility of the null in the posterior. However, this is not the case:

> # Highest Density Interval
> 
> Parameter   |        95% HDI
> ----------------------------
> (Intercept) | [27.95, 39.89]
> wt          | [-5.59, -1.89]
> am          | [-0.83,  5.91]
> carb        | [-1.97, -0.31]

How can this be? By estimating the HDI of the effect for carb in the full model, we are acting under the assumption that that model is correct. However, as we’ve just seen, both models are practically tied, and in fact it was the no-carb model, in which the effect for carb is fixed at 0, that was slightly more supported by the data. If this is the case why limit our estimation of the effect just to one model? (Bergh, Haaf, Ly, Rouder, & Wagenmakers, 2019).

Using Bayesian model averaging, we can combine the posterior samples from several models, weighted by the models’ marginal likelihood (done via the bayesfactor_models() function). If some parameter is part of some of the models but is missing from others, it is assumed to be fixed at 0 (which can also be seen as a method of applying shrinkage to our estimates). This results in a posterior distribution across several models, which we can now treat like any posterior distribution, and estimate the HDI. We can do this with the weighted_posteriors() function:

> # Highest Density Interval
> 
> Parameter   |        95% HDI
> ----------------------------
> (Intercept) | [28.81, 42.03]
> wt          | [-6.62, -2.14]
> am          | [-2.53,  5.51]
> carb        | [-1.71,  0.00]

We can see that across both models under consideration, the posterior of the carb effect is almost equally weighted between the alternative model and the null model - as represented by about half of the posterior mass concentrated at 0 - which makes sense as both models were almost equally supported by the data. We can also see that, across both models, the HDI now does contain 0. Thus we have resolved the conflict between the Bayes factor and the HDI (Rouder, Haaf, & Vandekerckhove, 2018)!

Note that parameters might play different roles across different models; For example, the parameter A plays a different role in the model Y ~ A + B (where it is a main effect) than it does in the model Y ~ A + B + A:B (where it is a simple effect). In many cases, centering of predictors (mean subtracting for continuous variables, and effects coding via contr.sum or orthonormal coding via contr.bayes for factors) can reduce this issue.

Appendices

Testing contrasts (with emmeans / modelbased)

Besides testing parameters, bayesfactor_parameters() can be used to test any estimate based on the prior and posterior distribution of the estimate. One way to achieve this is with a mix of bayesfactor_parameters() + emmeans to test Bayesian contrasts.

For example, in the sleep example from above, we can estimate the group means and the difference between them:

>  group contrast emmean lower.HPD upper.HPD
>  1     .          0.82      -0.4       2.1
>  2     .          2.28       1.1       3.5
>  .     1 - 2     -1.46      -3.2       0.2
> 
> Point estimate displayed: median 
> HPD interval probability: 0.95
> # Bayes Factor (Savage-Dickey density ratio)
> 
> Parameter |    BF
> -----------------
> 1, .      |  0.08
> 2, .      | 12.21
> ., 1 - 2  |  1.48
> 
> * Evidence Against The Null: [0]

That is strong evidence for the mean of group 1 being 0, and for the mean of group 2 not being 0, but hardly any evidence for the difference between them not being 0. Conflict? Uncertainty? That is the Bayesian way!

We can also use the easystats modelbased package to compute Bayes factors for contrasts:

> Level1 | Level2 | Median |         89% CI |   BF | Median (std.)
> ----------------------------------------------------------------
> 1      |      2 |  -1.46 | [-2.86, -0.13] | 1.48 |         -0.72

NOTE: See the Specifying Correct Priors for Factors with More Than 2 Levels section below.

Specifying correct priors for factors

This section introduces the biased priors obtained when using the common effects factor coding (contr.sum) or dummy factor coding (contr.treatment), and the solution of using orthonormal factor coding (contr.bayes) (as outlined in Rouder, Morey, Speckman, & Province, 2012, sec. 7.2). Specifically, special care should be taken when working with factors which have 3 or more levels.

Contrasts (and marginal means)

The effects factor coding commonly used in factorial analysis carries a hidden bias when it is applied to Bayesian priors. For example, if we want to test all pairwise differences between 3 levels of the same factor, we would expect all a priori differences to have the same distribution, but…

For our example, we will test all prior pairwise differences between the 3 species in the iris data-set.

>  contrast               estimate lower.HPD upper.HPD
>  setosa - versicolor      -0.004      -2.7       2.8
>  setosa - virginica        0.086      -4.4       4.5
>  versicolor - virginica    0.090      -4.4       4.6
> 
> Point estimate displayed: median 
> HPD interval probability: 0.95

We can see that though the prior estimate for all 3 pairwise contrasts is ~0, the scale / HDI is much narrower for the prior of the setosa - versicolor contrast!

What happened???
This is caused by an inherent bias in the priors introduced by the effects coding (it’s even worse with the default treatment coding, because the prior for the intercept is usually drastically different from the effect’s parameters). And since it affects the priors, this bias will also bias the Bayes factors, over- or under-stating evidence for some contrasts over others!

The solution is to use orthonormal factor coding, a-la contr.bayes, which can either specify this factor coding per-factor:

Or you can set it globally:

Let’s again estimate the prior differences:

>  contrast               estimate lower.HPD upper.HPD
>  setosa - versicolor       0.004      -2.8      2.71
>  setosa - virginica        0.027      -2.6      2.84
>  versicolor - virginica    0.057      -2.6      2.95
> 
> Point estimate displayed: median 
> HPD interval probability: 0.95

We can see that using this coding scheme, we have equal priors on all pairwise contrasts.

Order restrictions

This bias also affects order restrictions involving 3 or more levels. For example, if we want to test an order restriction among A, B, and C, the a priori probability of obtaining the order A > C > B is 1/6 (reach back to intro to stats year 1), but…

For our example, we will be interested in the following order restrictions in the iris data-set (each line is a separate restriction):

With the default factor coding, this looks like this:

> # Bayes Factor (Order-Restriction)
> 
>                                        Hypothesis P(Prior) P(Posterior)
>                               setosa < versicolor     0.49            1
>                                setosa < virginica     0.50            1
>                            versicolor < virginica     0.51            1
>  setosa    < virginica  & virginica  < versicolor     0.10            0
>  virginica < setosa     & setosa     < versicolor     0.19            0
>   setosa    < versicolor & versicolor < virginica     0.20            1
>        BF
>      2.02
>      1.99
>      1.95
>  0.00e+00
>  0.00e+00
>      5.04
> 
> * Bayes factors for the restricted model vs. the un-restricted model.

What happened???

  1. The comparison of 2 levels all have a prior of ~0.5, as expected.
  2. The comparison of 3 levels has different priors, depending on the order restriction - i.e. some orders are a priori more likely than others!!!

Again, this is solved by using the orthonormal factor coding (from above).

> # Bayes Factor (Order-Restriction)
> 
>                                        Hypothesis P(Prior) P(Posterior)
>                               setosa < versicolor     0.50            1
>                                setosa < virginica     0.51            1
>                            versicolor < virginica     0.50            1
>  setosa    < virginica  & virginica  < versicolor     0.17            0
>  virginica < setosa     & setosa     < versicolor     0.16            0
>   setosa    < versicolor & versicolor < virginica     0.16            1
>        BF
>      2.02
>      1.96
>      1.99
>  0.00e+00
>  0.00e+00
>      6.13
> 
> * Bayes factors for the restricted model vs. the un-restricted model.

Conclusion

When comparing the results from the two factor coding schemes, we find:
1. In both cases, the estimated (posterior) means are quite similar (if not identical).
2. The priors and Bayes factors differ between the two schemes.
3. Only with contr.bayes is the prior distribution of the difference or the order of 3 (or more) means balanced.

References

Bergh, D. van den, Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers, E.-J. (2019). A cautionary note on estimating effect size.

Clyde, M. A., Ghosh, J., & Littman, M. L. (2011). Bayesian adaptive sampling for variable selection and model averaging. Journal of Computational and Graphical Statistics, 20(1), 80–101.

Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. Trends in Cognitive Sciences, 14(7), 293–300.

Morey, R. D. (2015). Multiple comparisons with bayesfactor, part 2 – order restrictions. Retrieved from http://bayesfactor.blogspot.com/2015/01/multiple-comparisons-with-bayesfactor-2.html

Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. Psychological Methods, 16(4), 406.

Morey, R. D., & Wagenmakers, E.-J. (2014). Simple relation between bayesian order-restricted and point-null hypothesis tests. Statistics & Probability Letters, 92, 121–124.

Rouder, J. N., Haaf, J. M., & Vandekerckhove, J. (2018). Bayesian inference for psychology, part iv: Parameter estimation and bayes factors. Psychonomic Bulletin & Review, 25(1), 102–113.

Rouder, J. N., Morey, R. D., Speckman, P. L., & Province, J. M. (2012). Default bayes factors for anova designs. Journal of Mathematical Psychology, 56(5), 356–374.

Wagenmakers, E.-J. (2007). A practical solution to the pervasive problems of p values. Psychonomic Bulletin & Review, 14(5), 779–804.

Wagenmakers, E.-J., Gronau, Q. F., Dablander, F., & Etz, A. (2018). The support interval. https://doi.org/10.31234/osf.io/zwnxb

Wagenmakers, E.-J., Lodewyckx, T., Kuriyal, H., & Grasman, R. (2010). Bayesian hypothesis testing for psychologists: A tutorial on the savage–dickey method. Cognitive Psychology, 60(3), 158–189.

  1. Note that as the width of null interval shrinks to zero, the prior probability and posterior probability of the alternative tends towards 1.00.

  2. Robert, 2016; Kass & Raftery, 1993; Fernández, Ley, & Steel, 2001

  3. Gronau, Wagenmakers, Heck, & Matzke, 2019

  4. A model without predictor (X) can be thought of as a model in which the parameter(s) of the predictor have been restricted to a null-point of 0.

bayestestR/inst/doc/example1.R0000644000176200001440000001431213620150406015756 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) library(insight) options(knitr.kable.NA = '') knitr::opts_chunk$set(comment=">") options(digits=2) set.seed(333) if (!requireNamespace("rstanarm", quietly = TRUE) || !requireNamespace("dplyr", quietly = TRUE) || !requireNamespace("ggplot2", quietly = TRUE)) { knitr::opts_chunk$set(eval = FALSE) } format_percent <- function(x, digits = 0, ...) { paste0(format_value(x*100, digits = digits, ...), "%") } ## ----message=FALSE, warning=FALSE--------------------------------------------- library(rstanarm) library(bayestestR) library(insight) ## ----message=FALSE, warning=FALSE--------------------------------------------- model <- lm(Sepal.Length ~ Petal.Length, data=iris) summary(model) ## ----message=FALSE, warning=FALSE--------------------------------------------- library(ggplot2) # Load the package # The ggplot function takes the data as argument, and then the variables # related to aesthetic features such as the x and y axes. 
ggplot(iris, aes(x=Petal.Length, y=Sepal.Length)) + geom_point() + # This adds the points geom_smooth(method="lm") # This adds a regression line ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris) ## ----echo=FALSE, message=FALSE, warning=FALSE, comment=NA, results='hide'----- library(rstanarm) set.seed(333) model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris) ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # posteriors <- insight::get_parameters(model) # # head(posteriors) # Show the first 6 rows ## ----message=FALSE, warning=FALSE, echo=FALSE--------------------------------- posteriors <- insight::get_parameters(model) head(posteriors) # Show the first 6 rows ## ----message=FALSE, warning=FALSE--------------------------------------------- nrow(posteriors) # Size (number of rows) ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris, chains = 2, iter = 1000, warmup = 250) # # nrow(insight::get_parameters(model)) # Size (number of rows) ## ----echo=FALSE, message=FALSE, warning=FALSE, comment=NA, echo=FALSE--------- model <- stan_glm(Sepal.Length ~ Petal.Length, data=iris, chains = 2, iter = 1000, warmup = 250, refresh = 0) nrow(insight::get_parameters(model)) # Size (number of rows) ## ----message=FALSE, warning=FALSE--------------------------------------------- ggplot(posteriors, aes(x = Petal.Length)) + geom_density(fill = "orange") ## ----message=FALSE, warning=FALSE--------------------------------------------- mean(posteriors$Petal.Length) ## ----message=FALSE, warning=FALSE--------------------------------------------- median(posteriors$Petal.Length) ## ----message=FALSE, warning=FALSE--------------------------------------------- map_estimate(posteriors$Petal.Length) ## ----message=FALSE, warning=FALSE--------------------------------------------- 
ggplot(posteriors, aes(x = Petal.Length)) + geom_density(fill = "orange") + # The mean in blue geom_vline(xintercept=mean(posteriors$Petal.Length), color="blue", size=1) + # The median in red geom_vline(xintercept=median(posteriors$Petal.Length), color="red", size=1) + # The MAP in purple geom_vline(xintercept=map_estimate(posteriors$Petal.Length), color="purple", size=1) ## ----message=FALSE, warning=FALSE--------------------------------------------- range(posteriors$Petal.Length) ## ----message=FALSE, warning=FALSE--------------------------------------------- hdi(posteriors$Petal.Length, ci=0.89) ## ----message=FALSE, warning=FALSE--------------------------------------------- library(dplyr) # We keep only rows for which feed is meatmeal or sunflower data <- chickwts %>% filter(feed %in% c("meatmeal", "sunflower")) ## ----message=FALSE, warning=FALSE, eval=FALSE--------------------------------- # model <- stan_glm(weight ~ feed, data=data) ## ----echo=FALSE, message=FALSE, warning=FALSE, comment=NA, results='hide'----- model <- stan_glm(weight ~ feed, data=data) ## ----message=FALSE, warning=FALSE--------------------------------------------- posteriors <- insight::get_parameters(model) ggplot(posteriors, aes(x=feedsunflower)) + geom_density(fill = "red") ## ----message=FALSE, warning=FALSE--------------------------------------------- median(posteriors$feedsunflower) hdi(posteriors$feedsunflower) ## ----message=FALSE, warning=FALSE--------------------------------------------- rope(posteriors$feedsunflower, range = c(-20, 20), ci=0.89) ## ----echo=FALSE, fig.cap="Prof. 
Sanders giving default values to define the Region of Practical Equivalence (ROPE).", fig.align='center', out.width="75%"---- knitr::include_graphics("https://github.com/easystats/easystats/raw/master/man/figures/bayestestR/profsanders.png") ## ----message=FALSE, warning=FALSE--------------------------------------------- rope_value <- 0.1 * sd(data$weight) rope_range <- c(-rope_value, rope_value) rope_range ## ----message=FALSE, warning=FALSE--------------------------------------------- rope_value <- rope_range(model) rope_range ## ----message=FALSE, warning=FALSE--------------------------------------------- rope(posteriors$feedsunflower, range = rope_range, ci=0.89) ## ----message=FALSE, warning=FALSE--------------------------------------------- n_positive <- posteriors %>% filter(feedsunflower > 0) %>% # select only positive values nrow() # Get length n_positive / nrow(posteriors) * 100 ## ----message=FALSE, warning=FALSE--------------------------------------------- p_direction(posteriors$feedsunflower) ## ----message=FALSE, warning=FALSE, eval=TRUE---------------------------------- pd <- 97.82 onesided_p <- 1 - pd / 100 twosided_p <- onesided_p * 2 twosided_p ## ----message=FALSE, warning=FALSE--------------------------------------------- lm(weight ~ feed, data=data) %>% summary() ## ----message=FALSE, warning=FALSE--------------------------------------------- describe_posterior(model, test = c("p_direction","rope","bayesfactor")) bayestestR/inst/CITATION0000644000176200001440000000142213541565014014514 0ustar liggesusersbibentry( bibtype="Article", title="bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework.", author=c(person("Dominique", "Makowski"), person("Mattan S.", "Ben-Shachar"), person("Daniel", "Lüdecke")), journal="Journal of Open Source Software", doi="10.21105/joss.01541", year="2019", number = "40", volume = "4", pages = "1541", url="https://joss.theoj.org/papers/10.21105/joss.01541", 
textVersion = "Makowski, D., Ben-Shachar, M., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. doi:10.21105/joss.01541", mheader = "To cite bayestestR in publications use:" )