bayestestR/0000755000176200001440000000000015054341320012374 5ustar liggesusersbayestestR/tests/0000755000176200001440000000000014542333405013544 5ustar liggesusersbayestestR/tests/testthat/0000755000176200001440000000000015054341317015404 5ustar liggesusersbayestestR/tests/testthat/test-p_direction.R0000644000176200001440000000524615005147105021005 0ustar liggesuserstest_that("p_direction", { set.seed(333) x <- distribution_normal(10000, 1, 1) pd <- p_direction(x) expect_equal(as.numeric(pd), 0.842, tolerance = 0.1) # converstion into frequentist p-value works p <- p_direction(x, as_p = TRUE) expect_equal(as.numeric(p), pd_to_p(pd$pd), tolerance = 0.1) expect_equal(as.vector(p), pd_to_p(pd$pd), tolerance = 0.1) # return NA expect_true(is.na(as.numeric(p_direction(c(x, NA), remove_na = FALSE)))) # works expect_equal(as.numeric(p_direction(c(x, NA))), 0.8413, tolerance = 0.1) expect_equal(as.vector(p_direction(c(x, NA))), 0.8413, tolerance = 0.1) # error if only NA expect_error(p_direction(c(NA_real_, NA_real_)), regex = "No valid values found") expect_equal(as.numeric(p_direction(x, method = "kernel")), 0.842, tolerance = 0.1) expect_s3_class(pd, "p_direction") expect_s3_class(pd, "data.frame") expect_identical(dim(pd), c(1L, 2L)) expect_identical( capture.output(print(pd)), c( "Probability of Direction", "", "Parameter | pd", "------------------", "Posterior | 84.13%" ) ) df <- data.frame(replicate(4, rnorm(100))) pd <- p_direction(df) expect_s3_class(pd, "p_direction") expect_s3_class(pd, "data.frame") expect_identical(dim(pd), c(4L, 2L)) }) test_that("p_direction", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") expect_equal( p_direction(m, effects = "all")$pd, p_direction(p)$pd, tolerance = 1e-3 ) # converstion into frequentist p-value works expect_equal( p_direction(m, effects = "all", as_p = TRUE)$p, pd_to_p(p_direction(m, effects = "all")$pd), tolerance = 1e-3 ) expect_equal( p_direction(m, effects = "all", as_p = TRUE)$p, as.numeric(p_direction(m, effects = "all", as_p = TRUE)), tolerance = 1e-3 ) expect_equal( p_direction(m, effects = "all", as_p = TRUE)$p, as.vector(p_direction(m, effects = "all", as_p = TRUE)), tolerance = 1e-3 ) }) test_that("p_direction", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") expect_equal( p_direction(m, effects = "all", component = "all")$pd, p_direction(p)$pd, tolerance = 1e-3 ) }) bayestestR/tests/testthat/test-p_to_bf.R0000644000176200001440000000066114542333405020117 0ustar liggesuserstest_that("p_to_bf works", { skip_if_not_or_load_if_installed("parameters") m <- lm(mpg ~ hp + cyl + am, data = mtcars) p <- coef(summary(m))[-1, 4] # BF by hand bfs <- 3 * p * sqrt(insight::n_obs(m)) expect_equal(p_to_bf(m, log = FALSE)[-1, ]$BF, exp(-log(bfs)), tolerance = 1e-4, ignore_attr = TRUE) expect_equal(p_to_bf(m, log = TRUE)[-1, ]$log_BF, -log(bfs), tolerance = 1e-4, ignore_attr = TRUE) }) bayestestR/tests/testthat/test-map_estimate.R0000644000176200001440000000544615005147105021160 0ustar liggesusers# numeric ---------------------- test_that("map_estimate", { x <- distribution_normal(1000, 1) MAP <- map_estimate(x) 
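  # descriptive note (added): the MAP of 1000 draws from N(1, 1) should recover the true mode (about 1)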
expect_equal(as.numeric(MAP), 0.997, tolerance = 0.001, ignore_attr = TRUE) expect_s3_class(MAP, "map_estimate") expect_s3_class(MAP, "data.frame") expect_identical(dim(MAP), c(1L, 2L)) expect_identical( capture.output(print(MAP)), c( "MAP Estimate", "", "Parameter | MAP_Estimate", "------------------------", "x | 1.00" ) ) }) # stanreg ---------------------- test_that("map_estimate", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("BayesFactor") m <- insight::download_model("stanreg_merMod_5") skip_if(is.null(m)) expect_identical( map_estimate(m, effects = "all")$Parameter, colnames(as.data.frame(m))[c(1:5, 21)] ) expect_identical( map_estimate(m, effects = "full")$Parameter, colnames(as.data.frame(m))[1:21] ) }) # brms ---------------------- test_that("map_estimate", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("BayesFactor") m <- insight::download_model("brms_zi_3") skip_if(is.null(m)) expect_identical( map_estimate(m, effects = "all", component = "all")$Parameter, c( "b_Intercept", "b_child", "b_camper", "sd_persons__Intercept", "b_zi_Intercept", "b_zi_child", "b_zi_camper", "sd_persons__zi_Intercept" ) ) expect_identical( map_estimate(m, effects = "full", component = "all")$Parameter, c( "b_Intercept", "b_child", "b_camper", "r_persons[1,Intercept]", "r_persons[2,Intercept]", "r_persons[3,Intercept]", "r_persons[4,Intercept]", "sd_persons__Intercept", "b_zi_Intercept", "b_zi_child", "b_zi_camper", "r_persons__zi[1,Intercept]", "r_persons__zi[2,Intercept]", "r_persons__zi[3,Intercept]", "r_persons__zi[4,Intercept]", "sd_persons__zi_Intercept" ) ) m <- correlationBF(y = iris$Sepal.Length, x = iris$Sepal.Width) expect_error(map_estimate(m)) }) # edge cases test_that("map_estimate, constant vectors or sparse samples", { x <- c(2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.2, 2.2, 2.2, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5) out <- map_estimate(x, verbose = FALSE) expect_true(is.na(out$MAP_Estimate)) out <- map_estimate(c(3, 3, 3), verbose = FALSE) expect_identical(out$MAP_Estimate, 3) expect_message( map_estimate(x, verbose = TRUE), regex = "Could not calculate MAP estimate" ) expect_message( map_estimate(c(3, 3, 3), verbose = TRUE), regex = "Data is singular" ) }) bayestestR/tests/testthat/test-print.R0000644000176200001440000000071515005147105017636 0ustar liggesuserstest_that("print.describe_posterior", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("brms") m <- insight::download_model("brms_zi_3") skip_if(is.null(m)) expect_snapshot(describe_posterior(m, verbose = FALSE), variant = "windows") expect_snapshot(describe_posterior(m, effects = "all", component = "all", verbose = FALSE), variant = "windows") }) bayestestR/tests/testthat/test-format.R0000644000176200001440000000343414542333405020000 0ustar liggesuserstest_that("p_significance", { set.seed(333) x <- rnorm(100) expect_equal( format(point_estimate(x)), data.frame(Median = "0.05", Mean = "-0.02", MAP = "0.13", stringsAsFactors = FALSE), ignore_attr = TRUE ) expect_equal( format(ci(x)), data.frame(`95% CI` = "[-1.93, 1.77]", stringsAsFactors = FALSE), ignore_attr = TRUE ) expect_equal( format(p_rope(x)), data.frame(ROPE = "[-0.10, 0.10]", `p (ROPE)` = "0.100", stringsAsFactors = FALSE), ignore_attr = TRUE ) expect_equal( format(map_estimate(x)), 
data.frame(Parameter = "x", MAP_Estimate = "0.13", stringsAsFactors = FALSE), ignore_attr = TRUE ) expect_equal( format(p_direction(x)), data.frame(Parameter = "Posterior", pd = "51.00%", stringsAsFactors = FALSE), ignore_attr = TRUE ) expect_equal( format(p_map(x)), data.frame(Parameter = "Posterior", p_MAP = "0.973", stringsAsFactors = FALSE), ignore_attr = TRUE ) expect_equal( format(p_significance(x)), data.frame(Parameter = "Posterior", ps = "0.46", stringsAsFactors = FALSE), ignore_attr = TRUE ) expect_equal( format(rope(x)), data.frame(CI = "0.95", ROPE = "[-0.10, 0.10]", `% in ROPE` = "10.64%", stringsAsFactors = FALSE), ignore_attr = TRUE ) expect_equal( format(equivalence_test(x)), data.frame( CI = "0.95", ROPE = "[-0.10, 0.10]", `% in ROPE` = "10.64%", `Equivalence (ROPE)` = "Undecided", HDI_low = "-1.93", HDI_high = "1.77", stringsAsFactors = FALSE ), ignore_attr = TRUE ) skip_if_not_installed("logspline") expect_equal( format(bayesfactor_parameters(x, verbose = FALSE)), data.frame(BF = "1.00", stringsAsFactors = FALSE), ignore_attr = TRUE ) }) bayestestR/tests/testthat/test-hdi.R0000644000176200001440000000517515005147105017253 0ustar liggesusers# numeric ------------------------------- test_that("hdi", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") expect_equal(hdi(distribution_normal(1000), ci = 0.90)$CI_low[1], -1.64, tolerance = 0.02) expect_equal(nrow(hdi(distribution_normal(1000), ci = c(0.80, 0.90, 0.95))), 3, tolerance = 0.01) expect_equal(hdi(distribution_normal(1000), ci = 1)$CI_low[1], -3.29, tolerance = 0.02) expect_identical(nchar(capture.output(print(hdi(distribution_normal(1000))))), 22L) expect_length(capture.output(print(hdi(distribution_normal(1000), ci = c(0.80, 0.90)))), 5) expect_message(hdi(c(2, 3, NA))) expect_warning(hdi(c(2, 3))) expect_message(hdi(distribution_normal(1000), ci = 0.0000001)) expect_warning(hdi(distribution_normal(1000), ci = 950)) expect_message(hdi(c(0, 0, 0))) }) # stanreg --------------------------- test_that("ci", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") expect_equal( hdi(m, ci = c(0.5, 0.8), effects = "all")$CI_low, hdi(p, ci = c(0.5, 0.8))$CI_low, tolerance = 1e-3 ) }) # brms --------------------------- test_that("rope", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") expect_equal( hdi(m, ci = c(0.5, 0.8), effects = "all", component = "all")$CI_low, hdi(p, ci = c(0.5, 0.8))$CI_low, tolerance = 1e-3 ) }) # BayesFactor --------------------------- test_that("ci - BayesFactor", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") mod_bf <- proportionBF(y = 15, N = 25, p = 0.5) p_bf <- insight::get_parameters(mod_bf) expect_equal( hdi(mod_bf, ci = c(0.5, 0.8), effects = 
"all", component = "all")$CI_low, hdi(p_bf, ci = c(0.5, 0.8))$CI_low, tolerance = 0.1 ) }) bayestestR/tests/testthat/test-equivalence_test.R0000644000176200001440000000333115005147105022037 0ustar liggesusersskip_on_cran() test_that("equivalence test, rstanarm", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") m <- insight::download_model("stanreg_merMod_5") out <- equivalence_test(m, verbose = FALSE) expect_snapshot(print(out)) out <- equivalence_test( m, range = list(c(-1, 1), "default", c(0, 2), c(-2, 0), "default"), verbose = FALSE ) expect_snapshot(print(out)) expect_error( equivalence_test( m, range = list(c(-1, 1), "default", c(0, 2), c(-2, 0)), verbose = FALSE ), regex = "Length of" ) expect_error( equivalence_test( m, range = list(c(-1, 1), "default", c(0, 2), c(-2, 0), "a"), verbose = FALSE ), regex = "should be 'default'" ) }) test_that("equivalence test, df", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") m <- insight::download_model("stanreg_merMod_5") params <- as.data.frame(m)[1:5] out <- equivalence_test(params, verbose = FALSE) expect_snapshot(print(out)) out <- equivalence_test( params, range = list(c(-1, 1), "default", c(0, 2), c(-2, 0), "default"), verbose = FALSE ) expect_snapshot(print(out)) expect_error( equivalence_test( params, range = list(c(-1, 1), "default", c(0, 2), c(-2, 0)), verbose = FALSE ), regex = "Length of" ) expect_error( equivalence_test( params, range = list(c(-1, 1), "default", c(0, 2), c(-2, 0), "a"), verbose = FALSE ), regex = "should be 'default'" ) }) bayestestR/tests/testthat/test-describe_prior.R0000644000176200001440000001046615005147105021501 0ustar liggesuserstest_that("describe_prior", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") skip_on_os("linux") # Bayes Factor ---------------------------------------- expect_equal( describe_prior(correlationBF(mtcars$wt, mtcars$mpg, rscale = 0.5)), structure(list( Parameter = "rho", Prior_Distribution = "beta", Prior_Location = 2, Prior_Scale = 2 ), class = "data.frame", row.names = c( NA, -1L )) ) expect_equal( describe_prior(ttestBF(mtcars$wt, mu = 3)), structure(list( Parameter = "Difference", Prior_Distribution = "cauchy", Prior_Location = 0, Prior_Scale = 0.707106781186548 ), class = "data.frame", row.names = c( NA, -1L )) ) expect_equal( describe_prior(contingencyTableBF( x = table(mtcars$am, mtcars$cyl), sampleType = "poisson" )), structure(list( Parameter = "Ratio", Prior_Distribution = "poisson", Prior_Location = 0, Prior_Scale = 1 ), class = "data.frame", row.names = c( NA, -1L )) ) expect_equal( describe_prior(contingencyTableBF( x = table(mtcars$am, mtcars$cyl), sampleType = "indepMulti", fixedMargin = "cols", priorConcentration = 1.6 )), structure(list( Parameter = "Ratio", Prior_Distribution = "independent multinomial", Prior_Location = 0, Prior_Scale = 1.6 ), class = "data.frame", row.names = c( NA, -1L )) ) expect_equal( describe_prior(anovaBF(extra ~ group, data = sleep, progress = FALSE)), structure(list(Parameter = c( "group-1", "group-2", "mu", "sig2", "g_group" ), Prior_Distribution = c( "cauchy", "cauchy", NA, NA, NA ), Prior_Location = c(0, 0, NA, NA, NA), Prior_Scale = c( 0.5, 0.5, NA, NA, NA )), row.names = c(NA, -5L), class = "data.frame") ) # brms 
---------------------------------------- mod_brms <- insight::download_model("brms_1") expect_equal( describe_prior(mod_brms), structure( list( Parameter = c("b_Intercept", "b_wt", "b_cyl", "sigma"), Prior_Distribution = c("student_t", "uniform", "uniform", "student_t"), Prior_Location = c(19.2, NA, NA, 0), Prior_Scale = c(5.4, NA, NA, 5.4), Prior_df = c(3, NA, NA, 3) ), row.names = c(NA, -4L), class = "data.frame", priors = structure( list( prior = c( "(flat)", "(flat)", "(flat)", "student_t(3, 19.2, 5.4)", "student_t(3, 0, 5.4)" ), class = c("b", "b", "b", "Intercept", "sigma"), coef = c("", "cyl", "wt", "", ""), group = c("", "", "", "", ""), resp = c("", "", "", "", ""), dpar = c("", "", "", "", ""), nlpar = c("", "", "", "", ""), bound = c("", "", "", "", ""), source = c( "(unknown)", "(vectorized)", "(vectorized)", "(unknown)", "(unknown)" ), Parameter = c("b_", "b_cyl", "b_wt", "b_Intercept", "sigma") ), special = list(mu = list()), row.names = c(NA, -5L), sample_prior = "no", class = "data.frame" ) ), ignore_attr = TRUE, tolerance = 1e-2 ) # stanreg ---------------------------------------- mod_stanreg1 <- insight::download_model("stanreg_gamm4_1") mod_stanreg2 <- insight::download_model("stanreg_merMod_1") expect_equal( describe_prior(mod_stanreg1), structure(list( Parameter = "(Intercept)", Prior_Distribution = "normal", Prior_Location = 3.05733333333333, Prior_Scale = 1.08966571234175 ), row.names = c( NA, -1L ), class = "data.frame") ) expect_equal( describe_prior(mod_stanreg2), structure( list( Parameter = c("(Intercept)", "cyl"), Prior_Distribution = c( "normal", "normal" ), Prior_Location = c(0, 0), Prior_Scale = c(2.5, 1.39983744766986) ), row.names = c(NA, -2L), class = "data.frame" ) ) }) bayestestR/tests/testthat/test-p_map.R0000644000176200001440000000345715005147105017604 0ustar liggesuserstest_that("p_map", { x <- distribution_normal(1000, 0.4) pmap <- p_map(x) expect_equal(as.numeric(pmap), 0.9285376, tolerance = 0.001) expect_s3_class(pmap, "p_map") expect_s3_class(pmap, "data.frame") expect_identical(dim(pmap), c(1L, 2L)) expect_identical( capture.output(print(pmap)), c( "MAP-based p-value", "", "Parameter | p (MAP)", "-------------------", "Posterior | 0.929" ) ) expect_equal(as.numeric(p_map(distribution_normal(1000))), 1, tolerance = 0.1) expect_equal(as.numeric(p_map(distribution_normal(1000, 1, 1))), 0.62, tolerance = 0.1) expect_equal(as.numeric(p_map(distribution_normal(1000, 2, 1))), 0.15, tolerance = 0.1) expect_equal(as.numeric(p_map(distribution_normal(1000, 3, 0.01))), 0, tolerance = 0.1) }) test_that("p_map", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") expect_equal( p_map(m, effects = "all")$p_MAP, p_map(p)$p_MAP, tolerance = 0.1 ) }) test_that("p_map", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("brms") m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") expect_equal( p_map(m, effects = "all", component = "all")$p_MAP, p_map(p)$p_MAP, tolerance = 0.1 ) }) test_that("p_map | null", { x <- distribution_normal(4000, mean = 1) expect_equal(as.numeric(p_map(x)), 0.6194317, ignore_attr = TRUE, tolerance = 0.01) expect_equal(as.numeric(p_map(x, null = 1)), 1, ignore_attr = TRUE, tolerance = 0.01) }) 
bayestestR/tests/testthat/test-bayesian_as_frequentist.R0000644000176200001440000000537015005147105023413 0ustar liggesusersskip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") test_that("rstanarm to freq", { skip_if_not_or_load_if_installed("rstanarm") set.seed(333) m <- insight::download_model("stanreg_glm_1") m1 <- glm(vs ~ wt, data = mtcars, family = "binomial") m2 <- convert_bayesian_as_frequentist(m) expect_equal(coef(m1), coef(m2), tolerance = 1e-3) }) test_that("rstanarm to freq", { skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("lme4") set.seed(333) m <- insight::download_model("stanreg_lmerMod_1") m1 <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars) m2 <- convert_bayesian_as_frequentist(m) expect_equal(lme4::fixef(m1), lme4::fixef(m2), tolerance = 1e-3) }) test_that("brms beta to freq", { skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("glmmTMB") skip_if_not_or_load_if_installed("lme4") skip_if_not_or_load_if_installed("betareg") set.seed(333) m <- suppressWarnings(insight::download_model("brms_beta_1")) data(FoodExpenditure, package = "betareg") m1 <- glmmTMB::glmmTMB( I(food / income) ~ income + (1 | persons), data = FoodExpenditure, family = glmmTMB::beta_family() ) m2 <- convert_bayesian_as_frequentist(m) expect_equal(lme4::fixef(m1)$cond[2], lme4::fixef(m2)$cond[2], tolerance = 1e-2) }) test_that("ordbetareg to freq", { skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("ordbetareg") skip_if_not_or_load_if_installed("glmmTMB") skip_if_not_or_load_if_installed("lme4") skip_if_not_or_load_if_installed("datawizard") set.seed(333) data(sleepstudy, package = "lme4") m <- suppressWarnings(insight::download_model("ordbetareg_1")) sleepstudy$y <- datawizard::normalize(sleepstudy$Reaction) m1 <- glmmTMB::glmmTMB( y ~ Days + (Days | Subject), data = sleepstudy, family = glmmTMB::ordbeta() ) m2 <- convert_bayesian_as_frequentist(m) expect_equal(lme4::fixef(m1), lme4::fixef(m2), tolerance = 1e-1) }) test_that("brms 0 + Intercept to freq", { skip_if_not_or_load_if_installed("brms") set.seed(333) data(mtcars) m <- brms::brm(qsec ~ 0 + Intercept + mpg, data = mtcars, refresh = 0) m1 <- lm(qsec ~ mpg, data = mtcars) m2 <- convert_bayesian_as_frequentist(m) expect_equal(coef(m1), coef(m2), tolerance = 1e-2) }) test_that("brms Interaction terms to freq", { skip_if_not_or_load_if_installed("brms") set.seed(333) m <- brms::brm(qsec ~ mpg * as.factor(am), data = mtcars, refresh = 0) m1 <- lm(qsec ~ mpg * as.factor(am), data = mtcars) m2 <- convert_bayesian_as_frequentist(m) expect_equal(coef(m1), coef(m2), tolerance = 1e-2) }) bayestestR/tests/testthat/test-marginaleffects.R0000644000176200001440000001036515054263503021643 0ustar liggesusersskip_on_cran() skip_if_not_installed("withr") skip_if_not_installed("rstanarm") skip_if_not_installed("marginaleffects", minimum_version = "0.29.0") skip_if_not_installed("collapse") withr::with_environment( new.env(), test_that("marginaleffects descrive_posterior", { data("mtcars") mtcars$cyl <- factor(mtcars$cyl) mod <- rstanarm::stan_glm(mpg ~ cyl + hp * am, data = mtcars, refresh = 0) mfx <- marginaleffects::avg_slopes(mod, by = "am") mfx_samps <- data.frame(suppressWarnings(marginaleffects::get_draws( mfx, shape = "DxP" ))) results <- describe_posterior( mfx, centrality = "MAP", ci_method = "hdi", test = c("pd", "rope", "p_map", "equivalence_test") ) results_draws <- describe_posterior( mfx_samps, centrality = "MAP", ci_method = 
"hdi", test = c("pd", "rope", "p_map", "equivalence_test"), verbose = FALSE ) expect_true(all(c("term", "contrast") %in% colnames(results))) expect_equal( results[setdiff(colnames(results), c("term", "contrast", "am"))], results_draws[setdiff(colnames(results_draws), "Parameter")], ignore_attr = TRUE ) # multi ci levels res <- hdi(mfx, ci = c(0.8, 0.9)) expect_identical( as.data.frame(res[1:3]), data.frame( term = c( "am", "am", "am", "am", "cyl", "cyl", "cyl", "cyl", "cyl", "cyl", "cyl", "cyl", "hp", "hp", "hp", "hp" ), contrast = c( "1 - 0", "1 - 0", "1 - 0", "1 - 0", "6 - 4", "6 - 4", "8 - 4", "8 - 4", "6 - 4", "6 - 4", "8 - 4", "8 - 4", "dY/dX", "dY/dX", "dY/dX", "dY/dX" ), am = c(0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1), stringsAsFactors = FALSE ) ) # estimate_density mfx <- marginaleffects::comparisons( mod, variables = "cyl", newdata = marginaleffects::datagrid(hp = 100, am = 0) ) samps <- insight::get_parameters(mod)[c("cyl6", "cyl8")] res <- estimate_density(mfx) resref <- estimate_density(samps) expect_equal( res[intersect(colnames(res), colnames(resref))], resref[intersect(colnames(res), colnames(resref))], ignore_attr = TRUE ) }) ) withr::with_environment( new.env(), test_that("marginaleffects bayesfactors", { data("mtcars") mtcars$cyl <- factor(mtcars$cyl) mod <- rstanarm::stan_glm(mpg ~ cyl + hp * am, data = mtcars, refresh = 0) modp <- unupdate(mod, verbose = FALSE) mfx <- marginaleffects::avg_slopes(mod, by = "am") mfxp <- marginaleffects::avg_slopes(modp, by = "am") mfx_samps <- as.data.frame(suppressWarnings(marginaleffects::get_draws( mfx, shape = "DxP" ))) mfxp_samps <- as.data.frame(suppressWarnings(marginaleffects::get_draws( mfxp, shape = "DxP" ))) # SI outsi <- si(mfx, prior = mfxp, verbose = FALSE) outsiref <- si(mfx_samps, prior = mfxp_samps, verbose = FALSE) expect_true(all(c("term", "contrast", "am") %in% colnames(outsi))) expect_equal( outsi[setdiff(colnames(outsi), c("term", "contrast", "am"))], outsiref[setdiff(colnames(outsiref), "Parameter")], ignore_attr = TRUE ) # bayesfactor_parameters bfp <- bayesfactor_parameters(mfx, prior = mfxp, verbose = FALSE) bfpref <- bayesfactor_parameters( mfx_samps, prior = mfxp_samps, verbose = FALSE ) expect_equal( bfp[setdiff(colnames(bfp), c("term", "contrast", "am"))], bfpref[setdiff(colnames(bfpref), "Parameter")], ignore_attr = TRUE ) }) ) test_that("marginaleffects bayesfactors", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_installed("brms") skip_if_not_installed("modelbased") m <- insight::download_model("brms_mv_1") skip_if(is.null(m)) p <- modelbased::get_marginalmeans(m, "wt") out <- describe_posterior(p) expect_named( out, c( "wt", "group", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage" ) ) expect_identical(dim(out), c(30L, 11L)) }) bayestestR/tests/testthat/test-bayesfactor_parameters.R0000644000176200001440000001024415005147105023225 0ustar liggesuserstest_that("bayesfactor_parameters data frame", { skip_if_not_or_load_if_installed("logspline", "2.1.21") Xprior <- data.frame( x = distribution_normal(1e4), y = distribution_normal(1e4) ) Xposterior <- data.frame( x = distribution_normal(1e4, mean = 0.5), y = distribution_normal(1e4, mean = -0.5) ) # point bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = 0, direction = 0, verbose = FALSE) expect_equal(bfsd$log_BF, c(0.12, 0.12), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = 0, direction = 1, verbose = FALSE) 
expect_equal(bfsd$log_BF, c(0.44, -0.35), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = 0, direction = -1, verbose = FALSE) expect_equal(bfsd$log_BF, c(-0.35, 0.44), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = 0.5, direction = 0, verbose = FALSE) expect_equal(bfsd$log_BF, c(-0.12, 0.37), tolerance = 0.1) expect_warning(bayesfactor_parameters(Xposterior, Xprior)) w <- capture_warnings(bfsd <- bayesfactor_parameters(Xposterior)) expect_match(w, "Prior", all = FALSE) expect_match(w, "40", all = FALSE) expect_equal(bfsd$log_BF, c(0, 0), tolerance = 0.1) # interval expect_warning( bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-0.1, 0.1), direction = 0), regexp = NA ) expect_equal(bfsd$log_BF, c(0.13, 0.13), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-0.1, 0.1), direction = 1) expect_equal(bfsd$log_BF, c(0.47, -0.39), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-0.1, 0.1), direction = -1) expect_equal(bfsd$log_BF, c(-0.39, 0.47), tolerance = 0.1) # interval with inf bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-0.1, Inf)) expect_equal(bfsd$log_BF, c(-0.81, 0.80), tolerance = 0.1) bfsd <- bayesfactor_parameters(Xposterior, prior = Xprior, null = c(-Inf, 0.1)) expect_equal(bfsd$log_BF, c(0.80, -0.81), tolerance = 0.1) }) test_that("bayesfactor_parameters RSTANARM", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("logspline", "2.1.21") skip_if_not_or_load_if_installed("rstanarm") fit <- suppressMessages(stan_glm(mpg ~ ., data = mtcars, refresh = 0)) set.seed(333) fit_p <- unupdate(fit, verbose = FALSE) expect_warning(BF2 <- bayesfactor_parameters(fit, fit_p)) set.seed(333) BF1 <- bayesfactor_parameters(fit, verbose = FALSE) BF3 <- bayesfactor_parameters(insight::get_parameters(fit), insight::get_parameters(fit_p), verbose = FALSE) expect_equal(BF1, BF2) expect_equal(BF1[["Parameter"]], BF3[["Parameter"]]) expect_equal(BF1[["log_BF"]], BF3[["log_BF"]]) model_flat <- suppressMessages( stan_glm(extra ~ group, data = sleep, prior = NULL, refresh = 0) ) suppressMessages( expect_error(bayesfactor_parameters(model_flat)) ) skip_on_ci() fit10 <- update(fit, chains = 10, iter = 5100, warmup = 100) suppressMessages( expect_warning(bayesfactor_parameters(fit10), regexp = NA) ) }) # bayesfactor_parameters BRMS --------------------------------------------- test_that("bayesfactor_parameters BRMS", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("logspline", "2.1.21") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("cmdstanr") skip_if_not(dir.exists(cmdstanr::cmdstan_default_install_path())) brms_mixed_6 <- insight::download_model("brms_mixed_6") set.seed(222) brms_mixed_6_p <- unupdate(brms_mixed_6) bfsd1 <- suppressWarnings(bayesfactor_parameters(brms_mixed_6, brms_mixed_6_p, effects = "fixed")) set.seed(222) bfsd2 <- suppressWarnings(bayesfactor_parameters(brms_mixed_6, effects = "fixed")) expect_equal(bfsd1$log_BF, bfsd2$log_BF, tolerance = 0.11) brms_mixed_1 <- insight::download_model("brms_mixed_1") expect_error(bayesfactor_parameters(brms_mixed_1)) }) bayestestR/tests/testthat/test-contr.R0000644000176200001440000000346514542333405017641 0ustar liggesuserstest_that("contr.equalprior | gen", { skip_on_cran() set.seed(1234) k <- 3 g <- 
4.1 contr1 <- contr.equalprior(k, contrasts = TRUE) contr2 <- contr.equalprior(k, contrasts = FALSE) samps1 <- replicate(ncol(contr1), { rnorm(4e3, 0, g) }) samps2 <- replicate(ncol(contr2), { rnorm(4e3, 0, g) }) means1 <- t(contr1 %*% t(samps1)) means2 <- t(contr2 %*% t(samps2)) expect_equal(mean(apply(means1, 2, sd)), mean(apply(means2, 2, sd)), tolerance = 0.1) }) test_that("contr.equalprior | pairs", { skip_on_cran() set.seed(1234) k <- 3 g <- 4.1 contr1 <- contr.equalprior_pairs(k, contrasts = TRUE) contr2 <- contr.equalprior_pairs(k, contrasts = FALSE) samps1 <- replicate(ncol(contr1), { rnorm(4e3, 0, g) }) samps2 <- replicate(ncol(contr2), { rnorm(4e3, 0, g) }) means1 <- t(contr1 %*% t(samps1)) means2 <- t(contr2 %*% t(samps2)) w <- matrix(c( -1, 1, 0, 1, 0, -1, 0, -1, 1 ), 3, 3) pairs1 <- t(w %*% t(means1)) pairs2 <- t(w %*% t(means2)) expect_equal(mean(apply(pairs1, 2, sd)), g, tolerance = 0.1) expect_equal(mean(apply(pairs1, 2, sd)), mean(apply(pairs2, 2, sd)), tolerance = 0.1) }) test_that("contr.equalprior | dev", { skip_on_cran() set.seed(1234) k <- 3 g <- 4.1 contr1 <- contr.equalprior_deviations(k, contrasts = TRUE) contr2 <- contr.equalprior_deviations(k, contrasts = FALSE) samps1 <- replicate(ncol(contr1), { rnorm(4e3, 0, g) }) samps2 <- replicate(ncol(contr2), { rnorm(4e3, 0, g) }) means1 <- t(contr1 %*% t(samps1)) means2 <- t(contr2 %*% t(samps2)) expect_equal(mean(apply(means1, 2, sd)), g, tolerance = 0.1) expect_equal(mean(apply(means1, 2, sd)), mean(apply(means2, 2, sd)), tolerance = 0.1) }) bayestestR/tests/testthat/test-spi.R0000644000176200001440000000501715005147105017275 0ustar liggesusers# numeric ------------------------------- test_that("spi", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") expect_equal(spi(distribution_normal(1000), ci = 0.90)$CI_low[1], -1.65, tolerance = 0.02) expect_equal(nrow(spi(distribution_normal(1000), ci = c(0.80, 0.90, 0.95))), 3, tolerance = 0.01) expect_equal(spi(distribution_normal(1000), ci = 1)$CI_low[1], -3.29, tolerance = 0.02) expect_equal(nchar(capture.output(print(spi(distribution_normal(1000))))), 22) expect_equal(length(capture.output(print(spi(distribution_normal(1000), ci = c(0.80, 0.90))))), 5) expect_error(spi(c(2, 3, NA))) expect_warning(spi(c(2, 3))) expect_message(spi(distribution_normal(1000), ci = 0.0000001)) expect_warning(spi(distribution_normal(1000), ci = 950)) expect_message(spi(c(0, 0, 0))) }) test_that("ci", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") expect_equal( spi(m, ci = c(0.5, 0.8), effects = "all")$CI_low, spi(p, ci = c(0.5, 0.8))$CI_low, tolerance = 1e-3 ) }) test_that("spi brms", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") expect_equal( spi(m, ci = c(0.5, 0.8), effects = "all", component = "all")$CI_low, spi(p, ci = c(0.5, 0.8))$CI_low, tolerance = 1e-3 ) }) test_that("ci - BayesFactor", { 
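  # descriptive note (added): SPI computed on the BayesFactor model should agree with SPI computed on its extracted posterior draws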
skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") mod_bf <- proportionBF(y = 15, N = 25, p = 0.5) p_bf <- insight::get_parameters(mod_bf) expect_equal( spi(mod_bf, ci = c(0.5, 0.8), effects = "all", component = "all")$CI_low, spi(p_bf, ci = c(0.5, 0.8))$CI_low, tolerance = 0.1 ) }) bayestestR/tests/testthat/test-weighted_posteriors.R0000644000176200001440000000545714706241121022603 0ustar liggesusersskip_on_os("linux") test_that("weighted_posteriors for BayesFactor", { skip_on_cran() skip_if_not_or_load_if_installed("BayesFactor") set.seed(123) # compute Bayes Factor for 31 different regression models null_den <- regressionBF( mpg ~ cyl + disp + hp + drat + wt, data = mtcars, progress = FALSE ) wBF <- weighted_posteriors(null_den) expect_s3_class(wBF, "data.frame") expect_equal( attr(wBF, "weights")$weights, c( 0, 13, 9, 0, 0, 55, 11, 4, 4, 1246, 6, 2, 38, 4, 946, 12, 3, 3, 209, 3, 491, 174, 4, 134, 7, 293, 1, 123, 35, 92, 51, 27 ), ignore_attr = TRUE ) }) test_that("weighted_posteriors for BayesFactor (intercept)", { # fails for win old-release # skip_on_ci() skip_on_cran() skip_if_not_or_load_if_installed("BayesFactor") set.seed(123) dat <- data.frame( x1 = rnorm(10), x2 = rnorm(10), y = rnorm(10) ) BFmods <- regressionBF(y ~ x1 + x2, data = dat, progress = FALSE) res <- weighted_posteriors(BFmods) expect_equal(attr(res, "weights")$weights, c(1032, 805, 1388, 775), ignore_attr = TRUE) wHDI <- hdi(res[c("x1", "x2")], ci = 0.9) expect_equal(wHDI$CI_low, c(-0.519, -0.640), tolerance = 0.01) expect_equal(wHDI$CI_high, c(0.150, 0.059), tolerance = 0.01) }) test_that("weighted_posteriors for nonlinear BayesFactor", { skip_on_cran() skip_if_not_or_load_if_installed("BayesFactor") set.seed(123) data(sleep) BFS <- ttestBF( x = sleep$extra[sleep$group == 1], y = sleep$extra[sleep$group == 2], nullInterval = c(-Inf, 0), paired = TRUE ) res <- weighted_posteriors(BFS) expect_equal(attributes(res)$weights$weights, c(113, 3876, 11), ignore_attr = TRUE) }) test_that("weighted_posteriors vs posterior_average", { skip("Test creates error, must check why...") skip_on_cran() skip_if_not_or_load_if_installed("BayesFactor") skip_if_not_or_load_if_installed("brms") fit1 <- brm(rating ~ treat + period + carry, data = inhaler, refresh = 0, silent = TRUE, save_pars = save_pars(all = TRUE) ) fit2 <- brm(rating ~ period + carry, data = inhaler, refresh = 0, silent = TRUE, save_pars = save_pars(all = TRUE) ) set.seed(444) expect_warning({ res_BT <- weighted_posteriors(fit1, fit2) }) set.seed(444) res_brms <- brms::posterior_average(fit1, fit2, weights = "bma", missing = 0) res_brms <- res_brms[, 1:4] res_BT1 <- eti(res_BT) res_brms1 <- eti(res_brms) expect_equal(res_BT1$Parameter, res_brms1$Parameter, tolerance = 1e-4) expect_equal(res_BT1$CI, res_brms1$CI, tolerance = 1e-4) expect_equal(res_BT1$CI_low, res_brms1$CI_low, tolerance = 1e-4) expect_equal(res_BT1$CI_high, res_brms1$CI_high, tolerance = 1e-4) }) bayestestR/tests/testthat/test-emmGrid.R0000644000176200001440000001743614746106624020112 0ustar liggesusers# TODO: decide how to rearrange the tests skip_on_ci() skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("emmeans") set.seed(300) model <- stan_glm(extra ~ group, data = sleep, refresh = 0, chains = 6, iter = 7000, warmup = 200 ) em_ <- emmeans(model, ~group) c_ <- pairs(em_) emc_ <- emmeans(model, pairwise ~ group) 
all_ <- rbind(em_, c_) all_summ <- summary(all_) set.seed(4) model_p <- unupdate(model, verbose = FALSE) set.seed(300) # estimate + hdi ---------------------------------------------------------- test_that("emmGrid hdi", { xhdi <- hdi(all_, ci = 0.95) expect_identical(colnames(xhdi)[1:2], c("group", "contrast")) expect_equal(xhdi$CI_low, all_summ$lower.HPD, tolerance = 0.1) expect_equal(xhdi$CI_high, all_summ$upper.HPD, tolerance = 0.1) xhdi2 <- hdi(emc_, ci = 0.95) expect_identical(xhdi$CI_low, xhdi2$CI_low) xhdi3 <- hdi(all_, ci = c(0.9, 0.95)) expect_identical( as.data.frame(xhdi3[1:2]), data.frame( group = c("1", "1", "2", "2", ".", "."), contrast = c(".", ".", ".", ".", "group1 - group2", "group1 - group2"), stringsAsFactors = FALSE ) ) }) test_that("emmGrid point_estimate", { xpest <- point_estimate(all_, centrality = "all", dispersion = TRUE) expect_identical(colnames(xpest)[1:2], c("group", "contrast")) expect_equal(xpest$Median, all_summ$emmean, tolerance = 0.1) xpest2 <- point_estimate(emc_, centrality = "all", dispersion = TRUE) expect_identical(xpest$Median, xpest2$Median) }) # Basics ------------------------------------------------------------------ test_that("emmGrid ci", { xci <- ci(all_, ci = 0.9) expect_identical(colnames(xci)[1:2], c("group", "contrast")) expect_length(xci$CI_low, 3) expect_length(xci$CI_high, 3) }) test_that("emmGrid eti", { xeti <- eti(all_, ci = 0.9) expect_identical(colnames(xeti)[1:2], c("group", "contrast")) expect_length(xeti$CI_low, 3) expect_length(xeti$CI_high, 3) }) test_that("emmGrid equivalence_test", { xeqtest <- equivalence_test(all_, ci = 0.9, range = c(-0.1, 0.1)) expect_identical(colnames(xeqtest)[1:2], c("group", "contrast")) expect_length(xeqtest$ROPE_Percentage, 3) expect_length(xeqtest$ROPE_Equivalence, 3) }) test_that("emmGrid estimate_density", { xestden <- estimate_density(c_, method = "logspline", precision = 5) expect_identical(colnames(xestden)[1], "contrast") expect_length(xestden$x, 5) }) test_that("emmGrid map_estimate", { xmapest <- map_estimate(all_, method = "kernel") expect_identical(colnames(xmapest)[1:2], c("group", "contrast")) expect_length(xmapest$MAP_Estimate, 3) }) test_that("emmGrid p_direction", { xpd <- p_direction(all_, method = "direct") expect_identical(colnames(xpd)[1:2], c("group", "contrast")) expect_length(xpd$pd, 3) }) test_that("emmGrid p_map", { xpmap <- p_map(all_, precision = 2^9) expect_identical(colnames(xpmap)[1:2], c("group", "contrast")) expect_length(xpmap$p_MAP, 3) }) test_that("emmGrid p_rope", { xprope <- p_rope(all_, range = c(-0.1, 0.1)) expect_identical(colnames(xprope)[1:2], c("group", "contrast")) expect_length(xprope$p_ROPE, 3) }) test_that("emmGrid p_significance", { xsig <- p_significance(all_, threshold = c(-0.1, 0.1)) expect_identical(colnames(xsig)[1:2], c("group", "contrast")) expect_length(xsig$ps, 3) }) test_that("emmGrid rope", { xrope <- rope(all_, range = "default", ci = 0.9) expect_identical(colnames(xrope)[1:2], c("group", "contrast")) expect_length(xrope$ROPE_Percentage, 3) }) # describe_posterior ------------------------------------------------------ test_that("emmGrid describe_posterior", { expect_identical( describe_posterior(all_)$median, describe_posterior(emc_)$median ) expect_identical(colnames(describe_posterior(all_))[1:2], c("group", "contrast")) skip_on_cran() expect_identical( describe_posterior(all_, bf_prior = model_p, test = "bf")$log_BF, describe_posterior(emc_, bf_prior = model_p, test = "bf")$log_BF ) }) # BFs 
--------------------------------------------------------------------- test_that("emmGrid bayesfactor_parameters", { skip_on_cran() set.seed(4) expect_equal( bayesfactor_parameters(all_, prior = model, verbose = FALSE), bayesfactor_parameters(all_, prior = model_p, verbose = FALSE), tolerance = 0.001 ) emc_p <- emmeans(model_p, pairwise ~ group) xbfp <- bayesfactor_parameters(all_, prior = model_p, verbose = FALSE) xbfp2 <- bayesfactor_parameters(emc_, prior = model_p, verbose = FALSE) xbfp3 <- bayesfactor_parameters(emc_, prior = emc_p, verbose = FALSE) expect_identical(colnames(xbfp)[1:2], c("group", "contrast")) expect_equal(xbfp$log_BF, xbfp2$log_BF, tolerance = 0.1) expect_equal(xbfp$log_BF, xbfp3$log_BF, tolerance = 0.1) expect_warning( suppressMessages( bayesfactor_parameters(all_) ), regexp = "Prior not specified" ) # error - cannot deal with regrid / transform e <- capture_error(suppressMessages(bayesfactor_parameters(regrid(all_), prior = model))) expect_match(as.character(e), "Unable to reconstruct prior estimates") }) test_that("emmGrid bayesfactor_restricted", { skip_on_cran() set.seed(4) hyps <- c("`1` < `2`", "`1` < 0") xrbf <- bayesfactor_restricted(em_, prior = model_p, hypothesis = hyps) expect_length(xrbf$log_BF, 2) expect_length(xrbf$p_prior, 2) expect_length(xrbf$p_posterior, 2) expect_warning(bayesfactor_restricted(em_, hypothesis = hyps)) xrbf2 <- bayesfactor_restricted(emc_, prior = model_p, hypothesis = hyps) expect_equal(xrbf, xrbf2, tolerance = 0.1) }) test_that("emmGrid si", { skip_on_cran() set.seed(4) xrsi <- si(all_, prior = model_p, verbose = FALSE) expect_identical(colnames(xrsi)[1:2], c("group", "contrast")) expect_length(xrsi$CI_low, 3) expect_length(xrsi$CI_high, 3) xrsi2 <- si(emc_, prior = model_p, verbose = FALSE) expect_identical(xrsi$CI_low, xrsi2$CI_low) expect_identical(xrsi$CI_high, xrsi2$CI_high) }) # For non linear models --------------------------------------------------- set.seed(333) df <- data.frame( G = rep(letters[1:3], each = 2), Y = rexp(6) ) fit_bayes <- stan_glm(Y ~ G, data = df, family = Gamma(link = "identity"), refresh = 0 ) fit_bayes_prior <- unupdate(fit_bayes, verbose = FALSE) bayes_sum <- emmeans(fit_bayes, ~G) bayes_sum_prior <- emmeans(fit_bayes_prior, ~G) test_that("emmGrid bayesfactor_parameters", { set.seed(333) skip_on_cran() xsdbf1 <- bayesfactor_parameters(bayes_sum, prior = fit_bayes, verbose = FALSE) xsdbf2 <- bayesfactor_parameters(bayes_sum, prior = bayes_sum_prior, verbose = FALSE) expect_equal(xsdbf1$log_BF, xsdbf2$log_BF, tolerance = 0.1) }) # link vs response test_that("emmGrid bayesfactor_parameters / describe w/ nonlinear models", { skip_on_cran() model <- stan_glm(vs ~ mpg, data = mtcars, family = "binomial", refresh = 0 ) probs <- emmeans(model, "mpg", type = "resp") link <- emmeans(model, "mpg") probs_summ <- summary(probs) link_summ <- summary(link) xhdi <- hdi(probs, ci = 0.95) xpest <- point_estimate(probs, centrality = "median", dispersion = TRUE) expect_equal(xhdi$CI_low, probs_summ$lower.HPD, tolerance = 0.1) expect_equal(xhdi$CI_high, probs_summ$upper.HPD, tolerance = 0.1) expect_equal(xpest$Median, probs_summ$prob, tolerance = 0.1) xhdi <- hdi(link, ci = 0.95) xpest <- point_estimate(link, centrality = "median", dispersion = TRUE) expect_equal(xhdi$CI_low, link_summ$lower.HPD, tolerance = 0.1) expect_equal(xhdi$CI_high, link_summ$upper.HPD, tolerance = 0.1) expect_equal(xpest$Median, link_summ$emmean, tolerance = 0.1) }) 
bayestestR/tests/testthat/test-ci.R0000644000176200001440000000415315005147105017075 0ustar liggesuserstest_that("ci", { skip_on_os(c("mac", "linux")) skip_if_not_or_load_if_installed("quadprog") set.seed(123) x <- rnorm(1000, 3, 2) expect_error(ci(x, method = "FDI"), regex = "`method` should be 'ETI'") out <- capture.output(print(ci(x, method = "SPI"))) expect_identical(out, "95% SPI: [-1.16, 6.76]") out <- capture.output(print(ci(x, method = "BCI"))) expect_identical(out, "95% ETI: [-0.88, 7.08]") }) test_that("ci", { expect_equal(ci(distribution_normal(1000), ci = 0.90)$CI_low[1], -1.6361, tolerance = 0.02) expect_equal(nrow(ci(distribution_normal(1000), ci = c(0.80, 0.90, 0.95))), 3, tolerance = 0.01) expect_equal(ci(distribution_normal(1000), ci = 1)$CI_low[1], -3.29, tolerance = 0.02) expect_length(capture.output(print(ci(distribution_normal(1000), ci = c(0.80, 0.90)))), 5) expect_equal(ci(c(2, 3, NA))$CI_low, 2.02, tolerance = 1e-2) expect_warning(ci(c(2, 3))) expect_warning(ci(distribution_normal(1000), ci = 950)) x <- data.frame(replicate(4, rnorm(100))) x <- ci(x, ci = c(0.68, 0.89, 0.95)) a <- datawizard::reshape_ci(x) expect_identical(c(nrow(x), ncol(x)), c(12L, 4L)) expect_true(all(datawizard::reshape_ci(a) == x)) }) test_that("ci", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") expect_equal( ci(m, ci = c(0.5, 0.8), effects = "all")$CI_low, ci(p, ci = c(0.5, 0.8))$CI_low, tolerance = 1e-3 ) }) test_that("rope", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") expect_equal( ci(m, ci = c(0.5, 0.8), effects = "all", component = "all")$CI_low, ci(p, ci = c(0.5, 0.8))$CI_low, tolerance = 1e-3 ) }) bayestestR/tests/testthat/test-posterior.R0000644000176200001440000001155415005147105020533 0ustar liggesuserstest_that("mp-posterior-draws", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("posterior") skip_if_not_or_load_if_installed("brms") model <- insight::download_model("brms_1") x <- posterior::as_draws(model) mp <- describe_posterior(x) expect_equal(mp$Median, c(39.68234, -3.19505, -1.4936, 2.62881, -79.73344), tolerance = 1e-2, ignore_attr = TRUE) expect_equal(mp$pd, c(1, 0.9995, 0.9995, 1, 1), tolerance = 1e-2, ignore_attr = TRUE) expect_identical(mp$Parameter, c("b_Intercept", "b_wt", "b_cyl", "sigma", "lp__")) expect_identical( colnames(mp), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage" ) ) }) test_that("mp-posterior-draws_list", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("posterior") skip_if_not_or_load_if_installed("brms") model <- insight::download_model("brms_1") x <- posterior::as_draws_list(model) mp <- describe_posterior(x) expect_equal(mp$Median, c(39.68234, -3.19505, -1.4936, 2.62881, -79.73344), tolerance = 1e-2, ignore_attr = TRUE) expect_equal(mp$pd, c(1, 0.9995, 0.9995, 1, 1), tolerance = 1e-2, ignore_attr = TRUE) expect_identical(mp$Parameter, c("b_Intercept", "b_wt", "b_cyl", "sigma", "lp__")) expect_identical( colnames(mp), 
c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage" ) ) }) test_that("mp-posterior-draws_df", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("posterior") skip_if_not_or_load_if_installed("brms") model <- insight::download_model("brms_1") x <- posterior::as_draws_df(model) mp <- describe_posterior(x) expect_equal(mp$Median, c(39.68234, -3.19505, -1.4936, 2.62881, -79.73344), tolerance = 1e-2, ignore_attr = TRUE) expect_equal(mp$pd, c(1, 0.9995, 0.9995, 1, 1), tolerance = 1e-2, ignore_attr = TRUE) expect_identical(mp$Parameter, c("b_Intercept", "b_wt", "b_cyl", "sigma", "lp__")) expect_identical( colnames(mp), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage" ) ) }) test_that("mp-posterior-draws_matrix", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("posterior") skip_if_not_or_load_if_installed("brms") model <- insight::download_model("brms_1") x <- posterior::as_draws_matrix(model) mp <- describe_posterior(x) expect_equal(mp$Median, c(39.68234, -3.19505, -1.4936, 2.62881, -79.73344), tolerance = 1e-2, ignore_attr = TRUE) expect_equal(mp$pd, c(1, 0.9995, 0.9995, 1, 1), tolerance = 1e-2, ignore_attr = TRUE) expect_identical(mp$Parameter, c("b_Intercept", "b_wt", "b_cyl", "sigma", "lp__")) expect_identical( colnames(mp), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage" ) ) }) test_that("mp-posterior-draws_array", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("posterior") skip_if_not_or_load_if_installed("brms") model <- insight::download_model("brms_1") x <- posterior::as_draws_array(model) mp <- describe_posterior(x) expect_equal(mp$Median, c(39.68234, -3.19505, -1.4936, 2.62881, -79.73344), tolerance = 1e-2, ignore_attr = TRUE) expect_equal(mp$pd, c(1, 0.9995, 0.9995, 1, 1), tolerance = 1e-2, ignore_attr = TRUE) expect_identical(mp$Parameter, c("b_Intercept", "b_wt", "b_cyl", "sigma", "lp__")) expect_identical( colnames(mp), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage" ) ) }) test_that("mp-posterior-draws_rvar", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("posterior") skip_if_not_or_load_if_installed("brms") model <- insight::download_model("brms_1") # Create random vectors by adding an additional dimension: n <- 4 # length of output vector set.seed(123) x <- rvar(array(rnorm(4000 * n, mean = rep(1:n, each = 4000), sd = 1), dim = c(4000, n))) mp <- describe_posterior(x) expect_equal(mp$Median, c(0.99503, 1.99242, 2.9899, 3.99362), tolerance = 1e-2, ignore_attr = TRUE) expect_identical(mp$Parameter, c("x[1]", "x[2]", "x[3]", "x[4]")) expect_identical( colnames(mp), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage" ) ) }) bayestestR/tests/testthat/test-bayesfactor_models.R0000644000176200001440000001536614706241121022357 0ustar liggesusers# bayesfactor_models BIC -------------------------------------------------- test_that("bayesfactor_models BIC", { skip_if_not_or_load_if_installed("lme4") set.seed(444) void <- suppressMessages(capture.output({ mo1 <- lme4::lmer(Sepal.Length ~ (1 | Species), data = iris) mo2 <- 
lme4::lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) mo3 <- lme4::lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) mo4 <- lme4::lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris) mo5 <- lme4::lmer(Sepal.Length ~ Petal.Length * Petal.Width + (Petal.Length | Species), data = iris) mo4_e <- lme4::lmer(Sepal.Length ~ Petal.Length + Petal.Width + (Petal.Length | Species), data = iris[-1, ]) })) # both uses of denominator BFM1 <<- bayesfactor_models(mo2, mo3, mo4, mo1, denominator = 4) BFM2 <- bayesfactor_models(mo2, mo3, mo4, denominator = mo1) BFM3 <- bayesfactor_models(mo2, mo3, mo4, mo1, denominator = mo1) BFM4 <<- bayesfactor_models(mo2, mo3, mo4, mo5, mo1, denominator = mo1) expect_equal(BFM1, BFM2, tolerance = 1e-4, ignore_attr = TRUE) expect_equal(BFM1, BFM3, tolerance = 1e-4, ignore_attr = TRUE) expect_equal( BFM1, bayesfactor_models(list(mo2 = mo2, mo3 = mo3, mo4 = mo4, mo1 = mo1), denominator = 4), tolerance = 1e-4, ignore_attr = TRUE ) # only on same data! expect_warning(bayesfactor_models(mo1, mo2, mo4_e)) # update models expect_equal(update(BFM2, subset = c(1, 2))$log_BF, c(1, 57.3, 54.52), tolerance = 0.1) # update reference expect_equal(update(BFM2, reference = 1)$log_BF, c(0, -2.8, -6.2, -57.4), tolerance = 0.1 ) }) test_that("bayesfactor_models BIC, transformed responses", { skip_if_not_or_load_if_installed("lme4") m1 <- lm(mpg ~ 1, mtcars) m2 <- lm(sqrt(mpg) ~ 1, mtcars) BF1 <- bayesfactor_models(m1, m2, check_response = TRUE) expect_equal(BF1$log_BF[2], 2.4404 / 2, tolerance = 0.01) BF2 <- bayesfactor_models(m1, m2, check_response = FALSE) expect_false(isTRUE(all.equal(BF1, BF2))) }) test_that("bayesfactor_models BIC (unsupported / diff nobs)", { skip_if_not_or_load_if_installed("lme4") skip_on_cran() set.seed(444) fit1 <- lm(Sepal.Length ~ Sepal.Width + Petal.Length, iris) fit2a <- lm(Sepal.Length ~ Sepal.Width, iris[-1, ]) # different number of objects fit2b <- lm(Sepal.Length ~ Sepal.Width, iris) # not supported class(fit2b) <- "NOTLM" logLik.NOTLM <<- function(...) { stats:::logLik.lm(...) 
} # Should warm expect_warning(bayesfactor_models(fit1, fit2a)) # Should fail suppressWarnings(expect_message(bayesfactor_models(fit1, fit2b), "Unable")) }) # bayesfactor_models STAN --------------------------------------------- test_that("bayesfactor_models STAN", { skip_if_not_or_load_if_installed("lme4") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("bridgesampling") skip_if_not_or_load_if_installed("brms") skip_on_cran() set.seed(333) stan_bf_0 <- rstanarm::stan_glm( Sepal.Length ~ 1, data = iris, refresh = 0, iter = 500, diagnostic_file = file.path(tempdir(), "df0.csv") ) stan_bf_1 <- suppressWarnings(rstanarm::stan_glm( Sepal.Length ~ Species, data = iris, refresh = 0, iter = 500, diagnostic_file = file.path(tempdir(), "df1.csv") )) set.seed(333) # compare against bridgesampling bridge_BF <- bridgesampling::bayes_factor( bridgesampling::bridge_sampler(stan_bf_1, silent = TRUE), bridgesampling::bridge_sampler(stan_bf_0, silent = TRUE) ) set.seed(333) suppressMessages({ expect_warning({ stan_models <- bayesfactor_models(stan_bf_0, stan_bf_1) }) }) expect_s3_class(stan_models, "bayesfactor_models") expect_length(stan_models$log_BF, 2) expect_equal(stan_models$log_BF[2], log(bridge_BF$bf), tolerance = 0.1) }) test_that("bayesfactor_models BRMS", { # Checks for brms models skip_on_cran() # skip_on_ci() skip_if_not_or_load_if_installed("bridgesampling") skip_if_not_or_load_if_installed("brms") set.seed(333) stan_brms_model_0 <- suppressWarnings(brms::brm( Sepal.Length ~ 1, data = iris, iter = 500, refresh = 0, save_pars = brms::save_pars(all = TRUE), silent = 2 )) stan_brms_model_1 <- suppressWarnings(brms::brm( Sepal.Length ~ Petal.Length, data = iris, iter = 500, refresh = 0, save_pars = brms::save_pars(all = TRUE), silent = 2 )) set.seed(444) suppressWarnings(suppressMessages( expect_message( { bfm <- bayesfactor_models(stan_brms_model_0, stan_brms_model_1) }, regexp = "marginal" ) )) set.seed(444) stan_brms_model_0wc <- brms::add_criterion( stan_brms_model_0, criterion = "marglik", repetitions = 5, silent = 2 ) stan_brms_model_1wc <- brms::add_criterion( stan_brms_model_1, criterion = "marglik", repetitions = 5, silent = 2 ) suppressWarnings(expect_message( { bfmwc <- bayesfactor_models(stan_brms_model_0wc, stan_brms_model_1wc) }, regexp = NA )) expect_equal(bfmwc$log_BF, bfm$log_BF, tolerance = 0.01) }) # bayesfactor_inclusion --------------------------------------------------- test_that("bayesfactor_inclusion | BayesFactor", { skip_if_not_or_load_if_installed("lme4") skip_if_not_or_load_if_installed("BayesFactor") set.seed(444) # BayesFactor ToothGrowth$dose <- as.factor(ToothGrowth$dose) BF_ToothGrowth <- BayesFactor::anovaBF(len ~ dose * supp, ToothGrowth) expect_equal( bayesfactor_inclusion(BF_ToothGrowth), bayesfactor_inclusion(bayesfactor_models(BF_ToothGrowth)), tolerance = 1e-4, ignore_attr = TRUE ) }) test_that("bayesfactor_inclusion | LMM", { skip_if_not_or_load_if_installed("lme4") skip_if_not_or_load_if_installed("BayesFactor") # with random effects in all models: expect_true(is.nan(bayesfactor_inclusion(BFM1)["1:Species", "log_BF"])) bfinc_all <- bayesfactor_inclusion(BFM4, match_models = FALSE) expect_equal(bfinc_all$p_prior, c(1, 0.8, 0.6, 0.4, 0.2), tolerance = 0.1) expect_equal(bfinc_all$p_posterior, c(1, 1, 0.12, 0.01, 0), tolerance = 0.1) expect_equal(bfinc_all$log_BF, c(NaN, 57.651, -2.352, -4.064, -4.788), tolerance = 0.1) # plus match_models bfinc_matched <- bayesfactor_inclusion(BFM4, match_models = TRUE) 
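  # descriptive note (added): with match_models = TRUE, inclusion is computed over matched models only, so prior/posterior probabilities and BFs differ from the unmatched case above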
expect_equal(bfinc_matched$p_prior, c(1, 0.2, 0.6, 0.2, 0.2), tolerance = 0.1) expect_equal(bfinc_matched$p_posterior, c(1, 0.875, 0.125, 0.009, 0.002), tolerance = 0.1) expect_equal(bfinc_matched$log_BF, c(NaN, 58.904, -3.045, -3.573, -1.493), tolerance = 0.1) }) bayestestR/tests/testthat/test-different_models.R0000644000176200001440000000653714542333405022030 0ustar liggesuserstest_that("insight::get_predicted", { skip_on_os("mac") skip_if_not_or_load_if_installed("rstanarm") x <- suppressWarnings( insight::get_predicted( stan_glm(hp ~ mpg, data = mtcars, iter = 500, refresh = 0) ) ) rez <- point_estimate(x, use_iterations = TRUE) expect_identical(c(nrow(rez), ncol(rez)), c(32L, 4L)) rez <- point_estimate(x, use_iterations = FALSE) expect_identical(c(nrow(rez), ncol(rez)), c(1L, 3L)) rez <- hdi(x, use_iterations = TRUE) expect_identical(c(nrow(rez), ncol(rez)), c(32L, 4L)) rez <- hdi(x, use_iterations = FALSE) expect_identical(c(nrow(rez), ncol(rez)), c(1L, 3L)) rez <- eti(x, use_iterations = TRUE) expect_identical(c(nrow(rez), ncol(rez)), c(32L, 4L)) rez <- eti(x, use_iterations = FALSE) expect_identical(c(nrow(rez), ncol(rez)), c(1L, 3L)) rez <- ci(x, use_iterations = TRUE) expect_identical(c(nrow(rez), ncol(rez)), c(32L, 4L)) rez <- ci(x, use_iterations = FALSE) expect_identical(c(nrow(rez), ncol(rez)), c(1L, 3L)) rez <- map_estimate(x, use_iterations = TRUE) expect_identical(c(nrow(rez), ncol(rez)), c(32L, 2L)) rez <- map_estimate(x, use_iterations = FALSE) expect_identical(c(nrow(rez), ncol(rez)), c(1L, 2L)) rez <- p_direction(x, use_iterations = TRUE) expect_identical(c(nrow(rez), ncol(rez)), c(32L, 2L)) rez <- p_direction(x, use_iterations = FALSE) expect_identical(c(nrow(rez), ncol(rez)), c(1L, 2L)) rez <- p_map(x, use_iterations = TRUE) expect_identical(c(nrow(rez), ncol(rez)), c(32L, 2L)) rez <- p_map(x, use_iterations = FALSE) expect_identical(c(nrow(rez), ncol(rez)), c(1L, 2L)) rez <- p_significance(x, use_iterations = TRUE) expect_identical(c(nrow(rez), ncol(rez)), c(32L, 2L)) rez <- p_significance(x, use_iterations = FALSE) expect_identical(c(nrow(rez), ncol(rez)), c(1L, 2L)) rez <- rope(x, use_iterations = TRUE) expect_identical(c(nrow(rez), ncol(rez)), c(32L, 5L)) rez <- rope(x, use_iterations = FALSE) expect_identical(c(nrow(rez), ncol(rez)), c(1L, 4L)) rez <- describe_posterior(x, use_iterations = TRUE) expect_identical(c(nrow(rez), ncol(rez)), c(32L, 5L)) rez <- estimate_density(x, use_iterations = TRUE) expect_identical(c(nrow(rez), ncol(rez)), c(1024L, 2L)) }) test_that("bayesQR", { skip_on_os("mac") skip_if_not_or_load_if_installed("bayesQR") invisible(capture.output({ x <- bayesQR(Sepal.Length ~ Petal.Width, data = iris, quantile = 0.1, alasso = TRUE, ndraw = 500 ) })) rez <- p_direction(x) expect_identical(c(nrow(rez), ncol(rez)), c(2L, 2L)) rez <- p_map(x) expect_identical(c(nrow(rez), ncol(rez)), c(2L, 2L)) rez <- p_significance(x) expect_identical(c(nrow(rez), ncol(rez)), c(2L, 2L)) rez <- rope(x) expect_identical(c(nrow(rez), ncol(rez)), c(2L, 5L)) rez <- hdi(x) expect_identical(c(nrow(rez), ncol(rez)), c(2L, 4L)) rez <- eti(x) expect_identical(c(nrow(rez), ncol(rez)), c(2L, 4L)) rez <- map_estimate(x) expect_identical(c(nrow(rez), ncol(rez)), c(2L, 2L)) rez <- point_estimate(x) expect_identical(c(nrow(rez), ncol(rez)), c(2L, 4L)) rez <- describe_posterior(x) expect_identical(c(nrow(rez), ncol(rez)), c(2L, 10L)) rez <- estimate_density(x) expect_identical(c(nrow(rez), ncol(rez)), c(2048L, 3L)) }) 
bayestestR/tests/testthat/helper.R0000644000176200001440000000050414542333405017005 0ustar liggesusersskip_if_not_or_load_if_installed <- function(package, minimum_version = NULL) { testthat::skip_if_not_installed(package, minimum_version = minimum_version) suppressMessages(suppressWarnings(suppressPackageStartupMessages( require(package, warn.conflicts = FALSE, character.only = TRUE, quietly = TRUE) ))) } bayestestR/tests/testthat/test-bayesfactor_restricted.R0000644000176200001440000000312614652220356023242 0ustar liggesusers# bayesfactor_restricted data.frame --------------------------------------- test_that("bayesfactor_restricted df", { prior <- data.frame( X = distribution_normal(100), X1 = c(distribution_normal(50), distribution_normal(50)), X3 = c(distribution_normal(80), distribution_normal(20)) ) posterior <- data.frame( X = distribution_normal(100, 0.4, 0.2), X1 = distribution_normal(100, -0.2, 0.2), X3 = distribution_normal(100, 0.2) ) hyps <- c( "X > X1 & X1 > X3", "X > X1" ) bfr <- bayesfactor_restricted(posterior, hypothesis = hyps, prior = prior) expect_equal(bfr$p_prior, c(0.2, 0.5), tolerance = 0.1) expect_equal(bfr$p_posterior, c(0.31, 1), tolerance = 0.1) expect_equal(bfr$log_BF, c(0.43, 0.69), tolerance = 0.1) expect_equal(exp(bfr$log_BF), bfr$p_posterior / bfr$p_prior, tolerance = 0.1) expect_error(bayesfactor_restricted(posterior, prior, hypothesis = "Y < 0")) }) # bayesfactor_restricted RSTANARM ----------------------------------------- test_that("bayesfactor_restricted RSTANARM", { skip_on_cran() skip_if_not_installed("rstanarm") suppressWarnings( fit_stan <- rstanarm::stan_glm(mpg ~ wt + cyl + am, data = mtcars, refresh = 0, iter = 200) ) hyps <- c( "am > 0 & cyl < 0", "cyl < 0", "wt - cyl > 0" ) set.seed(444) fit_p <- suppressMessages(unupdate(fit_stan)) bfr1 <- bayesfactor_restricted(fit_stan, prior = fit_p, hypothesis = hyps) set.seed(444) bfr2 <- bayesfactor_restricted(fit_stan, hypothesis = hyps) expect_equal(bfr1, bfr2) }) bayestestR/tests/testthat/test-pd_to_p.R0000644000176200001440000000037414542333405020134 0ustar liggesuserstest_that("pd_to_p", { pds <- c(0.7, 0.95, 0.99, 0.5) expect_equal(pd_to_p(pds), c(0.6, 0.1, 0.02, 1)) expect_equal(pd_to_p(pds, direction = 1), c(0.3, 0.05, 0.01, 0.5)) expect_warning(p <- pd_to_p(0.3), "0.5") expect_equal(p, 1) }) bayestestR/tests/testthat/test-distributions.R0000644000176200001440000000312614542333405021410 0ustar liggesuserstest_that("distributions", { tolerance <- 0.01 expect_equal(mean(distribution_normal(10)), 0, tolerance = tolerance) expect_equal(length(distribution_normal(10, random = TRUE)), 10, tolerance = tolerance) expect_equal(mean(distribution_beta(10, 1, 1)), 0.5, tolerance = tolerance) expect_equal(length(distribution_normal(10, 1, 1, random = TRUE)), 10, tolerance = tolerance) expect_equal(mean(distribution_binomial(10, 0, 0.5)), 0, tolerance = tolerance) expect_equal(length(distribution_binomial(10, 0, 0.5, random = TRUE)), 10, tolerance = tolerance) expect_equal(mean(distribution_cauchy(10)), 0, tolerance = tolerance) expect_equal(length(distribution_cauchy(10, random = TRUE)), 10, tolerance = tolerance) expect_equal(mean(distribution_chisquared(10, 1)), 0.893, tolerance = tolerance) expect_equal(length(distribution_chisquared(10, 1, random = TRUE)), 10, tolerance = tolerance) expect_equal(mean(distribution_gamma(10, 1)), 0.9404, tolerance = tolerance) expect_equal(length(distribution_gamma(10, 1, random = TRUE)), 10, tolerance = tolerance) expect_equal(mean(distribution_poisson(10)), 1, 
tolerance = tolerance) expect_equal(length(distribution_poisson(10, random = TRUE)), 10, tolerance = tolerance) expect_equal(mean(distribution_student(10, 1)), 0, tolerance = tolerance) expect_equal(length(distribution_student(10, 1, random = TRUE)), 10, tolerance = tolerance) expect_equal(mean(distribution_uniform(10)), 0.5, tolerance = tolerance) expect_equal(length(distribution_uniform(10, random = TRUE)), 10, tolerance = tolerance) }) bayestestR/tests/testthat/test-rope_range.R0000644000176200001440000000313214746106624020633 0ustar liggesuserstest_that("rope_range cor", { x <- cor.test(ToothGrowth$len, ToothGrowth$dose) expect_equal(rope_range(x), c(-0.05, 0.05), tolerance = 1e-3) }) test_that("rope_range gaussian", { data(mtcars) mod <- lm(mpg ~ gear + hp, data = mtcars) expect_equal(rope_range(mod), c(-0.1 * sd(mtcars$mpg), 0.1 * sd(mtcars$mpg)), tolerance = 1e-3) }) test_that("rope_range log gaussian", { data(iris) mod <- lm(log(Sepal.Length) ~ Species, data = iris) expect_equal(rope_range(mod), c(-0.01, 0.01), tolerance = 1e-3) }) test_that("rope_range log gaussian 2", { data(mtcars) mod <- glm(mpg ~ gear + hp, data = mtcars, family = gaussian("log")) expect_equal(rope_range(mod), c(-0.01, 0.01), tolerance = 1e-3) }) test_that("rope_range logistic", { data(mtcars) mod <- glm(am ~ gear + hp, data = mtcars, family = binomial()) expect_equal(rope_range(mod), c(-1 * 0.1 * pi / sqrt(3), 0.1 * pi / sqrt(3)), tolerance = 1e-3) }) test_that("rope_range", { skip_if_not_or_load_if_installed("brms") model <- suppressWarnings(brms::brm(mpg ~ wt + gear, data = mtcars, iter = 300)) expect_equal( rope_range(model), c(-0.6026948, 0.6026948), tolerance = 0.01 ) }) test_that("rope_range (multivariate)", { skip_if_not_or_load_if_installed("brms") model <- suppressWarnings( brms::brm(brms::bf(mvbind(mpg, disp) ~ wt + gear) + brms::set_rescor(TRUE), data = mtcars, iter = 300) ) expect_equal( rope_range(model), list( mpg = c(-0.602694, 0.602694), disp = c(-12.393869, 12.393869) ), tolerance = 0.01 ) }) bayestestR/tests/testthat/test-effective_sample.R0000644000176200001440000000246415005147105022006 0ustar liggesuserstest_that("effective_sample", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("rstan") brms_1 <- insight::download_model("brms_1") skip_if(is.null(brms_1)) res <- effective_sample(brms_1) expect_equal( res, data.frame( Parameter = c("b_Intercept", "b_wt", "b_cyl"), ESS = c(5283, 2120, 2001), ESS_tail = c(3255, 2003, 2227), stringsAsFactors = FALSE ), ignore_attr = TRUE, tolerance = 1e-1 ) brms_null_1 <- insight::download_model("brms_null_1") skip_if(is.null(brms_null_1)) res <- effective_sample(brms_null_1) expect_equal( res, data.frame( Parameter = "b_Intercept", ESS = 2912, ESS_tail = 2388, stringsAsFactors = FALSE ), ignore_attr = TRUE, tolerance = 1e-1 ) brms_null_2 <- insight::download_model("brms_null_2") skip_if(is.null(brms_null_2)) res <- effective_sample(brms_null_2) expect_equal( res, data.frame( Parameter = "b_Intercept", ESS = 1098, ESS_tail = 954, stringsAsFactors = FALSE ), ignore_attr = TRUE, tolerance = 1e-1 ) }) bayestestR/tests/testthat/test-p_rope.R0000644000176200001440000000117115005147105017763 0ustar liggesuserstest_that("p_rope", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") m <- insight::download_model("stanreg_merMod_5") expect_equal( 
p_rope(as.data.frame(m)[2:4], range = list(c(0, 40), "default", c(-1, 0.8)))$p_ROPE, c(0.598, 0.002, 0.396), tolerance = 1e-3 ) expect_error( p_rope(as.data.frame(m)[2:4], range = list(c(0, 40), c(-1, 0.8))), regex = "Length of" ) expect_error( p_rope(as.data.frame(m)[2:4], range = list(c(0, 40), "a", c(-1, 0.8))), regex = "should be 'default'" ) }) bayestestR/tests/testthat/test-density_at.R0000644000176200001440000000031414542333405020645 0ustar liggesuserstest_that("density_at", { expect_equal(density_at(distribution_normal(1000), 0), 0.389, tolerance = 0.1) expect_equal(density_at(distribution_normal(1000), c(0, 1))[1], 0.389, tolerance = 0.1) }) bayestestR/tests/testthat/test-estimate_density.R0000644000176200001440000000270614640231526022063 0ustar liggesuserstest_that("estimate_density", { skip_if_not_or_load_if_installed("logspline") skip_if_not_or_load_if_installed("KernSmooth") skip_if_not_or_load_if_installed("mclust") set.seed(333) x <- distribution_normal(500, 1) # Methods density_kernel <- estimate_density(x, method = "kernel") density_logspline <- estimate_density(x, method = "logspline") density_KernSmooth <- estimate_density(x, method = "KernSmooth") density_mixture <- estimate_density(x, method = "mixture") expect_equal(mean(density_kernel$y - density_logspline$y), 0, tolerance = 0.1) expect_equal(mean(density_kernel$y - density_KernSmooth$y), 0, tolerance = 0.1) expect_equal(mean(density_kernel$y - density_mixture$y), 0, tolerance = 0.1) x <- iris x$Fac <- rep_len(c("A", "B"), 150) rez <- estimate_density(x, select = "Sepal.Length") expect_identical(dim(rez), c(1024L, 3L)) rez <- estimate_density(x, select = c("Sepal.Length", "Petal.Length")) expect_identical(dim(rez), c(2048L, 3L)) rez <- estimate_density(x, select = "Sepal.Length", by = "Species") expect_identical(dim(rez), as.integer(c(1024 * 3, 4))) rez <- estimate_density(x, select = c("Sepal.Length", "Petal.Length"), by = "Species") expect_identical(dim(rez), as.integer(c(2048 * 3, 4))) rez <- estimate_density(x, select = "Sepal.Length", by = c("Species", "Fac"), method = "KernSmooth") expect_identical(dim(rez), as.integer(c(1024 * 3 * 2, 5))) }) bayestestR/tests/testthat/test-as.data.frame.density.R0000644000176200001440000000017614542333405022572 0ustar liggesuserstest_that("as.data.frame.density", { expect_s3_class(as.data.frame(density(distribution_normal(1000))), "data.frame") }) bayestestR/tests/testthat/test-brms.R0000644000176200001440000000741515005147105017451 0ustar liggesuserstest_that("brms", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("brms") set.seed(333) model <- insight::download_model("brms_mixed_1") skip_if(is.null(model)) expect_s3_class(hdi(model), "data.frame") expect_s3_class(ci(model), "data.frame") expect_s3_class(rope(model, verbose = FALSE), "data.frame") expect_s3_class(equivalence_test(model), "equivalence_test") expect_s3_class(map_estimate(model), "data.frame") expect_s3_class(p_map(model), "data.frame") expect_s3_class(p_direction(model), "data.frame") expect_named(hdi(model), c("Parameter", "CI", "CI_low", "CI_high", "Effects", "Component")) expect_named(hdi(model, effects = "all"), c("Parameter", "CI", "CI_low", "CI_high", "Effects", "Component")) expect_identical(nrow(equivalence_test(model)), 2L) out <- describe_posterior(model, effects = "all", component = "all", centrality = "mean") suppressWarnings({ s <- summary(model) }) expect_identical(colnames(out), c( "Parameter", "Effects", "Component", 
"Mean", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Rhat", "ESS" )) expect_equal(as.vector(s$fixed[, 1, drop = TRUE]), out$Mean[1:2], tolerance = 1e-3) expect_equal(as.vector(s$fixed[, 5, drop = TRUE]), out$Rhat[1:2], tolerance = 1e-1) expect_equal(as.vector(s$random$cyl[, 1, drop = TRUE]), out$Mean[3], tolerance = 1e-3) expect_equal(as.vector(s$random$gear[, 1, drop = TRUE]), out$Mean[4:6], tolerance = 1e-3) }) test_that("brms", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("brms") set.seed(333) model <- insight::download_model("brms_1") skip_if(is.null(model)) out <- describe_posterior(model, effects = "all", component = "all", centrality = "mean") s <- summary(model) expect_identical(colnames(out), c( "Parameter", "Component", "Mean", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Rhat", "ESS" )) expect_equal(as.vector(s$fixed[, 1, drop = TRUE]), out$Mean[1:3], tolerance = 1e-3) expect_equal(as.vector(s$fixed[, 5, drop = TRUE]), out$Rhat[1:3], tolerance = 1e-1) }) test_that("brms", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("brms") set.seed(333) model <- insight::download_model("brms_mv_2") skip_if(is.null(model)) out <- describe_posterior(model, effects = "all", component = "all", centrality = "mean", test = NULL) s <- suppressWarnings(summary(model)) expect_identical(colnames(out), c( "Parameter", "Effects", "Component", "Mean", "CI", "CI_low", "CI_high", "Rhat", "ESS" )) known <- s$fixed unknown <- out[out$Effects == "fixed" & out$Component == "conditional", ] idx <- match(row.names(known), gsub("b_", "", unknown$Parameter, fixed = TRUE)) unknown <- unknown[idx, ] expect_equal(unknown$Mean, known$Estimate, ignore_attr = TRUE) expect_equal(unknown$Rhat, known$Rhat, tolerance = 1e-2, ignore_attr = TRUE) }) test_that("brms", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("brms") set.seed(333) model <- insight::download_model("brms_2") skip_if(is.null(model)) out <- describe_posterior(model, effects = "all", component = "all", centrality = "mean", test = NULL) s <- summary(model) expect_equal(as.vector(s$fixed[, 1, drop = TRUE]), out$Mean, tolerance = 1e-3) expect_equal(as.vector(s$fixed[, 5, drop = TRUE]), out$Rhat, tolerance = 1e-1) }) bayestestR/tests/testthat/test-p_significance.R0000644000176200001440000000601015005147105021435 0ustar liggesuserstest_that("p_significance", { # numeric set.seed(333) x <- distribution_normal(10000, 1, 1) ps <- p_significance(x) expect_equal(as.numeric(ps), 0.816, tolerance = 0.1) expect_s3_class(ps, "p_significance") expect_s3_class(ps, "data.frame") expect_identical(dim(ps), c(1L, 2L)) expect_identical( capture.output(print(ps)), c( "Practical Significance (threshold: 0.10)", "", "Parameter | ps", "----------------", "Posterior | 0.82" ) ) # non-symmetric intervals ps <- p_significance(x, threshold = c(0.05, 0.2)) expect_equal(as.numeric(ps), 0.7881, tolerance = 0.1) # should be identical, both ranges have same distance to the mean 1 ps <- p_significance(x, threshold = c(1.8, 1.95)) expect_equal(as.numeric(ps), 0.7881, tolerance = 0.1) set.seed(333) x <- data.frame(replicate(4, rnorm(100))) pd <- p_significance(x) expect_identical(dim(pd), c(4L, 2L)) # error: expect_error(p_significance(x, threshold = 1:3)) }) 
test_that("stanreg", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") m <- insight::download_model("stanreg_merMod_5") expect_equal( p_significance(m, effects = "all")$ps[1], 0.99, tolerance = 1e-2 ) }) test_that("brms", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("brms") m2 <- insight::download_model("brms_1") expect_equal( p_significance(m2, effects = "all")$ps, c(1.0000, 0.9985, 0.9785), tolerance = 0.01 ) out <- p_significance(m2, threshold = list(1, "default", 2), effects = "all") expect_equal( out$ps, c(1.00000, 0.99850, 0.12275), tolerance = 0.01 ) expect_equal( attributes(out)$threshold, list(c(-1, 1), c(-0.60269480520891, 0.60269480520891), c(-2, 2)), tolerance = 1e-4 ) expect_error( p_significance(m2, threshold = list(1, "a", 2), effects = "all"), regex = "should be one of" ) expect_error( p_significance(m2, threshold = list(1, 2, 3, 4), effects = "all"), regex = "Length of" ) }) test_that("stan", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") m <- insight::download_model("stanreg_merMod_5") expect_equal( p_significance(m, threshold = list("(Intercept)" = 1, period4 = 1.5, period3 = 0.5))$ps, p_significance(m, threshold = list(1, "default", "default", 0.5, 1.5))$ps, tolerance = 1e-4 ) expect_error( p_significance(m, threshold = list("(Intercept)" = 1, point = 1.5, period3 = 0.5)), regex = "Not all elements" ) expect_error( p_significance(m, threshold = list(1, "a", 2), effects = "all"), regex = "should be one of" ) expect_error( p_significance(m, threshold = list(1, 2, 3, 4), effects = "all"), regex = "Length of" ) }) bayestestR/tests/testthat/test-check_prior.R0000644000176200001440000001111415005147105020765 0ustar liggesusersskip_on_os(os = "mac") test_that("check_prior - stanreg", { skip_on_cran() skip_on_os(os = c("windows", "mac")) skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("BH") skip_if_not_or_load_if_installed("RcppEigen") skip_if_not_or_load_if_installed("brms") set.seed(333) model1 <- insight::download_model("stanreg_lm_1") expect_identical( check_prior(model1)$Prior_Quality, c("informative", "uninformative") ) expect_identical( check_prior(model1, method = "lakeland")$Prior_Quality, c("informative", "informative") ) }) test_that("check_prior - brms (linux)", { skip("TODO: check hard-coded values") skip_on_cran() skip_on_os(os = c("windows", "mac", "solaris")) skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("BH") skip_if_not_or_load_if_installed("RcppEigen") skip_if_not_or_load_if_installed("brms") # all `brms` examples in circus have uniform prior distribution, so # need to use a custom example here set.seed(333) suppressMessages({ model2 <- brm(rating ~ period + carry + cs(treat), data = inhaler, family = sratio("logit"), prior = set_prior("normal(0,5)"), chains = 2, silent = TRUE, refresh = 0 ) }) expect_warning(expect_identical( check_prior(model2)$Prior_Quality, c( "uninformative", "informative", "informative", "uninformative", "uninformative", "not determinable", "not determinable", "not determinable" ) )) expect_warning(expect_identical( check_prior(model2, method = "lakeland")$Prior_Quality, c( "informative", 
"informative", "informative", "informative", "informative", "not determinable", "not determinable", "not determinable" ) )) }) test_that("check_prior - brms (linux)", { skip_on_cran() skip_on_os(os = c("windows", "mac", "solaris")) skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("BH") skip_if_not_or_load_if_installed("RcppEigen") skip_if_not_or_load_if_installed("brms") data(inhaler, package = "brms") # all `brms` examples in circus have uniform prior distribution, so # need to use a custom example here set.seed(333) model2 <- brm(rating ~ period + carry + cs(treat), data = inhaler, family = sratio("logit"), prior = set_prior("normal(0,5)"), chains = 2, silent = TRUE, refresh = 0 ) # TODO: check hard-coded values expect_warning(expect_identical( check_prior(model2)$Prior_Quality, c( "uninformative", "informative", "informative", "uninformative", "uninformative", "not determinable", "not determinable", "not determinable" ) )) ## FIXME: this test returns inconsistent results across platforms and OSs # expect_warning(expect_identical( # check_prior(model2, method = "lakeland")$Prior_Quality, # c( # "informative", "misinformative", "informative", "informative", # "informative", "not determinable", "not determinable", "not determinable" # ) # )) }) test_that("check_prior - brms (not linux or windows)", { skip_on_cran() skip_on_os(os = c("linux", "windows", "mac")) skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("BH") skip_if_not_or_load_if_installed("RcppEigen") skip_if_not_or_load_if_installed("brms") # all `brms` examples in circus have uniform prior distribution, so # need to use a custom example here set.seed(333) suppressMessages({ model2 <- brm(rating ~ period + carry + cs(treat), data = inhaler, family = sratio("logit"), prior = set_prior("normal(0,5)"), chains = 2, silent = TRUE, refresh = 0 ) }) expect_warning(expect_identical( check_prior(model2)$Prior_Quality, c( "uninformative", "uninformative", "informative", "uninformative", "uninformative", "not determinable", "not determinable", "not determinable" ) )) expect_warning(expect_identical( check_prior(model2, method = "lakeland")$Prior_Quality, c( "informative", "informative", "informative", "informative", "informative", "not determinable", "not determinable", "not determinable" ) )) }) bayestestR/tests/testthat/test-rope.R0000644000176200001440000001524615052670366017470 0ustar liggesuserstest_that("rope, vector", { expect_equal( as.numeric(rope(distribution_normal(1000, 0, 1), verbose = FALSE)), 0.084, tolerance = 0.01 ) expect_identical( equivalence_test(distribution_normal(1000, 0, 1))$ROPE_Equivalence, "Undecided" ) expect_length( capture.output(print(equivalence_test(distribution_normal(1000)))), 9 ) expect_length( capture.output(print(equivalence_test( distribution_normal(1000), ci = c(0.8, 0.9) ))), 14 ) expect_equal( as.numeric(rope(distribution_normal(1000, 2, 0.01), verbose = FALSE)), 0, tolerance = 0.01 ) expect_identical( equivalence_test(distribution_normal(1000, 2, 0.01))$ROPE_Equivalence, "Rejected" ) expect_equal( as.numeric(rope(distribution_normal(1000, 0, 0.001), verbose = FALSE)), 1, tolerance = 0.01 ) expect_identical( equivalence_test(distribution_normal(1000, 0, 0.001))$ROPE_Equivalence, "Accepted" ) expect_identical( equivalence_test( distribution_normal(1000, 0, 0.001), ci = 1 
)$ROPE_Equivalence, "Accepted" ) expect_equal( rope( rnorm(1000, mean = 0, sd = 3), ci = c(0.1, 0.5, 0.9), verbose = FALSE )$CI, c(0.1, 0.5, 0.9) ) x <- equivalence_test(distribution_normal(1000, 1, 1), ci = c(0.50, 0.99)) expect_equal(x$ROPE_Percentage[2], 0.0484, tolerance = 0.01) expect_identical(x$ROPE_Equivalence[2], "Undecided") expect_error(rope(distribution_normal(1000, 0, 1), range = c(0.0, 0.1, 0.2))) set.seed(333) expect_s3_class( rope(distribution_normal(1000, 0, 1), verbose = FALSE), "rope" ) expect_error(rope(distribution_normal(1000, 0, 1), range = c("A", 0.1))) expect_equal( as.numeric(rope(distribution_normal(1000, 0, 1), range = c(-0.1, 0.1))), 0.084, tolerance = 0.01 ) set.seed(1234) x <- rnorm(4000, sd = 5) out <- rope(x, complement = TRUE) expect_named( out, c( "CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Superiority_Percentage", "Inferiority_Percentage" ) ) expect_snapshot(print(out)) out <- p_rope(x, complement = TRUE) expect_named( out, c("ROPE_low", "ROPE_high", "p_ROPE", "p_Superiority", "p_Inferiority") ) expect_equal(out$p_Superiority, 0.497, tolerance = 1e-3) expect_equal(out$p_Inferiority, 0.4885, tolerance = 1e-3) out <- suppressWarnings(capture.output(describe_posterior( x, test = "p_rope", complement = TRUE ))) expect_identical( out[3], "Parameter | Median | 95% CI | ROPE | p (ROPE) | p (Superiority) | p (Inferiority)" ) }) test_that("rope, bayes", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") expect_equal( # fix range to -.1/.1, to compare to data frame method rope( m, range = c(-0.1, 0.1), effects = "all", verbose = FALSE )$ROPE_Percentage, rope(p, verbose = FALSE)$ROPE_Percentage, tolerance = 1e-3 ) # list range expect_equal( rope( m, range = list(c(-1, 0.1), "default", "default", c(-1, 1), c(-1.5, -1)) )$ROPE_Percentage, c(0.15823, 1, 0, 0.3903, 0.38186), tolerance = 1e-3 ) # named elements, chooses "default" for unnamed expect_equal( rope( m, range = list(c(-1, 0.1), "default", "default", c(-1, 1), c(-1.5, -1)) )$ROPE_Percentage, rope( m, range = list( "(Intercept)" = c(-1, 0.1), period4 = c(-1.5, -1), period3 = c(-1, 1) ) )$ROPE_Percentage, tolerance = 1e-3 ) expect_error( rope(m, range = list(c(-0.1, 0.1), c(2, 2))), regex = "Length of" ) expect_error( rope(m, range = list(c(-0.1, 0.1), c(2, 2), "default", "a", c(1, 3))), regex = "should be 'default'" ) expect_error( rope( m, range = list( "(Intercept)" = c(-1, 0.1), pointout = c(-1.5, -1), period3 = c(-1, 1) ) ), regex = "Not all elements" ) }) test_that("rope, get_parameters", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") expect_equal( rope(m, effects = "all", component = "all", verbose = FALSE)$ROPE_Percentage, rope(p, verbose = FALSE)$ROPE_Percentage, tolerance = 1e-3 ) }) test_that("rope BayesFactor", { skip_on_cran() skip_on_os(c("linux", "mac")) skip_if_not_or_load_if_installed("BayesFactor") mods <- regressionBF(mpg ~ am + cyl, mtcars, progress = FALSE) rx <- suppressMessages(rope(mods, verbose = FALSE)) expect_equal(rx$ROPE_high, -rx$ROPE_low, tolerance = 0.01) expect_equal(rx$ROPE_high[1], 0.6026948, tolerance = 0.01) }) test_that("rope 
(brms)", { skip_on_cran() skip_if_not_or_load_if_installed("brms") skip_on_os(c("windows", "mac")) set.seed(123) model <- suppressWarnings(brms::brm(mpg ~ wt + gear, data = mtcars, iter = 500)) rope <- rope(model, verbose = FALSE) expect_equal(rope$ROPE_high, -rope$ROPE_low, tolerance = 0.01) expect_equal(rope$ROPE_high[1], 0.6026948) expect_equal(rope$ROPE_Percentage, c(0.00, 0.00, 0.50), tolerance = 0.1) out <- describe_posterior(model, complement = TRUE) expect_equal(out$Superiority_Percentage, c(1, 0, 0.137895), tolerance = 0.01) expect_named( out, c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Superiority_Percentage", "Inferiority_Percentage", "Rhat", "ESS" ) ) }) test_that("rope (brms, multivariate)", { skip_on_cran() skip_if_not_or_load_if_installed("brms") skip_on_os(c("windows", "mac")) model <- suppressWarnings(brm( bf(mvbind(mpg, disp) ~ wt + gear) + set_rescor(TRUE), data = mtcars, iter = 500, refresh = 0 )) rope <- rope(model, verbose = FALSE) expect_equal(rope$ROPE_high, -rope$ROPE_low, tolerance = 0.01) expect_equal(rope$ROPE_high[1], 0.6026948, tolerance = 0.01) expect_equal(rope$ROPE_high[4], 12.3938694, tolerance = 0.01) expect_equal( rope$ROPE_Percentage, c(0, 0, 0.493457, 0.072897, 0, 0.508411), tolerance = 0.1 ) }) bayestestR/tests/testthat/test-BFBayesFactor.R0000644000176200001440000000632114561426326021126 0ustar liggesusersskip_on_os("linux") test_that("p_direction", { skip_if_not_or_load_if_installed("BayesFactor") set.seed(333) x <- BayesFactor::correlationBF(y = iris$Sepal.Length, x = iris$Sepal.Width) expect_equal(as.numeric(p_direction(x)), 0.9225, tolerance = 1) }) test_that("p_direction: BF t.test one sample", { skip_if_not_or_load_if_installed("BayesFactor") data(sleep) diffScores <- sleep$extra[1:10] - sleep$extra[11:20] x <- BayesFactor::ttestBF(x = diffScores) expect_equal(as.numeric(p_direction(x)), 0.99675, tolerance = 1) }) test_that("p_direction: BF t.test two samples", { skip_if_not_or_load_if_installed("BayesFactor") data(chickwts) chickwts <- chickwts[chickwts$feed %in% c("horsebean", "linseed"), ] chickwts$feed <- factor(chickwts$feed) x <- BayesFactor::ttestBF(formula = weight ~ feed, data = chickwts) expect_equal(as.numeric(p_direction(x)), 1, tolerance = 1) }) test_that("p_direction: BF t.test meta-analytic", { skip_if_not_or_load_if_installed("BayesFactor") t <- c(-0.15, 2.39, 2.42, 2.43) N <- c(100, 150, 97, 99) x <- BayesFactor::meta.ttestBF(t = t, n1 = N, rscale = 1) expect_equal(as.numeric(p_direction(x)), 0.99975, tolerance = 1) }) skip_if_not_or_load_if_installed("BayesFactor") # --------------------------- # "BF ANOVA" data(ToothGrowth) ToothGrowth$dose <- factor(ToothGrowth$dose) levels(ToothGrowth$dose) <- c("Low", "Medium", "High") x <- BayesFactor::anovaBF(len ~ supp * dose, data = ToothGrowth) test_that("p_direction", { expect_equal(as.numeric(p_direction(x)), c(1, 0.95675, 0.95675, 1, 1), tolerance = 0.1) }) # BF ANOVA Random --------------------------- data(puzzles) x <- BayesFactor::anovaBF(RT ~ shape * color + ID, data = puzzles, whichRandom = "ID") test_that("p_direction", { expect_equal(as.numeric(p_direction(x)), c( 1, 0.98125, 0.98125, 0.995, 0.67725, 0.8285, 0.68425, 0.99975, 0.6725, 0.9995, 0.60275, 0.99525, 0.7615, 0.763, 1, 1, 1, 1 ), tolerance = 0.1) }) # --------------------------- # "BF lm" x <- BayesFactor::lmBF(len ~ supp + dose, data = ToothGrowth) test_that("p_direction", { expect_equal(as.numeric(p_direction(x)), c(1, 0.9995, 0.9995, 1, 0.903, 
1, 1, 1, 1), tolerance = 0.1) }) x2 <- BayesFactor::lmBF(len ~ supp + dose + supp:dose, data = ToothGrowth) x <- x / x2 test_that("p_direction", { expect_equal(as.numeric(p_direction(x)), c(1, 0.99925, 0.99925, 1, 0.89975, 1, 1, 1, 1), tolerance = 0.1) }) test_that("rope_range", { skip_if_not_or_load_if_installed("BayesFactor") x <- BayesFactor::lmBF(len ~ supp + dose, data = ToothGrowth) expect_equal(rope_range(x)[2], sd(ToothGrowth$len) / 10, tolerance = 1e-4) x <- BayesFactor::ttestBF( ToothGrowth$len[ToothGrowth$supp == "OJ"], ToothGrowth$len[ToothGrowth$supp == "VC"] ) expect_equal(rope_range(x)[2], sd(ToothGrowth$len) / 10, tolerance = 1e-4) x <- BayesFactor::ttestBF(formula = len ~ supp, data = ToothGrowth) expect_equal(rope_range(x)[2], sd(ToothGrowth$len) / 10, tolerance = 1e-4) # else x <- BayesFactor::correlationBF(ToothGrowth$len, as.numeric(ToothGrowth$dose)) expect_equal(rope_range(x, verbose = FALSE), c(-0.05, 0.05), tolerance = 1e-4) }) bayestestR/tests/testthat/test-point_estimate.R0000644000176200001440000000304315005147105021523 0ustar liggesuserstest_that("point_estimate: stanreg", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") expect_equal( point_estimate(m, effects = "all")$Median, point_estimate(p)$Median, tolerance = 1e-3 ) }) test_that("point_estimate: brms", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") expect_equal( point_estimate(m, effects = "all", component = "all")$Median, point_estimate(p)$Median, tolerance = 1e-3 ) }) # edge cases test_that("point_estimate, constant vectors or sparse samples", { x <- c(2.5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.2, 2.2, 2.2, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5) out <- point_estimate(x, centrality = "MAP", verbose = FALSE) expect_true(is.na(out$MAP)) out <- point_estimate(c(3, 3, 3), centrality = "MAP", verbose = FALSE) expect_identical(out$MAP, 3) expect_message( point_estimate(x, centrality = "MAP", verbose = TRUE), regex = "Could not calculate MAP estimate" ) expect_message( point_estimate(c(3, 3, 3), centrality = "MAP", verbose = TRUE), regex = "Data is singular" ) }) bayestestR/tests/testthat/test-data.frame-with-rvar.R0000644000176200001440000001021114746106624022431 0ustar liggesuserstest_that("data.frame w/ rvar_col descrive_posterior etc", { # skip_on_ci() skip_on_cran() skip_if_not_installed("posterior") dfx <- data.frame(mu = c(0, 0.5, 1), sigma = c(1, 0.5, 0.25)) dfx$my_rvar <- posterior::rvar_rng(rnorm, 3, mean = dfx$mu, sd = dfx$sigma) dfx$other_rvar <- posterior::rvar_rng(rnorm, 3, mean = dfx$mu + 0.5, sd = dfx$sigma - 0.1) dfx ## Errors expect_error(p_direction(dfx, rvar_col = "mu")) expect_error(p_direction(dfx, rvar_col = "my_rvarrrrrr")) ## describe_posterior res <- describe_posterior(dfx, rvar_col = "my_rvar", centrality = "MAP", ci_method = "hdi", ci = 0.8, test = c("pd", "p_map", "rope", "equivalence_test"), rope_ci = 1, rope_range = c(-1, 0.5) ) res.ref <- describe_posterior(dfx$my_rvar, centrality = "MAP", ci_method = "hdi", ci = 0.8, test = c("pd", "p_map", "rope", "equivalence_test"), rope_ci = 1, rope_range = c(-1, 0.5) ) expect_true(all(c("mu", "sigma") %in% colnames(res))) 
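  # The grid columns (mu, sigma) are carried along; apart from them, the result
  # should match a reference computed directly on the rvar column itself: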
expect_equal(res[setdiff(colnames(res), c("mu", "sigma"))], res.ref[setdiff(colnames(res.ref), "Parameter")], ignore_attr = TRUE ) ## CIs res <- eti(dfx, rvar_col = "my_rvar") res.ref <- eti(dfx$my_rvar) expect_true(all(c("mu", "sigma") %in% colnames(res))) expect_identical(nrow(format(res)), 3L) expect_identical(ncol(format(res)), 3L) expect_equal(res[setdiff(colnames(res), c("mu", "sigma"))], res.ref[setdiff(colnames(res.ref), "Parameter")], ignore_attr = TRUE ) res <- eti(dfx, rvar_col = "my_rvar", ci = c(0.8, 0.95)) res.ref <- eti(dfx$my_rvar, ci = c(0.8, 0.95)) expect_true(all(c("mu", "sigma") %in% colnames(res))) expect_identical( as.data.frame(res[c("mu", "sigma")]), data.frame( mu = c(0, 0, 0.5, 0.5, 1, 1), sigma = c(1, 1, 0.5, 0.5, 0.25, 0.25) ) ) expect_identical(nrow(format(res)), 3L) expect_identical(ncol(format(res)), 4L) expect_equal(res[setdiff(colnames(res), c("mu", "sigma"))], res.ref[setdiff(colnames(res.ref), "Parameter")], ignore_attr = TRUE ) ## estimate_density res <- estimate_density(dfx, rvar_col = "my_rvar") res.ref <- estimate_density(dfx$my_rvar) expect_true(all(c("mu", "sigma") %in% colnames(res))) expect_equal(res[setdiff(colnames(res), c("mu", "sigma"))], res.ref[setdiff(colnames(res.ref), "Parameter")], ignore_attr = TRUE ) }) test_that("data.frame w/ rvar_col bayesfactors", { # skip_on_ci() skip_on_cran() skip_if_not_installed("posterior") skip_if_not_installed("logspline") dfx <- data.frame(mu = c(0, 0.5, 1), sigma = c(1, 0.5, 0.25)) dfx$my_rvar <- posterior::rvar_rng(rnorm, 3, mean = dfx$mu, sd = dfx$sigma) dfx$other_rvar <- posterior::rvar_rng(rnorm, 3, mean = dfx$mu + 0.5, sd = dfx$sigma - 0.1) dfx ## SIs res <- si(dfx, rvar_col = "my_rvar", prior = "other_rvar", verbose = FALSE) res.ref <- si(dfx$my_rvar, prior = dfx$other_rvar, verbose = FALSE) expect_true(all(c("mu", "sigma") %in% colnames(res))) expect_identical(nrow(format(res)), 3L) expect_identical(ncol(format(res)), 3L) expect_equal(res[setdiff(colnames(res), c("mu", "sigma"))], res.ref[setdiff(colnames(res.ref), "Parameter")], ignore_attr = TRUE ) res <- si(dfx, rvar_col = "my_rvar", prior = "other_rvar", BF = c(1, 3), verbose = FALSE ) res.ref <- si(dfx$my_rvar, prior = dfx$other_rvar, BF = c(1, 3), verbose = FALSE ) expect_true(all(c("mu", "sigma") %in% colnames(res))) expect_identical(nrow(format(res)), 3L) expect_identical(ncol(format(res)), 4L) expect_equal(format(res[setdiff(colnames(res), c("mu", "sigma"))]), format(res.ref[setdiff(colnames(res.ref), "Parameter")]), ignore_attr = TRUE ) ## bayesfactor_parameters res <- bayesfactor_parameters(dfx, rvar_col = "my_rvar", prior = "other_rvar", verbose = FALSE ) res.ref <- bayesfactor_parameters(dfx$my_rvar, prior = dfx$other_rvar, verbose = FALSE ) expect_equal(res[setdiff(colnames(res), c("mu", "sigma"))], res.ref[setdiff(colnames(res.ref), "Parameter")], ignore_attr = TRUE ) }) bayestestR/tests/testthat/test-rstanarm.R0000644000176200001440000001154215005147105020331 0ustar liggesuserstest_that("rstanarm", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") set.seed(333) model <- insight::download_model("stanreg_lm_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.602, tolerance = 0.1) model <- insight::download_model("stanreg_meanfield_lm_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.602, tolerance = 0.1) model <- insight::download_model("stanreg_fullrank_lm_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.602, tolerance 
= 0.1) model <- insight::download_model("stanreg_lmerMod_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.097, tolerance = 0.1) model <- insight::download_model("stanreg_glm_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.18, tolerance = 0.1) model <- insight::download_model("stanreg_merMod_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.18, tolerance = 0.1) model <- insight::download_model("stanreg_gamm4_1") expect_equal(rope_range(model, verbose = FALSE)[1], -0.043, tolerance = 0.1) model <- insight::download_model("stanreg_gam_1") invisible(capture.output( expect_warning(params <- describe_posterior(model, centrality = "all", test = "all", dispersion = TRUE )) )) expect_equal(c(nrow(params), ncol(params)), c(4, 22)) expect_s3_class(hdi(model), "data.frame") expect_s3_class(ci(model), "data.frame") expect_s3_class(rope(model, verbose = FALSE), "data.frame") expect_true(inherits(equivalence_test(model), "equivalence_test")) expect_s3_class(map_estimate(model), "data.frame") expect_s3_class(p_map(model), "data.frame") expect_s3_class(p_direction(model), "data.frame") expect_error(equivalence_test(model, range = c(0.1, 0.3, 0.5))) }) test_that("rstanarm", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") set.seed(333) model <- insight::download_model("stanreg_glm_3") out <- describe_posterior(model, effects = "all", component = "all", centrality = "mean") s <- summary(model) expect_identical(colnames(out), c( "Parameter", "Mean", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Rhat", "ESS" )) expect_equal(as.vector(s[1:4, 1, drop = TRUE]), out$Mean, tolerance = 1e-3) expect_equal(as.vector(s[1:4, 8, drop = TRUE]), out$Rhat, tolerance = 1e-1) }) test_that("rstanarm", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") set.seed(333) model <- insight::download_model("stanreg_merMod_3") out <- describe_posterior(model, effects = "all", component = "all", centrality = "mean") s <- summary(model) expect_named( out, c( "Parameter", "Effects", "Mean", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Rhat", "ESS" ) ) expect_equal(as.vector(s[c(1:4, 8), 1, drop = TRUE]), out$Mean, tolerance = 1e-3) expect_equal(as.vector(s[c(1:4, 8), 8, drop = TRUE]), out$Rhat, tolerance = 1e-1) out <- describe_posterior(model, effects = "full", component = "all", centrality = "mean") s <- summary(model) expect_equal(as.vector(s[1:8, 1, drop = TRUE]), out$Mean, tolerance = 1e-3) expect_equal(as.vector(s[1:8, 8, drop = TRUE]), out$Rhat, tolerance = 1e-1) }) test_that("rstanarm", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") set.seed(333) model <- insight::download_model("stanmvreg_1") out <- describe_posterior(model, effects = "fixed", component = "all", centrality = "mean", test = NULL) s <- summary(model) expect_named( out, c( "Parameter", "Response", "Mean", "CI", "CI_low", "CI_high", "Rhat", "ESS" ) ) expect_equal(as.vector(s[c(1:2, 5:7), 1, drop = TRUE]), out$Mean, tolerance = 1e-3) expect_equal(as.vector(s[c(1:2, 5:7), 10, drop = TRUE]), out$Rhat, tolerance = 1e-1) }) test_that("rstanarm", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") 
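  # With priors = TRUE, the summary should also report the prior distribution,
  # location and scale for each fixed effect: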
set.seed(333) model <- insight::download_model("stanmvreg_1") out <- describe_posterior( model, effects = "fixed", component = "all", centrality = "mean", test = NULL, priors = TRUE ) expect_identical(colnames(out), c( "Parameter", "Response", "Mean", "CI", "CI_low", "CI_high", "Rhat", "ESS", "Prior_Distribution", "Prior_Location", "Prior_Scale" )) expect_equal(nrow(out), 5) }) bayestestR/tests/testthat/test-simulate_data.R0000644000176200001440000000145414640231526021324 0ustar liggesusersskip_if_not_installed("MASS") test_that("simulate_correlation", { set.seed(333) data <- simulate_correlation(r = 0.5, n = 50) expect_equal(as.numeric(cor.test(data$V1, data$V2)$estimate), 0.5, tolerance = 0.001) data <- simulate_correlation(r = 0.5, n = 50, mean = c(0, 1), sd = c(0.7, 1.7)) expect_equal(as.numeric(cor.test(data$V1, data$V2)$estimate), 0.5, tolerance = 0.001) expect_equal(c(mean(data$V1), sd(data$V1)), c(0, 0.7), tolerance = 0.001) expect_equal(c(mean(data$V2), sd(data$V2)), c(1, 1.7), tolerance = 0.001) cor_matrix <- matrix( c( 1.0, 0.2, 0.4, 0.2, 1.0, 0.3, 0.4, 0.3, 1.0 ), nrow = 3 ) data <- simulate_correlation(r = cor_matrix) expect_equal(matrix(cor(data), nrow = 3), cor_matrix, tolerance = 0.001) }) bayestestR/tests/testthat/test-blavaan.R0000644000176200001440000000623215003416760020112 0ustar liggesuserstest_that("blavaan, all", { skip_on_cran() skip_if_not_or_load_if_installed("blavaan") skip_if_not_or_load_if_installed("lavaan") skip_if_not_or_load_if_installed("rstan") skip_if_not_or_load_if_installed("cmdstanr") skip_if_not(dir.exists(cmdstanr::cmdstan_default_install_path())) data("PoliticalDemocracy", package = "lavaan") model <- " # latent variable definitions dem60 =~ y1 + a*y2 dem65 =~ y5 + a*y6 # regressions dem65 ~ dem60 # residual correlations y1 ~~ y5 " model2 <- " # latent variable definitions dem60 =~ y1 + a*y2 dem65 =~ y5 + a*y6 # regressions dem65 ~ 0*dem60 # residual correlations y1 ~~ 0*y5 " suppressWarnings(capture.output({ bfit <- blavaan::bsem(model, data = PoliticalDemocracy, n.chains = 1, burnin = 50, sample = 100 ) bfit2 <- blavaan::bsem(model2, data = PoliticalDemocracy, n.chains = 1, burnin = 50, sample = 100 ) })) x <- point_estimate(bfit, centrality = "all", dispersion = TRUE) expect_true(all(c("Median", "MAD", "Mean", "SD", "MAP", "Component") %in% colnames(x))) expect_identical(nrow(x), 10L) x <- eti(bfit) expect_identical(nrow(x), 10L) x <- hdi(bfit) expect_identical(nrow(x), 10L) x <- p_direction(bfit) expect_identical(nrow(x), 10L) x <- rope(bfit, range = c(-0.1, 0.1)) expect_identical(nrow(x), 10L) x <- p_rope(bfit, range = c(-0.1, 0.1)) expect_identical(nrow(x), 10L) x <- p_map(bfit) expect_identical(nrow(x), 10L) x <- p_significance(bfit, threshold = c(-0.1, 0.1)) expect_identical(nrow(x), 10L) x <- equivalence_test(bfit, range = c(-0.1, 0.1)) expect_identical(nrow(x), 10L) x <- estimate_density(bfit) expect_length(unique(x$Parameter), 10) ## Bayes factors ---- # For these models, no BF available, see #627 expect_warning(bayesfactor_models(bfit, bfit2), regex = "Bayes factors might not be precise") ## FIXME: rror in `Yp[[p]]$SY + tcrossprod(Yp[[p]]$MY - Mu[var.idx])`: ## ! 
non-conformable arrays # bfit_prior <- unupdate(bfit) # capture.output(x <- expect_warning(bayesfactor_parameters(bfit, prior = bfit_prior))) # expect_identical(nrow(x), 10L) # x <- expect_warning(si(bfit, prior = bfit_prior)) # expect_identical(nrow(x), 10L) ## Prior/posterior checks ---- suppressWarnings(x <- check_prior(bfit)) expect_identical(nrow(x), 9L) ## FIXME: Error in `Yp[[p]]$SY + tcrossprod(Yp[[p]]$MY - Mu[var.idx])`: ## ! non-conformable arrays # x <- check_prior(bfit, simulate_priors = FALSE) # expect_identical(nrow(x), 10L) x <- diagnostic_posterior(bfit) expect_identical(nrow(x), 10L) ## FIXME: no longer 13, but now 9? x <- simulate_prior(bfit) expect_identical(ncol(x), 9L) # YES this is 13! We have two parameters with the same prior. ## FIXME: no longer 13, but now 9? x <- describe_prior(bfit) expect_identical(nrow(x), 9L) # YES this is 13! We have two parameters with the same prior. x <- describe_posterior(bfit, test = "all", rope_range = c(-0.1, 0.1)) expect_identical(nrow(x), 10L) }) bayestestR/tests/testthat/test-si.R0000644000176200001440000000356014542333405017123 0ustar liggesuserstest_that("si.numeric", { skip_if_not_installed("logspline") set.seed(333) prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = 0.5, sd = 0.3) expect_warning( { res <- si(posterior, prior) }, regexp = "40" ) expect_equal(res$CI_low, 0.043, tolerance = 0.02) expect_equal(res$CI_high, 1.053103, tolerance = 0.02) expect_s3_class(res, "bayestestR_si") res <- si(posterior, prior, BF = 3, verbose = FALSE) expect_equal(res$CI_low, 0.35, tolerance = 0.02) expect_equal(res$CI_high, 0.759, tolerance = 0.02) res <- si(posterior, prior, BF = 100, verbose = FALSE) expect_true(all(is.na(res$CI_low))) expect_true(all(is.na(res$CI_high))) res <- si(posterior, prior, BF = c(1 / 3, 1, 3), verbose = FALSE) expect_equal(res$CI, c(1 / 3, 1, 3), tolerance = 0.02) expect_equal(res$CI_low, c(-0.1277, 0.0426, 0.3549), tolerance = 0.02) expect_equal(res$CI_high, c(1.213, 1.053, 0.759), tolerance = 0.02) }) test_that("si.rstanarm", { skip_on_cran() skip_if_not_installed("rstanarm") data(sleep) contrasts(sleep$group) <- contr.equalprior_pairs # See vignette stan_model <- suppressWarnings(rstanarm::stan_glmer(extra ~ group + (1 | ID), data = sleep, refresh = 0)) set.seed(333) stan_model_p <- update(stan_model, prior_PD = TRUE) res1 <- si(stan_model, stan_model_p, verbose = FALSE) set.seed(333) res2 <- si(stan_model, verbose = FALSE) expect_s3_class(res1, "bayestestR_si") expect_equal(res1, res2, ignore_attr = TRUE) skip_if_not_installed("emmeans") set.seed(123) group_diff <- suppressWarnings(pairs(emmeans::emmeans(stan_model, ~group))) res3 <- si(group_diff, prior = stan_model, verbose = FALSE) expect_equal(res3$CI_low, -2.746, tolerance = 0.3) expect_equal(res3$CI_high, -0.4, tolerance = 0.3) }) bayestestR/tests/testthat/test-overlap.R0000644000176200001440000000045214542333405020155 0ustar liggesuserstest_that("overlap", { set.seed(333) x <- distribution_normal(1000, 2, 0.5) y <- distribution_normal(1000, 0, 1) expect_equal(as.numeric(overlap(x, y)), 0.185, tolerance = 0.01) out <- capture.output(print(overlap(x, y))) expect_identical(out, c("# Overlap", "", "18.6%")) }) bayestestR/tests/testthat/test-describe_posterior.R0000644000176200001440000004574315054263473022415 0ustar liggesuserstest_that("describe_posterior", { skip_if(getRversion() < "4.2") skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") 
skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") skip_on_os("linux") set.seed(333) # numeric ------------------------------------------------- x <- distribution_normal(4000) expect_silent(describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all", ci = 0.89, verbose = FALSE )) rez <- as.data.frame(suppressWarnings(describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all", ci = 0.89 ))) expect_identical(dim(rez), c(1L, 19L)) expect_identical(colnames(rez), c( "Parameter", "Median", "MAD", "Mean", "SD", "MAP", "CI", "CI_low", "CI_high", "p_MAP", "pd", "p_ROPE", "ps", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "ROPE_Equivalence", "log_BF" )) expect_warning(expect_warning(expect_warning(describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9) ), regex = "ROPE range"), regex = "Prior not specified"), regex = "not be precise") rez <- suppressWarnings(describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9) )) expect_identical(dim(rez), c(2L, 19L)) rez <- describe_posterior( x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method = "quantile", verbose = FALSE ) expect_identical(dim(rez), c(1L, 4L)) # dataframes ------------------------------------------------- x <- data.frame(replicate(4, rnorm(100))) expect_warning(expect_warning( describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all" ) )) rez <- suppressWarnings(describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all")) expect_identical(dim(rez), c(4L, 19L)) expect_warning(expect_warning( describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9) ) )) rez <- suppressWarnings(describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9) )) expect_identical(dim(rez), c(8L, 19L)) rez <- describe_posterior( x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method = "quantile" ) expect_identical(dim(rez), c(4L, 4L)) }) test_that("describe_posterior", { skip_on_os(c("mac", "linux")) skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") set.seed(333) # Rstanarm x <- rstanarm::stan_glm(mpg ~ wt, data = mtcars, refresh = 0, iter = 500) expect_warning( { rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all") }, regex = "not be precise" ) expect_identical(dim(rez), c(2L, 21L)) expect_identical(colnames(rez), c( "Parameter", "Median", "MAD", "Mean", "SD", "MAP", "CI", "CI_low", "CI_high", "p_MAP", "pd", "p_ROPE", "ps", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "ROPE_Equivalence", "log_BF", "Rhat", "ESS" )) expect_warning( { rez <- describe_posterior( x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9) ) }, regex = "not be precise" ) expect_identical(dim(rez), c(4L, 21L)) # allow multiple ropes rez <- describe_posterior(x, rope_range = list(c(-1, 1), "default")) expect_identical(rez$ROPE_low, c(-1, -0.1), tolerance = 1e-3) expect_identical(rez$ROPE_high, c(1, 0.1), tolerance = 1e-3) expect_error( describe_posterior(x, rope_range = list(1, "default")), regex = "should be 'default'" ) expect_error( describe_posterior(x, rope_range = list(c(1, 1), c(2, 2), c(2, 3))), regex = "Length of" ) rez <- describe_posterior( x, centrality = 
NULL, dispersion = TRUE, test = NULL, ci_method = "quantile", diagnostic = NULL, priors = FALSE ) expect_identical(dim(rez), c(2L, 4L)) # brms ------------------------------------------------- skip_on_os("windows") x <- suppressWarnings(brms::brm(mpg ~ wt + (1 | cyl) + (1 + wt | gear), data = mtcars, refresh = 0)) rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, ci = c(0.8, 0.9)) expect_identical(dim(rez), c(4L, 16L)) expect_identical(colnames(rez), c( "Parameter", "Median", "MAD", "Mean", "SD", "MAP", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Rhat", "ESS" )) rez <- describe_posterior( x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method = "quantile", diagnostic = NULL ) expect_identical(dim(rez), c(2L, 4L)) model <- suppressWarnings(brms::brm( mpg ~ drat, data = mtcars, chains = 2, algorithm = "meanfield", refresh = 0 )) expect_identical(nrow(describe_posterior(model)), 2L) # rstanarm ------------------------------------------------- model <- rstanarm::stan_glm(mpg ~ drat, data = mtcars, algorithm = "meanfield", refresh = 0 ) expect_identical(nrow(describe_posterior(model)), 2L) model <- suppressWarnings(rstanarm::stan_glm(mpg ~ drat, data = mtcars, algorithm = "optimizing", refresh = 0 )) expect_identical(nrow(describe_posterior(model)), 2L) model <- rstanarm::stan_glm(mpg ~ drat, data = mtcars, algorithm = "fullrank", refresh = 0 ) expect_identical(nrow(describe_posterior(model)), 2L) ## FIXME: always fails on CI # model <- brms::brm(mpg ~ drat, data = mtcars, chains = 2, algorithm = "fullrank", refresh = 0) # expect_equal(nrow(describe_posterior(model)), 2L) # BayesFactor x <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all") expect_identical(dim(rez), c(1L, 23L)) rez <- describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all", ci = c(0.8, 0.9)) expect_identical(dim(rez), c(2L, 23L)) rez <- describe_posterior(x, centrality = NULL, dispersion = TRUE, test = NULL, ci_method = "quantile") expect_identical(dim(rez), c(1L, 7L)) }) test_that("describe_posterior", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") m <- insight::download_model("stanreg_merMod_5") p <- insight::get_parameters(m, effects = "all") expect_equal( describe_posterior(m, effects = "all", verbose = FALSE)$Median, describe_posterior(p, verbose = FALSE)$Median, tolerance = 1e-3 ) }) test_that("describe_posterior", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") m <- insight::download_model("brms_zi_3") p <- insight::get_parameters(m, effects = "all", component = "all") expect_equal( suppressWarnings(describe_posterior(m, effects = "all", component = "all", verbose = FALSE)$Median), suppressWarnings(describe_posterior(p, verbose = FALSE)$Median), tolerance = 1e-3 ) }) test_that("describe_posterior w/ BF+SI", { skip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") x <- insight::download_model("stanreg_lm_1") set.seed(555) expect_warning(expect_warning({ rez <- 
describe_posterior(x, ci_method = "SI", test = "bf") })) # test si set.seed(555) suppressMessages( expect_warning( { rez_si <- si(x) }, regex = "not be precise" ) ) expect_equal(rez$CI_low, rez_si$CI_low, tolerance = 0.1) expect_equal(rez$CI_high, rez_si$CI_high, tolerance = 0.1) # test BF set.seed(555) rez_bf <- suppressWarnings(bayesfactor_parameters(x, verbose = FALSE)) expect_equal(rez$log_BF, log(as.numeric(rez_bf)), tolerance = 0.1) }) # BayesFactor ------------------------------------------------- test_that("describe_posterior: BayesFactor", { skip_if_not(getRversion() >= "4.0", "Don't run with R < 4.0") skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_or_load_if_installed("rstanarm") skip_if_not_or_load_if_installed("brms") skip_if_not_or_load_if_installed("BayesFactor") set.seed(123) expect_equal( as.data.frame(describe_posterior(correlationBF( mtcars$wt, mtcars$mpg, rscale = 0.5 ))), structure( list( Parameter = "rho", Median = -0.833281858269296, CI = 0.95, CI_low = -0.919418102114416, CI_high = -0.715602277241063, pd = 1, ROPE_CI = 0.95, ROPE_low = -0.05, ROPE_high = 0.05, ROPE_Percentage = 0, log_BF = 17.328704623688, BF = 33555274.5519413, Prior_Distribution = "beta", Prior_Location = 2, Prior_Scale = 2 ), row.names = 1L, class = "data.frame", ci_method = "hdi" ), tolerance = 0.1, ignore_attr = TRUE ) set.seed(123) expect_equal( describe_posterior(ttestBF(mtcars$wt, mu = 3), ci = 0.95, ci_method = "hdi"), structure( list( Parameter = "Difference", Median = 0.192275922178887, CI = 0.95, CI_low = -0.172955539648102, CI_high = 0.526426796879103, pd = 0.85875, ROPE_CI = 0.95, ROPE_low = -0.0978457442989697, ROPE_high = 0.0978457442989697, ROPE_Percentage = 0.257300710339384, log_BF = -0.94971351422473, BF = 0.386851835128661, Prior_Distribution = "cauchy", Prior_Location = 0, Prior_Scale = 0.707106781186548 ), row.names = 1L, class = c("describe_posterior", "see_describe_posterior", "data.frame"), ci_method = "hdi", object_name = "ttestBF(mtcars$wt, mu = 3)" ), tolerance = 0.1, ignore_attr = TRUE ) set.seed(123) expect_warning(expect_equal( describe_posterior( contingencyTableBF( x = table(mtcars$am, mtcars$cyl), sampleType = "poisson" ), ci = 0.95, ci_method = "hdi" ), structure( list( Parameter = c( "cell[1,1]", "cell[2,1]", "cell[1,2]", "cell[2,2]", "cell[1,3]", "cell[2,3]", "Ratio" ), Median = c( 3.04620767622137, 7.33170140780154, 3.96252503900368, 3.06206636495483, 10.7088156207511, 2.26008072419983, NA ), CI = c( 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, NA ), CI_low = c( 0.537476720942068, 3.33553818106395, 1.05013765177975, 0.746538992318074, 5.49894434136364, 0.275642629940081, NA ), CI_high = c( 6.62852027141624, 12.6753970192515, 7.74693313388489, 6.87239730676778, 16.9198964674968, 5.4533083861175, NA ), pd = c(1, 1, 1, 1, 1, 1, NA), ROPE_CI = c( 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, NA ), ROPE_low = c( -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, NA ), ROPE_high = c( 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, NA ), ROPE_Percentage = c(0, 0, 0, 0, 0, 0, NA), log_BF = c( 3.84187678153378, 3.84187678153378, 3.84187678153378, 3.84187678153378, 3.84187678153378, 3.84187678153378, NA ), BF = c( 46.6128745808996, 46.6128745808996, 46.6128745808996, 46.6128745808996, 46.6128745808996, 46.6128745808996, NA ), Prior_Distribution = c(NA, NA, NA, NA, NA, NA, "poisson"), Prior_Location = c(NA, NA, NA, NA, NA, NA, 0), Prior_Scale = c( NA, NA, NA, NA, NA, NA, 1 ) ), row.names = c( 1L, 4L, 2L, 5L, 3L, 6L, 7L ), class = c("describe_posterior", "see_describe_posterior") 
), tolerance = 0.1, ignore_attr = TRUE )) set.seed(123) expect_warning(expect_equal( describe_posterior( contingencyTableBF( x = table(mtcars$am, mtcars$cyl), sampleType = "indepMulti", fixedMargin = "cols", priorConcentration = 1.6 ), ci = 0.95 ), structure( list( Parameter = c( "cell[1,1]", "cell[2,1]", "cell[1,2]", "cell[2,2]", "cell[1,3]", "cell[2,3]", "Ratio" ), Median = c( 3.33359102240953, 7.27094924961528, 4.13335763121549, 3.36172537199681, 10.3872621523407, 2.56061336771352, NA ), CI = c( 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, NA ), CI_low = c( 0.912122089726423, 3.51744611674693, 1.39218072401004, 0.923175932880601, 6.18021898129278, 0.465587711080369, NA ), CI_high = c( 6.61128887457661, 11.4058892728414, 7.61378018576518, 6.65522159416386, 15.1209075845299, 5.35853420162441, NA ), pd = c(1, 1, 1, 1, 1, 1, NA), ROPE_CI = c( 0.95, 0.95, 0.95, 0.95, 0.95, 0.95, NA ), ROPE_low = c( -0.1, -0.1, -0.1, -0.1, -0.1, -0.1, NA ), ROPE_high = c( 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, NA ), ROPE_Percentage = c(0, 0, 0, 0, 0, 0, NA), log_BF = c( 2.49338780738881, 2.49338780738881, 2.49338780738881, 2.49338780738881, 2.49338780738881, 2.49338780738881, NA ), BF = c( 12.1022066941064, 12.1022066941064, 12.1022066941064, 12.1022066941064, 12.1022066941064, 12.1022066941064, NA ), Prior_Distribution = c(NA, NA, NA, NA, NA, NA, "independent multinomial"), Prior_Location = c(NA, NA, NA, NA, NA, NA, 0), Prior_Scale = c( NA, NA, NA, NA, NA, NA, 1.6 ) ), row.names = c( 1L, 4L, 2L, 5L, 3L, 6L, 7L ), class = c( "describe_posterior", "see_describe_posterior", "data.frame" ), ci_method = "hdi", object_name = "contingencyTableBF(x = table(mtcars$am, mtcars$cyl), sampleType = \"indepMulti\", fixedMargin = \"cols\", priorConcentration = 1.6)" ), tolerance = 0.1, ignore_attr = TRUE )) skip_on_os("linux") set.seed(123) expect_equal( describe_posterior(anovaBF(extra ~ group, data = sleep, progress = FALSE), ci_method = "hdi", ci = 0.95), structure( list( Parameter = c( "mu", "group-1", "group-2", "sig2", "g_group" ), Median = c( 1.53667371296145, -0.571674439385088, 0.571674439385088, 3.69268743002151, 0.349038661644431 ), CI = c( 0.95, 0.95, 0.95, 0.95, 0.95 ), CI_low = c( 0.691696017646264, -1.31604531656452, -0.229408603643392, 1.75779899540302, 0.0192738130412634 ), CI_high = c( 2.43317955922589, 0.229408603643392, 1.31604531656452, 6.88471056133351, 5.30402785651874 ), pd = c(0.99975, 0.927, 0.927, 1, 1), ROPE_CI = c( 0.95, 0.95, 0.95, 0.95, 0.95 ), ROPE_low = c( -0.201791972090071, -0.201791972090071, -0.201791972090071, -0.201791972090071, -0.201791972090071 ), ROPE_high = c( 0.201791972090071, 0.201791972090071, 0.201791972090071, 0.201791972090071, 0.201791972090071 ), ROPE_Percentage = c( 0, 0.162325703762168, 0.162325703762168, 0, 0.346487766377269 ), log_BF = c( 0.235803198474248, 0.235803198474248, 0.235803198474248, 0.235803198474248, 0.235803198474248 ), BF = c( 1.26592514964916, 1.26592514964916, 1.26592514964916, 1.26592514964916, 1.26592514964916 ), Prior_Distribution = c(NA, "cauchy", "cauchy", NA, NA), Prior_Location = c(NA, 0, 0, NA, NA), Prior_Scale = c( NA, 0.5, 0.5, NA, NA ) ), row.names = c(4L, 2L, 3L, 5L, 1L), class = c( "describe_posterior", "see_describe_posterior", "data.frame" ), ci_method = "hdi", object_name = "anovaBF(extra ~ group, data = sleep, progress = FALSE)" ), tolerance = 0.1, ignore_attr = TRUE ) }) test_that("describe_posterior: response column for marginaleffects", { skip_if_not(getRversion() >= "4.0", "Don't run with R < 4.0") skip_if_not_installed("marginaleffects", 
minimum_version = "0.29.0") skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_installed("brms") m <- insight::download_model("brms_categorical_1_num") skip_if(is.null(m)) out2 <- marginaleffects::avg_predictions(m, variables = "mpg") post <- describe_posterior(out2) expect_named( post, c( "mpg", "group", "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage" ) ) expect_identical( post$group, c("3", "3", "3", "3", "3", "4", "4", "4", "4", "4", "5", "5", "5", "5", "5") ) }) bayestestR/tests/testthat.R0000644000176200001440000000010414542333405015522 0ustar liggesuserslibrary(testthat) library(bayestestR) test_check("bayestestR") bayestestR/MD50000644000176200001440000002523615054341320012714 0ustar liggesusers02bd8dc163e942d12d4476750dadfe2a *DESCRIPTION ae72c6c5d54986fbbada4059c9582ef3 *NAMESPACE c36dfded1d1bf68d190fab1bde6b87b1 *NEWS.md abba91dd712374c43b79205e4948a8b1 *R/append_datagrid.R 0604665d2ad6f0c6a2f0307b6993f2c6 *R/area_under_curve.R a990bddd4c54e18ca2bb3b5cf954cbee *R/as.list.R 2bc49e776e7ba4cc4b1451ee68ca7b9a *R/bayesfactor.R b7a20ce76816408b4e57f95cda344556 *R/bayesfactor_inclusion.R 2254fba876fc1553770c1e4e5d9a1c06 *R/bayesfactor_models.R 07c295c7683e3e37bf76214fe58fd3bc *R/bayesfactor_parameters.R 03288781d664a4a1fcd78b75ebfe579c *R/bayesfactor_restricted.R 89fb8182fcba929c2bf1034641601fcc *R/bayestestR-package.R 3ca5ed16d53896605ee23eec387fdb8e *R/bci.R bbb2088ef0c7c5672462636f122d4e11 *R/bic_to_bf.R 43616ef31ac8e512d66175b5a0d0e65f *R/check_prior.R ffb7a7c7be297707dc6c056609fb54b7 *R/ci.R 1e7184d4d5b95d2a4430b8ba4cc092b0 *R/contr.equalprior.R 8fd49be9f215e98477326851e5d404e8 *R/convert_bayesian_to_frequentist.R 65cc8eb91d4971fec04f7b762bbb9d42 *R/convert_pd_to_p.R e87daed2f8a248c6d092f14fc1b1ce6e *R/datasets.R a2d06b02ae80e64f1abfaab99ded24b8 *R/describe_posterior.R 326d0d000f4eacfcffc5e599c248cb42 *R/describe_prior.R 09c09d21dea86f52bb652d54a47e19f9 *R/diagnostic_draws.R d4f057d3311cd55435552bb696917018 *R/diagnostic_posterior.R bfbff3b692b2ecf166ad10e43984a31f *R/display.R 77185016c05cc7a27dcca577c0e94fa1 *R/distribution.R 28cca65f33b773d100ff950ec34e8bb7 *R/effective_sample.R bc7ff3d5bfeeddae8d3f26b5ce85bd3c *R/equivalence_test.R 6c8357abcea7966126cbdeec521f3254 *R/estimate_density.R 9ecf113fb3818c469f1430767dd098c3 *R/eti.R 669c2505909aa8f52615b54d545c4574 *R/format.R 115857f76d0021c96e2b476ab3737ca1 *R/hdi.R 142973cf322b691ca66ae2e6e73a461c *R/is_baysian_grid.R 14fb9816061b7b42993700a416f127ff *R/map_estimate.R f3adb36c398878a72152de0a29a45bdd *R/mcse.R 964a289da08364a42d0fc21e1a2c2f07 *R/mediation.R 6b0cf45761ef3da66bb673fc19088a9e *R/model_to_priors.R 6f32de81368af70989efd003036bb653 *R/overlap.R 10c3c65137ccf0e82c19e62af546550f *R/p_direction.R 5c7307024a1dfd6667e2f8bcf6714822 *R/p_map.R e3d5632576ffd9c047b7a30f72e187ed *R/p_rope.R e2c94e96019f91cc1360a55c3d8f954d *R/p_significance.R 573b9a44e5e1caf234a703536cca20a3 *R/p_to_bf.R 1ba410a92cded6f84210c41c6f77dff2 *R/plot.R 52ba3e74c2f67f3a7cbe61ff4e1ad06b *R/point_estimate.R 2676b67feebc4b0aea26b2b9d22f7dbf *R/print.R a344d1b115e162c5529517c57e4de7ec *R/print.bayesfactor_models.R 947fdd5639d07779594c9fc3ed4db59e *R/print.equivalence_test.R 8c9543c2f2a2e385f6f819c2afbb618a *R/print.rope.R c2382843cc2971242e44fe8985735de9 *R/print_html.R ecff71dd1cacfaa013825712e960895f *R/print_md.R 830219c70153eccb1158f6d24283a100 *R/reexports.R b18d7f1c872653866dd87887788e3730 *R/reshape_iterations.R 1157e42f3e8bb7f0e16b3e6626259ab1 *R/rope.R 
1648d3359acd7c6068b09c38ba438219 *R/rope_range.R 09ed6ff00bc3589a6ae5d64c59f103b8 *R/sensitivity_to_prior.R b10bbbb0c48408a9667597a2ead955fc *R/sexit.R fa40dedef3d2ef6c67c01df6200c6ab6 *R/sexit_thresholds.R 471e70c0353b176153e3166f48cd247d *R/si.R c971f7f875161259a20c83aed50fd453 *R/simulate_data.R ac41bee408d8b01db844defd0b592dd5 *R/simulate_priors.R fcb6577d3cf2712abe915d5a3271b842 *R/simulate_simpson.R 571120321bb28aa9ed7f21ed60b53769 *R/spi.R 428cc26ed56d7abd7accde35e361f5cd *R/unupdate.R b9918b49f34632dcf4af4fee900c6409 *R/utils.R 4821e2e530e05dbd1ee6256f15c58efe *R/utils_bayesfactor.R dc05f6b835fc003670256f8bc911fd8f *R/utils_check_collinearity.R 76350b630a3dd2d4bfeff7b91a7a2114 *R/utils_clean_stan_parameters.R 452e47759b5530d115101132cb17718b *R/utils_hdi_ci.R 506839f138e42454411368529c153561 *R/utils_posterior.R a14aa93c5a25aa65bfb66f857a19ae6f *R/utils_print_data_frame.R 094433871086e468dc645b1c60289fac *R/weighted_posteriors.R 4533f523d6cb92676f1d7912e088e29b *R/zzz.R 5ad4e102058a48f92de2a0802e27f810 *README.md 9d40159f50a5fc9949cbfe95a2884126 *build/partial.rdb c33a9544b3ac78c07d5344363641c23e *build/vignette.rds 0ff3ea913147c5a1b14eb94d50333b98 *data/disgust.rdata c5cfd3e44877e6f8487a7d57c28dd0e2 *inst/CITATION 9d02fbd79bedbc645d765538ab6d0039 *inst/WORDLIST 7342a6d63f4cc4501afc55be1d148740 *inst/doc/overview_of_vignettes.R d3047f8dd544e4791a13e4ede781199f *inst/doc/overview_of_vignettes.Rmd 0fc7c30724becb44d7363bfce3a337ad *inst/doc/overview_of_vignettes.html 261ba655620dfbd3001fa10238ff6d0e *man/area_under_curve.Rd 6860290cbdd452ec9f23f98ddf68fb99 *man/as.data.frame.density.Rd 3d348eff3f4bc590080a8cd696304d75 *man/as.numeric.p_direction.Rd 05ec8b7f8bd3c8e6963c58f1ccd29691 *man/bayesfactor.Rd e010167beecd044793d2c5d743e2c47f *man/bayesfactor_inclusion.Rd 6eb4dec7e7d3548c27d76c194cc298f8 *man/bayesfactor_models.Rd a6439fd04476146fd4b284bae1c46ae3 *man/bayesfactor_parameters.Rd e02198429ea11e6fdaf41062bdadd0ad *man/bayesfactor_restricted.Rd 33674c88ef04fdac6d0902ee74b5aedf *man/bayestestR-package.Rd 1d78b1e177f548c625f73c3700d60cd2 *man/bci.Rd 0be80726d814018e2b8a86480ff4c64f *man/bic_to_bf.Rd 811f670a82638d62aa4c202bb520bdfc *man/check_prior.Rd 01ece6479fa2a2327e7d86498499dd41 *man/ci.Rd ade20e470426dcac15633c0337c8aea4 *man/contr.equalprior.Rd 5a4a4b98942ff65245147f8f2b8b2d25 *man/convert_bayesian_as_frequentist.Rd 3b8a829f3b094fa97dccb2f654445209 *man/density_at.Rd c1dcc0d41fac2102ef68a8ebfb2f9918 *man/describe_posterior.Rd ad60537015e5f50ccfdf9327e73939ad *man/describe_prior.Rd 9b3370931892db9cb0c9d0c72a983fe5 *man/diagnostic_draws.Rd 0073c2ac9d64c012b2a2741974ddd7c0 *man/diagnostic_posterior.Rd 933a334f0afcb213569e4ad5d3446e6e *man/disgust.Rd 479bee1951f6a057dbbfaa30d2499b53 *man/display.describe_posterior.Rd 6ad07ee746cbaba4b33e38f1b0b64ee3 *man/distribution.Rd 0b1d93b59d19425ddb3a0d40f38210c1 *man/dot-extract_priors_rstanarm.Rd e450b5ed09ce1a54bb53cf57a436a1a5 *man/dot-prior_new_location.Rd 1991efd66189082be157e0b5d706e148 *man/dot-select_nums.Rd 50955b285942b72913c0cda7a4b2d391 *man/effective_sample.Rd 5be54a25cef829f217795c92d3dd18f0 *man/equivalence_test.Rd 42d84c8e7a5d95fc762cf2a9608bd15c *man/estimate_density.Rd 90f1687a5c360e55c91d052d4e3537a7 *man/eti.Rd 27e0ea3ff40617aff2e5f74afd47970c *man/figures/logo.png 58174a4d44ba8c3229f77bfceed4c00e *man/figures/unnamed-chunk-10-1.png f867a333086307875208cda733058665 *man/figures/unnamed-chunk-12-1.png b3f74fa586e22763e847b80f65036630 *man/figures/unnamed-chunk-14-1.png ed6e47a85efb48ed25c1c52567622922 
*man/figures/unnamed-chunk-16-1.png d04a23dfd315323f737f481d85a96d18 *man/figures/unnamed-chunk-7-1.png f0a5545041b0b19098bcc2e5879776c4 *man/figures/unnamed-chunk-8-1.png 72ef9144b9353d2ae958e0fa539d1165 *man/hdi.Rd c51d555eda041575e9cbe454ff642dd3 *man/map_estimate.Rd 7457b2f45a9edbc9b2d5603f9767543a *man/mcse.Rd d4e62d058af7fb4f8c100d5478a8f8fe *man/mediation.Rd 04325eac6de74b6fd291888e66cdfddd *man/model_to_priors.Rd 24a2f8e0c2e682c815c1884908edf1b4 *man/overlap.Rd 4b15509c1e4a0d8a36f13be648329ae8 *man/p_direction.Rd d179d8e139aad777fca2d067aa8a2a3c *man/p_map.Rd 7454b6683d7028150e5d94d30635601d *man/p_rope.Rd 4c7b7fa69812b809ac7f23875ef72138 *man/p_significance.Rd 3e828bec75a649c37ee6978a3ff49d91 *man/p_to_bf.Rd 8b0852b820074c1b636763cc0ad798e7 *man/pd_to_p.Rd 23caf8e99f98f85a052a6401d0c1a209 *man/point_estimate.Rd aaea683a58ad207efc4a82b7cca5fcdb *man/reexports.Rd f9baf506f3a47e5e259a7417091cbce2 *man/reshape_iterations.Rd 282528fa22bae85faa42df380fc2226b *man/rope.Rd 282e0185c674a5848eb054d2953e8d9d *man/rope_range.Rd 4a14fb0774cec032cbd6a13265c9dda6 *man/sensitivity_to_prior.Rd dac60eb2c7370097ecac4252da8dbc44 *man/sexit.Rd 88a10e6bed8b5ae44887dfaa551df89b *man/sexit_thresholds.Rd 44f74acc3cb522ca3d07c9c8ec43a385 *man/si.Rd 701f0ff083a19850d80fca995be49f9b *man/simulate_correlation.Rd 4417b147d85fd831d0e5a8aa7be5ef28 *man/simulate_prior.Rd 0f8c5891b884ffb58a8a1a525ded5829 *man/simulate_simpson.Rd 02962b45ea453072da90d21c8b3924e3 *man/spi.Rd 4767200807e55ea397bcbb466e3c40a7 *man/unupdate.Rd d3622083d781e8536a4a522f06da2e63 *man/weighted_posteriors.Rd ed019fb28c42d301a471042302b2215d *tests/testthat.R 0e84b6d82ae0c55225f7b5606bc6ab10 *tests/testthat/helper.R 77395e828ae6acde88f6ea2ca2f9b222 *tests/testthat/test-BFBayesFactor.R a9cbb2928bdc3e39c85d40383e12f538 *tests/testthat/test-as.data.frame.density.R bf35e06dfeaff8283f2ea51dfae73da6 *tests/testthat/test-bayesfactor_models.R 7b14d05694c2de7e3b92337ef688f747 *tests/testthat/test-bayesfactor_parameters.R b633fffd21c81733a92f41be827ca227 *tests/testthat/test-bayesfactor_restricted.R 103719ca3c022cfe6de1258124a001aa *tests/testthat/test-bayesian_as_frequentist.R 0fc8de8b650ebaad57d1b44d9134e1ce *tests/testthat/test-blavaan.R 867b6b17dd84bb076f7cd2a6092f0fa5 *tests/testthat/test-brms.R ce45b9cc1832cfd8b0ee9db71e094fb1 *tests/testthat/test-check_prior.R e7ce4a136ba3a5e3e19802ad5fefed00 *tests/testthat/test-ci.R 43dfdbc876dff66ea3914899c32f73c0 *tests/testthat/test-contr.R 0cf623ea068683b85c865cbeb60e46a1 *tests/testthat/test-data.frame-with-rvar.R 8cfbb3b3a84cc76ad6fac4e191b705a7 *tests/testthat/test-density_at.R 7982a334a624351a3793479110fee8c4 *tests/testthat/test-describe_posterior.R 9bc4de70fb0a2519092112cfabb87bb4 *tests/testthat/test-describe_prior.R db725a1034057c4cc62159b861fd88a1 *tests/testthat/test-different_models.R ed8c019fa0e88ef258102036899bf543 *tests/testthat/test-distributions.R 970659f4f08135ae9a0715000b383684 *tests/testthat/test-effective_sample.R 479eaa60235fcef08a0beb277b9f2616 *tests/testthat/test-emmGrid.R eedd6b294f489213fe0318772801b548 *tests/testthat/test-equivalence_test.R 72390c0791e5b44a7550bb9d5a06a677 *tests/testthat/test-estimate_density.R a13a9f515a42098194c484317fc682e5 *tests/testthat/test-format.R 87ec8528931521a99cdc613329dc103e *tests/testthat/test-hdi.R 6510bfd5cacef3fdb6244d11deff089d *tests/testthat/test-map_estimate.R ee600e75284969c9e20c4e9c7bb7f7b5 *tests/testthat/test-marginaleffects.R 7a8d3e0aff4d56f414f9adbc6f657275 *tests/testthat/test-overlap.R d4fa9fe6d52388d3e9de5f2219edf8e8 
*tests/testthat/test-p_direction.R 23f5abe86c57b57759aea6e6bfb67fd4 *tests/testthat/test-p_map.R 5d7f121f28245b414edd320b64ad87c6 *tests/testthat/test-p_rope.R 6769988ee9935fdb9e0279da5ebfd208 *tests/testthat/test-p_significance.R 339b310dff63000e06b2f5a03836fb71 *tests/testthat/test-p_to_bf.R 7af7475726cb85b9af8c37003d69e88a *tests/testthat/test-pd_to_p.R b467985ca7568a4c3feee9a57cbba1ca *tests/testthat/test-point_estimate.R dd3a759e9294282d61adeab750c07bd4 *tests/testthat/test-posterior.R 5eaaea7589929d5388952a1c61d9e1c3 *tests/testthat/test-print.R 7f7b64b58961bbf2d53badf84e3a0379 *tests/testthat/test-rope.R 6bc4cc8671708ca3624ae49aae4e4650 *tests/testthat/test-rope_range.R 00d9e5ecc6d810ef3b36746483c9c544 *tests/testthat/test-rstanarm.R ff4c3c90dc4dce37fae9e8c3fef3787a *tests/testthat/test-si.R 97679c198087bafee22b280e9069032c *tests/testthat/test-simulate_data.R eded0048e363bbada93da3ea320cc643 *tests/testthat/test-spi.R 987d3efdbf307afc2c43d907f6a5ffef *tests/testthat/test-weighted_posteriors.R d3047f8dd544e4791a13e4ede781199f *vignettes/overview_of_vignettes.Rmd bayestestR/R/0000755000176200001440000000000015053406143012601 5ustar liggesusersbayestestR/R/format.R0000644000176200001440000002432615052670366014234 0ustar liggesusers#' @export format.describe_posterior <- function(x, cp = NULL, digits = 2, format = "text", ci_string = "CI", caption = NULL, subtitles = NULL, ...) { # reshape CI if (is.data.frame(x) && insight::n_unique(x$CI) > 1) { att <- attributes(x) x <- datawizard::reshape_ci(x) attributes(x) <- utils::modifyList(att, attributes(x)) } # validation check if (is.null(digits)) { digits <- 2 } # format columns and values of data frame out <- insight::format_table(x, digits = digits, format = format, ...) # different CI-types as column names? if (ci_string != "CI" && any(endsWith(colnames(out), "CI"))) { colnames(out) <- gsub("(.*)CI$", paste0("\\1", ci_string), colnames(out)) } # match and split at components if (!is.null(cp) && !all(is.na(match(cp$Parameter, out$Parameter)))) { out <- insight::print_parameters( cp, out, keep_parameter_column = FALSE, remove_empty_column = TRUE, titles = caption, subtitles = subtitles, format = format ) } else { attr(out, "table_caption") <- caption attr(out, "table_subtitle") <- subtitles } out } #' @export format.point_estimate <- format.describe_posterior #' @export format.p_rope <- format.describe_posterior #' @export format.p_direction <- format.describe_posterior #' @export format.p_map <- format.describe_posterior #' @export format.map_estimate <- format.describe_posterior #' @export format.p_significance <- format.describe_posterior #' @export format.bayestestR_hdi <- format.describe_posterior #' @export format.bayestestR_eti <- format.describe_posterior #' @export format.bayestestR_si <- format.describe_posterior #' @export format.equivalence_test <- format.describe_posterior #' @export format.rope <- format.describe_posterior # special handling for bayes factors ------------------ #' @export format.bayesfactor_models <- function(x, digits = 3, log = FALSE, show_names = TRUE, format = "text", caption = NULL, exact = TRUE, ...) { BFE <- x denominator <- attr(BFE, "denominator") grid.type <- attr(BFE, "BF_method") model_names <- attr(BFE, "model_names") formula_length <- attr(BFE, "text_length") BFE <- as.data.frame(BFE) BFE$log_BF <- as.numeric(x, log = log) BFE$BF <- insight::format_bf(abs(BFE$log_BF), name = NULL, exact = exact, ...) 
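  # format_bf() above receives |log(BF)|, so the sign of negative log-Bayes
  # factors (possible when `log = TRUE`) has to be re-attached to the
  # formatted values in the step below.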
sgn <- sign(BFE$log_BF) if (any((sgn < 0)[!is.na(BFE$log_BF)])) { BFE$BF[sgn] <- paste0("-", BFE$BF[sgn]) } BFE$Model[BFE$Model == "1"] <- "(Intercept only)" # indicate null-model # shorten model formulas? if (!is.null(formula_length) && !is.null(BFE$Model)) { BFE$Model <- insight::format_string(BFE$Model, length = formula_length) } if (isFALSE(show_names) || is.null(model_names) || length(model_names) != nrow(BFE)) { BFE$i <- paste0("[", seq_len(nrow(BFE)), "]") } else { BFE$i <- paste0("[", model_names, "]") } # Denominator denM <- insight::trim_ws(paste0(BFE$i, " ", BFE$Model)[denominator]) BFE <- BFE[-denominator, ] BFE <- BFE[c("i", "Model", "BF")] colnames(BFE)[1] <- ifelse(identical(format, "html"), "Name", "") # footer if (is.null(format) || format == "text") { footer <- list( "\n* Against Denominator: ", c(denM, "cyan"), "\n* Bayes Factor Type: ", c(grid.type, "cyan"), if (log) c("\n\nBayes Factors are on the log-scale.", "red") ) # color formatting for caption if (!is.null(caption)) { caption <- c(caption, "blue") } } else { footer <- insight::compact_list(list( paste0("Against Denominator: ", denM), paste0("Bayes Factor Type: ", grid.type), if (log) "Bayes Factors are on the log-scale." )) } attr(BFE, "table_footer") <- footer attr(BFE, "table_caption") <- caption BFE } #' @export format.bayesfactor_inclusion <- function(x, digits = 3, log = FALSE, format = "text", caption = NULL, exact = TRUE, ...) { priorOdds <- attr(x, "priorOdds") matched <- attr(x, "matched") # format table BFE <- as.data.frame(x) BFE$log_BF <- as.numeric(x, log = log) BFE$BF <- insight::format_bf(abs(BFE$log_BF), name = NULL, exact = exact, ...) sgn <- sign(BFE$log_BF) if (any((sgn < 0)[!is.na(BFE$log_BF)])) { BFE$BF[sgn] <- paste0("-", BFE$BF[sgn]) } BFE <- BFE[c("p_prior", "p_posterior", "BF")] BFE <- cbind(rownames(BFE), BFE) colnames(BFE) <- c("", "P(prior)", "P(posterior)", "Inclusion BF") colnames(BFE)[1] <- ifelse(identical(format, "html"), "Parameter", "") # footer if (is.null(format) || format == "text") { footer <- list( "\n* Compared among: ", c(if (matched) "matched models only" else "all models", "cyan"), "\n* Priors odds: ", c(if (!is.null(priorOdds)) "custom" else "uniform-equal", "cyan"), if (log) c("\n\nBayes Factors are on the log-scale.", "red") ) # color formatting for caption if (!is.null(caption)) { caption <- c(caption, "blue") } } else { footer <- insight::compact_list(list( paste0("Compared among: ", if (matched) "matched models only" else "all models"), paste0("Priors odds: ", if (!is.null(priorOdds)) "custom" else "uniform-equal"), if (log) "Bayes Factors are on the log-scale." )) } attr(BFE, "table_footer") <- footer attr(BFE, "table_caption") <- caption BFE } #' @export format.bayesfactor_restricted <- function(x, digits = 3, log = FALSE, format = "text", caption = NULL, exact = TRUE, ...) { BFE <- as.data.frame(x) # Format BFE$log_BF <- as.numeric(x, log = log) BFE$BF <- insight::format_bf(abs(BFE$log_BF), name = NULL, exact = exact, ...) sgn <- sign(BFE$log_BF) if (any((sgn < 0)[!is.na(BFE$log_BF)])) { BFE$BF[sgn] <- paste0("-", BFE$BF[sgn]) } BFE$log_BF <- NULL colnames(BFE)[colnames(BFE) == "p_prior"] <- "P(Prior)" colnames(BFE)[colnames(BFE) == "p_posterior"] <- "P(Posterior)" # footer if (is.null(format) || format == "text") { footer <- list( "\n* Bayes factors for the restricted model vs. 
the un-restricted model.\n", if (log) c("\nBayes Factors are on the log-scale.\n", "red") ) # color formatting for caption if (!is.null(caption)) { caption <- c(caption, "blue") } } else { footer <- insight::compact_list(list( "Bayes factors for the restricted model vs. the un-restricted model.", if (log) "Bayes Factors are on the log-scale." )) } attr(BFE, "table_footer") <- footer attr(BFE, "table_caption") <- caption BFE } #' @export format.bayesfactor_parameters <- function(x, cp = NULL, digits = 3, log = FALSE, format = "text", exact = TRUE, ...) { null <- attr(x, "hypothesis") direction <- attr(x, "direction") x$log_BF <- as.numeric(x, log = log) x$BF_override <- insight::format_bf(abs(x$log_BF), name = NULL, exact = exact, ...) sgn <- sign(x$log_BF) if (any((sgn < 0)[!is.na(x$log_BF)])) { x$BF_override[sgn < 0] <- paste0("-", x$BF_override[sgn < 0]) } x$log_BF <- NULL # format columns and values of data frame out <- insight::format_table(x, digits = digits, format = format, ...) colnames(out)[colnames(out) == "BF_override"] <- "BF" # table caption caption <- sprintf( "Bayes Factor (%s)", if (length(null) == 1) "Savage-Dickey density ratio" else "Null-Interval" ) if (is.null(format) || format == "text") { caption <- c(caption, "blue") } # format null-value if (length(null) == 1) { null <- insight::format_value(null, digits = digits, protect_integers = TRUE) } else { null <- insight::format_ci(null[1], null[2], ci = NULL, digits = digits) } # footer if (is.null(format) || format == "text") { footer <- list( "\n* Evidence Against The Null: ", c(paste0(null, "\n"), "cyan"), if (direction) "* Direction: ", if (direction < 0) c("Left-Sided test", "cyan"), if (direction > 0) c("Right-Sided test", "cyan"), if (direction) "\n", if (log) c("\n\nBayes Factors are on the log-scale.\n", "red") ) } else { footer <- insight::compact_list(list( paste0("Evidence Against The Null: ", null), if (direction) "Direction: ", if (direction < 0) "Left-Sided test", if (direction > 0) "Right-Sided test", if (log) "Bayes Factors are on the log-scale." )) } # match and split at components if (!is.null(cp) && !all(is.na(match(cp$Parameter, out$Parameter)))) { out <- insight::print_parameters( cp, out, keep_parameter_column = FALSE, remove_empty_column = TRUE, format = format ) attr(out[[1]], "table_caption") <- caption attr(out[[length(out)]], "table_footer") <- footer } else { attr(out, "table_caption") <- caption attr(out, "table_footer") <- footer } out } bayestestR/R/diagnostic_draws.R0000644000176200001440000000323214706241121016245 0ustar liggesusers#' Diagnostic values for each iteration #' #' Returns sampler diagnostics for each iteration and chain: the accumulated log-posterior, the average Metropolis acceptance rate, the step size, the number of leapfrog steps, the energy, whether the transition was divergent, and the tree depth (a saturated tree depth indicates that the trajectory was terminated early rather than ending its evolution normally). #' @inheritParams diagnostic_posterior #' #' @examples #' \donttest{ #' set.seed(333) #' #' if (require("brms", quietly = TRUE)) { #' model <- suppressWarnings(brm(mpg ~ wt * cyl * vs, #' data = mtcars, #' iter = 100, control = list(adapt_delta = 0.80), #' refresh = 0 #' )) #' diagnostic_draws(model) #' } #' } #' #' @export diagnostic_draws <- function(posterior, ...) { UseMethod("diagnostic_draws") } #' @export diagnostic_draws.brmsfit <- function(posterior, ...)
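# The brmsfit method gathers the NUTS sampler parameters returned by
# brms::nuts_params() (long format), reshapes them to one row per
# chain/iteration, and merges in the accumulated log-posterior from
# brms::log_posterior(). A minimal usage sketch (assuming a fitted
# brmsfit object `fit`):
#   d <- diagnostic_draws(fit)
#   sum(d$Divergent)  # total number of divergent transitions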
{ insight::check_if_installed("brms") nuts_parameters <- brms::nuts_params(posterior) nuts_parameters$idvar <- paste0( nuts_parameters$Chain, "_", nuts_parameters$Iteration ) out <- stats::reshape( nuts_parameters, v.names = "Value", idvar = "idvar", timevar = "Parameter", direction = "wide" ) out$idvar <- NULL out <- merge( out, brms::log_posterior(posterior), by = c("Chain", "Iteration"), sort = FALSE ) # Rename names(out)[names(out) == "Value.accept_stat__"] <- "Acceptance_Rate" names(out)[names(out) == "Value.treedepth__"] <- "Tree_Depth" names(out)[names(out) == "Value.stepsize__"] <- "Step_Size" names(out)[names(out) == "Value.divergent__"] <- "Divergent" names(out)[names(out) == "Value.n_leapfrog__"] <- "n_Leapfrog" names(out)[names(out) == "Value.energy__"] <- "Energy" names(out)[names(out) == "Value"] <- "LogPosterior" out } bayestestR/R/effective_sample.R0000644000176200001440000001646714765755711016264 0ustar liggesusers#' Effective Sample Size (ESS) #' #' Effective Sample Size (ESS) is a measure of how much independent information #' there is in autocorrelated chains. It is used to assess the quality of MCMC #' samples. A higher ESS indicates more reliable estimates. For most #' applications, an effective sample size greater than 1,000 is sufficient for #' stable estimates (Bürkner, 2017). This function returns the effective sample #' size (ESS) for various Bayesian model objects. For `brmsfit` objects, the #' returned ESS corresponds to the bulk-ESS (and the tail-ESS is also returned). #' #' @param model A `stanreg`, `stanfit`, `brmsfit`, `blavaan`, or `MCMCglmm` object. #' @param ... Currently not used. #' @inheritParams hdi #' #' @inheritSection hdi Model components #' #' @return A data frame with two columns: Parameter name and effective sample size (ESS). #' #' @details #' - **Effective Sample (ESS)** should be as large as possible, altough #' for most applications, an effective sample size greater than 1,000 is #' sufficient for stable estimates (Bürkner, 2017). The ESS corresponds to the #' number of independent samples with the same estimation power as the N #' autocorrelated samples. It is is a measure of \dQuote{how much independent #' information there is in autocorrelated chains} (*Kruschke 2015, p182-3*). #' #' - **Bulk-ESS** is useful as a diagnostic for the sampling efficiency in #' the bulk of the posterior. It is defined as the effective sample size for #' rank normalized values using split chains. It can be interpreted as the #' reliability of indices of central tendency (mean, median, etc.). #' #' - **Tail-ESS** is useful as a diagnostic for the sampling efficiency in #' the tails of the posterior. It is defined as the minimum of the effective #' sample sizes for 5% and 95% quantiles. It can be interpreted as the #' reliability of indices that depend on the tails of the distribution (e.g., #' credible intervals, tail probabilities, etc.). #' #' @references #' - Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, #' and Stan. Academic Press. #' - Bürkner, P. C. (2017). brms: An R package for Bayesian multilevel models #' using Stan. Journal of Statistical Software, 80(1), 1-28 #' - Vehtari, A., Gelman, A., Simpson, D., Carpenter, B., & Bürkner, P.-C. #' (2021). Rank-normalization, folding, and localization: An improved R-hat #' for assessing convergence of MCMC. Bayesian Analysis, 16(2), 667-718. 
#' #' @examplesIf all(insight::check_if_installed(c("rstanarm", "brms", "posterior"), quietly = TRUE)) #' \donttest{ #' model <- suppressWarnings(rstanarm::stan_glm( #' mpg ~ wt + gear, #' data = mtcars, #' chains = 2, #' iter = 200, #' refresh = 0 #' )) #' effective_sample(model) #' #' model <- suppressWarnings(brms::brm( #' mpg ~ wt, #' data = mtcars, #' chains = 2, #' iter = 200, #' refresh = 0 #' )) #' effective_sample(model) #' } #' @export effective_sample <- function(model, ...) { UseMethod("effective_sample") } #' @export effective_sample.default <- function(model, ...) { insight::format_error( paste0( "'effective_sample()' is not yet implemented for objects of class '", class(model)[1], "'." ) ) } #' @rdname effective_sample #' @export effective_sample.brmsfit <- function(model, effects = "fixed", component = "conditional", parameters = NULL, ...) { pars <- insight::find_parameters( model, effects = effects, component = component, parameters = parameters, flatten = TRUE ) insight::check_if_installed("posterior") idx <- as.data.frame(posterior::summarise_draws(model)) rows_to_keep <- idx$variable %in% pars # ess_*() functions are defined in: # https://github.com/stan-dev/posterior/blob/master/R/convergence.R data.frame( Parameter = idx$variable[rows_to_keep], ESS = round(idx[rows_to_keep, "ess_bulk"]), ESS_tail = round(idx[rows_to_keep, "ess_tail"]), stringsAsFactors = FALSE, row.names = NULL ) } #' @export effective_sample.stanreg <- function(model, effects = "fixed", component = "location", parameters = NULL, ...) { effective_sample.brmsfit( model, effects = effects, component = component, parameters = parameters, ... ) } #' @export effective_sample.stanmvreg <- function(model, effects = "fixed", component = "location", parameters = NULL, ...) { pars <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) insight::check_if_installed("posterior") idx <- as.data.frame(posterior::summarise_draws(model)) rows_to_keep <- idx$variable %in% colnames(pars) # ess_*() functions are defined in: # https://github.com/stan-dev/posterior/blob/master/R/convergence.R data.frame( Parameter = idx$variable[rows_to_keep], ESS = round(idx[rows_to_keep, "ess_bulk"]), ESS_tail = round(idx[rows_to_keep, "ess_tail"]), stringsAsFactors = FALSE, row.names = NULL ) } #' @export effective_sample.stanfit <- function(model, effects = "fixed", parameters = NULL, ...) { pars <- insight::get_parameters( model, effects = effects, parameters = parameters ) insight::check_if_installed("rstan") s <- as.data.frame(rstan::summary(model)$summary) s <- s[rownames(s) %in% colnames(pars), ] data.frame( Parameter = rownames(s), ESS = s[["n_eff"]], stringsAsFactors = FALSE, row.names = NULL ) } #' @export effective_sample.blavaan <- function(model, parameters = NULL, ...) { insight::check_if_installed("blavaan") ESS <- blavaan::blavInspect(model, what = "neff") data.frame( Parameter = colnames(insight::get_parameters(model)), ESS = ESS, stringsAsFactors = FALSE, row.names = NULL ) } #' @export effective_sample.MCMCglmm <- function(model, effects = "fixed", parameters = NULL, ...) 
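# For MCMCglmm objects, effective sample sizes are not re-estimated here but
# read from the model summary ("eff.samp" of the fixed effects and of the
# G-structure variance components). Conceptually, the ESS relates the number
# of draws to their autocorrelation, roughly N / (1 + 2 * sum(rho_k)); a
# rough, illustrative sketch for a single numeric chain `draws`:
#   rho <- stats::acf(draws, plot = FALSE)$acf[-1]
#   length(draws) / (1 + 2 * sum(rho[rho > 0]))
# (The Stan-based methods above instead rely on the rank-normalized
# estimators from the 'posterior' and 'rstan' packages.)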
{ pars <- insight::get_parameters( model, effects = effects, parameters = parameters, summary = TRUE ) s.fixed <- as.data.frame(summary(model)$solutions) s.random <- as.data.frame(summary(model)$Gcovariances) es <- data.frame( Parameter = rownames(s.fixed), ESS = round(s.fixed[["eff.samp"]]), stringsAsFactors = FALSE, row.names = NULL ) if (nrow(s.random) > 0L) { es <- rbind(es, data.frame( Parameter = rownames(s.random), ESS = round(s.random[["eff.samp"]]), stringsAsFactors = FALSE, row.names = NULL )) } es[match(pars[[1]], es$Parameter), ] } bayestestR/R/plot.R0000644000176200001440000000456514542333405013716 0ustar liggesusers#' @export plot.equivalence_test <- function(x, ...) { insight::check_if_installed("see", "to plot results from equivalence-test") NextMethod() } #' @export plot.p_direction <- function(x, ...) { insight::check_if_installed("see", "to plot results from p_direction()") NextMethod() } #' @export plot.point_estimate <- function(x, ...) { insight::check_if_installed("see", "to plot point-estimates") NextMethod() } #' @export plot.map_estimate <- function(x, ...) { insight::check_if_installed("see", "to plot point-estimates") NextMethod() } #' @export plot.rope <- function(x, ...) { insight::check_if_installed("see", "to plot ROPE") NextMethod() } #' @export plot.bayestestR_hdi <- function(x, ...) { insight::check_if_installed("see", "to plot HDI") NextMethod() } #' @export plot.bayestestR_eti <- function(x, ...) { insight::check_if_installed("see", "to plot credible intervals") NextMethod() } #' @export plot.bayestestR_si <- function(x, ...) { insight::check_if_installed("see", "to plot support intervals") NextMethod() } #' @export plot.bayesfactor_parameters <- function(x, ...) { insight::check_if_installed("see", "to plot Savage-Dickey Bayes factor") NextMethod() } #' @export plot.bayesfactor_models <- function(x, ...) { insight::check_if_installed("see", "to plot models' Bayes factors") NextMethod() } #' @export plot.estimate_density <- function(x, ...) { insight::check_if_installed("see", "to plot densities") NextMethod() } #' @export plot.estimate_density_df <- function(x, ...) { insight::check_if_installed("see", "to plot models' densities") NextMethod() } #' @export plot.p_significance <- function(x, ...) { insight::check_if_installed("see", "to plot practical significance") NextMethod() } #' @export plot.describe_posterior <- function(x, stack = FALSE, ...) { insight::check_if_installed("see", "to plot posterior samples") insight::check_if_installed("ggplot2", "to plot posterior samples") model <- .retrieve_model(x) if (!is.null(model)) { graphics::plot(estimate_density(model), stack = stack, ...) + ggplot2::labs(title = "Posterior Samples", x = NULL, y = NULL) } else { insight::format_alert("Could not find model-object. Try `plot(estimate_density(model))` instead.") } } bayestestR/R/bic_to_bf.R0000644000176200001440000000230614706241121014630 0ustar liggesusers#' Convert BIC indices to Bayes Factors via the BIC-approximation method. #' #' The difference between two Bayesian information criterion (BIC) indices of #' two models can be used to approximate Bayes factors via: #' \cr #' \deqn{BF_{10} = e^{(BIC_0 - BIC_1)/2}}{BF10 = exp((BIC0-BIC1)/2)} #' #' @param bic A vector of BIC values. #' @param denominator The BIC value to use as a denominator (to test against). #' @param log If `TRUE`, return the `log(BF)`. #' #' @references #' Wagenmakers, E. J. (2007). A practical solution to the pervasive problems of #' p values. 
Psychonomic bulletin & review, 14(5), 779-804 #' #' @examples #' bic1 <- BIC(lm(Sepal.Length ~ 1, data = iris)) #' bic2 <- BIC(lm(Sepal.Length ~ Species, data = iris)) #' bic3 <- BIC(lm(Sepal.Length ~ Species + Petal.Length, data = iris)) #' bic4 <- BIC(lm(Sepal.Length ~ Species * Petal.Length, data = iris)) #' #' bic_to_bf(c(bic1, bic2, bic3, bic4), denominator = bic1) #' @return The Bayes Factors corresponding to the BIC values against the denominator. #' #' @export bic_to_bf <- function(bic, denominator, log = FALSE) { delta <- (denominator - bic) / 2 if (log) { delta } else { exp(delta) } } bayestestR/R/spi.R0000644000176200001440000003475315004650367013540 0ustar liggesusers#' Shortest Probability Interval (SPI) #' #' Compute the **Shortest Probability Interval (SPI)** of posterior distributions. #' The SPI is a more computationally stable HDI. The implementation is based on #' the algorithm from the **SPIn** package. #' #' @inheritParams hdi #' @inherit ci return #' @inherit hdi seealso #' @family ci #' #' @note The code to compute the SPI was adapted from the **SPIn** package, #' and slightly modified to be more robust for Stan models. Thus, credits go #' to Ying Liu for the original SPI algorithm and R implementation. #' #' @details The SPI is an alternative method to the HDI ([hdi()]) to quantify #' uncertainty of (posterior) distributions. The SPI is said to be more stable #' than the HDI, because, the _"HDI can be noisy (that is, have a high Monte Carlo error)"_ #' (Liu et al. 2015). Furthermore, the HDI is sensitive to additional assumptions, #' in particular assumptions related to the different estimation methods, which #' can make the HDI less accurate or reliable. #' #' @references #' Liu, Y., Gelman, A., & Zheng, T. (2015). Simulation-efficient shortest probability intervals. Statistics and Computing, 25(4), 809–819. https://doi.org/10.1007/s11222-015-9563-8 #' #' @examplesIf require("quadprog") && require("rstanarm") #' library(bayestestR) #' #' posterior <- rnorm(1000) #' spi(posterior) #' spi(posterior, ci = c(0.80, 0.89, 0.95)) #' #' df <- data.frame(replicate(4, rnorm(100))) #' spi(df) #' spi(df, ci = c(0.80, 0.89, 0.95)) #' \donttest{ #' library(rstanarm) #' model <- suppressWarnings( #' stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' ) #' spi(model) #' } #' #' @export spi <- function(x, ...) { UseMethod("spi") } #' @export spi.default <- function(x, ...) { insight::format_error(paste0("'spi()' is not yet implemented for objects of class '", class(x)[1], "'.")) } #' @rdname spi #' @export spi.numeric <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- do.call(rbind, lapply(ci, function(i) { .spi(x = x, ci = i, verbose = verbose) })) class(out) <- unique(c("bayestestR_hdi", "see_hdi", "bayestestR_ci", "see_ci", "bayestestR_spi", class(out))) attr(out, "data") <- x out } #' @export #' @rdname spi #' @inheritParams p_direction spi.data.frame <- function(x, ci = 0.95, rvar_col = NULL, verbose = TRUE, ...) 
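# The data-frame method either computes the SPI column-wise on a data frame
# of posterior draws, or -- when `rvar_col` is given -- dispatches on that
# posterior::rvar column and re-attaches the remaining data-grid columns.
# A small usage sketch (plain draws, compared against the HDI):
#   draws <- data.frame(a = rnorm(1000), b = rexp(1000))
#   spi(draws, ci = 0.9)
#   hdi(draws, ci = 0.9)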
{ obj_name <- insight::safe_deparse_symbol(substitute(x)) x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::spi cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x, long = length(ci) > 1L)) } dat <- .compute_interval_dataframe(x = x, ci = ci, verbose = verbose, fun = "spi") attr(dat, "object_name") <- obj_name dat } #' @export spi.draws <- function(x, ci = 0.95, verbose = TRUE, ...) { dat <- .compute_interval_dataframe(x = .posterior_draws_to_df(x), ci = ci, verbose = verbose, fun = "spi") attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export spi.rvar <- spi.draws #' @export spi.MCMCglmm <- function(x, ci = 0.95, verbose = TRUE, ...) { hdi(x, ci = ci, verbose = verbose, ci_method = "spi", ...) } #' @export spi.bamlss <- function(x, ci = 0.95, component = "all", verbose = TRUE, ...) { hdi(x, ci = ci, component = component, verbose = verbose, ci_method = "spi") } #' @export spi.mcmc <- function(x, ci = 0.95, verbose = TRUE, ...) { hdi(x, ci = ci, verbose = verbose, ci_method = "spi", ...) } #' @export spi.bcplm <- spi.mcmc #' @export spi.bayesQR <- spi.mcmc #' @export spi.blrm <- spi.mcmc #' @export spi.mcmc.list <- spi.mcmc #' @export spi.BGGM <- spi.mcmc #' @export spi.sim.merMod <- function(x, ci = 0.95, effects = "fixed", parameters = NULL, verbose = TRUE, ...) { hdi( x, ci = ci, effects = effects, parameters = parameters, verbose = verbose, ci_method = "spi", ... ) } #' @export spi.sim <- function(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) { hdi(x, ci = ci, parameters = parameters, verbose = verbose, ci_method = "spi", ...) } #' @export spi.emmGrid <- function(x, ci = 0.95, verbose = TRUE, ...) { xdf <- insight::get_parameters(x) out <- spi(xdf, ci = ci, verbose = verbose, ...) out <- .append_datagrid(out, x, long = length(ci) > 1L) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export spi.emm_list <- spi.emmGrid #' @export spi.slopes <- function(x, ci = 0.95, verbose = TRUE, ...) { xrvar <- .get_marginaleffects_draws(x) out <- spi(xrvar, ci = ci, verbose = verbose, ...) out <- .append_datagrid(out, x, long = length(ci) > 1L) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export spi.comparisons <- spi.slopes #' @export spi.predictions <- spi.slopes #' @export spi.stanreg <- function(x, ci = 0.95, effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ...) { cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output( spi( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), ci = ci, verbose = verbose, ... ), cleaned_parameters, inherits(x, "stanmvreg") ) attr(out, "clean_parameters") <- cleaned_parameters attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- unique(c("bayestestR_hdi", "see_hdi", "bayestestR_spi", class(out))) out } #' @export spi.stanfit <- spi.stanreg #' @export spi.blavaan <- spi.stanreg #' @rdname spi #' @export spi.brmsfit <- function(x, ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ...) { cleaned_parameters <- insight::clean_parameters(x) out <- .prepare_output( spi( insight::get_parameters( x, effects = effects, component = component, parameters = parameters, ... ), ci = ci, verbose = verbose, ... 
), cleaned_parameters ) attr(out, "clean_parameters") <- cleaned_parameters attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- unique(c("bayestestR_hdi", "see_hdi", "bayestestR_spi", class(out))) out } #' @export spi.BFBayesFactor <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- spi(insight::get_parameters(x), ci = ci, verbose = verbose, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @rdname spi #' @export spi.get_predicted <- function(x, ci = 0.95, use_iterations = FALSE, verbose = TRUE, ...) { if (isTRUE(use_iterations)) { if ("iterations" %in% names(attributes(x))) { out <- spi(as.data.frame(t(attributes(x)$iterations)), ci = ci, verbose = verbose, ...) } else { insight::format_error("No iterations present in the output.") } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) } else { out <- spi(as.numeric(x), ci = ci, verbose = verbose, ...) } out } # Helper ------------------------------------------------------------------ # Code taken (and slightly simplified) from: # SPIn::SPIn() # Author: Ying Liu yliu@stat.columbia.edu # Reference: Simulation efficient shortest probability intervals. (arXiv:1302.2142) # Code licensed under License: GPL (>= 2) .spi <- function(x, ci, verbose = TRUE) { insight::check_if_installed("quadprog") check_ci <- .check_ci_argument(x, ci, verbose) if (!is.null(check_ci)) { return(check_ci) } dens <- stats::density(x) n.sims <- length(x) conf <- 1 - ci nn <- round(n.sims * conf) # validation check for very low CI levels if (nn >= n.sims) { nn <- n.sims <- 1 } x <- sort(x) xx <- x[(n.sims - nn):n.sims] - x[1:(nn + 1)] m <- min(xx) k <- which(xx == m)[1] l <- x[k] ui <- n.sims - nn + k - 1 u <- x[ui] bw <- round((sqrt(n.sims) - 1) / 2) k <- which(x == l)[1] ui <- which(x == u)[1] # lower bound if (!anyNA(k) && all(k == 1)) { x.l <- l } else { x.l <- .safe(.spi_lower(bw = bw, n.sims = n.sims, k = k, l = l, dens = dens, x = x)) frac <- 1 while (is.null(x.l)) { frac <- frac - 0.1 x.l <- .safe(.spi_lower(bw = frac * bw, n.sims = n.sims, k = k, l = l, dens = dens, x = x)) if (frac <= 0.1) { insight::format_alert("Could not find a solution for the SPI lower bound.") x.l <- NA } } } # upper bound if (!anyNA(ui) && all(ui == n.sims)) { x.u <- u } else { x.u <- .safe(.spi_upper(bw = bw, n.sims = n.sims, ui = ui, u = u, dens = dens, x = x)) frac <- 1 while (is.null(x.u)) { frac <- frac - 0.1 x.u <- .safe(.spi_upper(bw = frac * bw, n.sims = n.sims, ui = ui, u = u, dens = dens, x = x)) if (frac <= 0.1) { insight::format_alert("Could not find a solution for the SPI upper bound.") x.u <- NA } } } # output data.frame(CI = ci, CI_low = x.l, CI_high = x.u) } .spi_lower <- function(bw, n.sims, k, l, dens, x) { l.l <- max(1, k - bw) l.u <- k + (k - l.l) range_ll_lu <- l.u - l.l range_ll_k <- k - l.l n.l <- range_ll_lu + 1 D.l <- matrix(nrow = n.l, ncol = n.l) # create quadratic function p <- (l.l:l.u) / (n.sims + 1) q <- 1 - p Q <- stats::quantile(x, p) d.q <- rep(0, n.l) for (r in 1:n.l) { d.q[r] <- dens$y[which.min(abs(dens$x - Q[r]))] } Q. 
<- 1 / d.q diag(D.l) <- 2 * (Q^2 + p * q * Q.^2 / (n.sims + 2)) d.l <- 2 * Q * l if (n.l > 1) { for (j in 1:(n.l - 1)) { for (m in (j + 1):n.l) { D.l[j, m] <- Q.[j] * Q.[m] * p[j] * q[m] * 2 / (n.sims + 2) + Q[j] * Q[m] * 2 D.l[m, j] <- D.l[j, m] } } } # create constraint matrix A.l <- matrix(0, nrow = range_ll_lu + 3, ncol = range_ll_lu + 1) A.l[1, ] <- 1 if (bw > 1 && k > 2) { for (j in 1:(range_ll_k - 1)) { if (x[l.l + j + 1] == x[l.l + j]) { A.l[1 + j, j + 1] <- 1 A.l[1 + j, j + 2] <- -1 } else { aa <- (x[l.l + j] - x[l.l + j - 1]) / (x[l.l + j + 1] - x[l.l + j]) A.l[1 + j, j] <- 1 A.l[1 + j, j + 1] <- -(aa + 1) A.l[1 + j, j + 2] <- aa } } for (j in 0:(l.u - k - 2)) { if (x[k + j + 1] == x[k + j + 2]) { A.l[range_ll_k + 1 + j, range_ll_k + 2 + j] <- 1 A.l[range_ll_k + 1 + j, range_ll_k + 3 + j] <- -1 } else { aa <- (x[k + j] - x[k + j + 1]) / (x[k + j + 1] - x[k + j + 2]) A.l[range_ll_k + 1 + j, range_ll_k + 1 + j] <- -1 A.l[range_ll_k + 1 + j, range_ll_k + 2 + j] <- aa + 1 A.l[range_ll_k + 1 + j, range_ll_k + 3 + j] <- -aa } } } if (x[k + 1] == x[k]) { aa <- (x[k] - x[k - 1]) / (x[k + 1] - x[k] + 0.000001) } else { aa <- (x[k] - x[k - 1]) / (x[k + 1] - x[k]) } A.l[range_ll_lu, range_ll_k + 1] <- aa - 1 A.l[range_ll_lu, range_ll_k] <- 1 A.l[range_ll_lu, range_ll_k + 2] <- -aa A.l[range_ll_lu + 1, range_ll_lu] <- 1 A.l[range_ll_lu + 1, range_ll_lu + 1] <- -1 A.l[range_ll_lu + 2, 1] <- 1 A.l[range_ll_lu + 3, range_ll_lu + 1] <- 1 A.l <- t(A.l) w.l <- quadprog::solve.QP(D.l, d.l, A.l, c(1, rep(0, range_ll_lu + 2)), range_ll_lu) x.l <- w.l$solution %*% x[l.l:l.u] x.l } .spi_upper <- function(bw, n.sims, ui, u, dens, x) { u.u <- min(n.sims, ui + bw) u.l <- ui - (u.u - ui) range_ul_uu <- u.u - u.l range_ul_ui <- ui - u.l n.u <- range_ul_uu + 1 D.u <- matrix(nrow = n.u, ncol = n.u) # create quadratic function p <- (u.l:u.u) / (n.sims + 1) q <- 1 - p Q <- stats::quantile(x, p) d.q <- rep(0, n.u) for (r in 1:n.u) { d.q[r] <- dens$y[which.min(abs(dens$x - Q[r]))] } Q. 
<- 1 / d.q diag(D.u) <- 2 * (Q^2 + p * q * Q.^2 / (n.sims + 2)) d.u <- 2 * Q * u if (n.u > 1) { for (j in 1:(n.u - 1)) { for (m in (j + 1):n.u) { D.u[j, m] <- Q.[j] * Q.[m] * p[j] * q[m] * 2 / (n.sims + 2) + Q[j] * Q[m] * 2 D.u[m, j] <- D.u[j, m] } } } # create constraint matrix A.u <- matrix(0, nrow = range_ul_uu + 3, ncol = range_ul_uu + 1) A.u[1, ] <- 1 if (bw > 1 && range_ul_ui > 1) { for (j in 1:(range_ul_ui - 1)) { if (x[u.l + j + 1] == x[u.l + j]) { A.u[1 + j, j + 1] <- 1 A.u[1 + j, j + 2] <- -1 } else { aa <- (x[u.l + j] - x[u.l + j - 1]) / (x[u.l + j + 1] - x[u.l + j]) A.u[1 + j, j] <- 1 A.u[1 + j, j + 1] <- -(aa + 1) A.u[1 + j, j + 2] <- aa } } i <- 0 for (j in (range_ul_ui):(range_ul_uu - 2)) { if (x[ui + i + 1] == x[ui + i + 2]) { A.u[1 + j, j + 2] <- 1 A.u[1 + j, j + 3] <- -1 } else { aa <- (x[ui + i] - x[ui + i + 1]) / (x[ui + i + 1] - x[ui + i + 2]) A.u[1 + j, j + 1] <- -1 A.u[1 + j, j + 2] <- aa + 1 A.u[1 + j, j + 3] <- -aa } i <- i + 1 } } if (x[ui + 1] == x[ui]) { aa <- (x[ui] - x[ui - 1]) / (x[ui + 2] - x[ui]) A.u[range_ul_uu, range_ul_ui] <- 1 A.u[range_ul_uu, range_ul_ui + 1] <- aa - 1 A.u[range_ul_uu, range_ul_ui + 3] <- -aa } else { aa <- (x[ui] - x[ui - 1]) / (x[ui + 1] - x[ui]) A.u[range_ul_uu, range_ul_ui] <- 1 A.u[range_ul_uu, range_ul_ui + 1] <- aa - 1 A.u[range_ul_uu, range_ul_ui + 2] <- -aa } A.u[range_ul_uu + 1, range_ul_uu] <- 1 A.u[range_ul_uu + 1, range_ul_uu + 1] <- -1 A.u[range_ul_uu + 2, 1] <- 1 A.u[range_ul_uu + 3, range_ul_uu + 1] <- 1 A.u <- t(A.u) w.u <- quadprog::solve.QP(D.u, d.u, A.u, c(1, rep(0, range_ul_uu + 2)), range_ul_uu) x.u <- w.u$solution %*% x[u.l:u.u] return(x.u) } bayestestR/R/p_direction.R0000644000176200001440000005615515052646230015241 0ustar liggesusers#' Probability of Direction (pd) #' #' Compute the **Probability of Direction** (***pd***, also known as the Maximum #' Probability of Effect - *MPE*). This can be interpreted as the probability #' that a parameter (described by its posterior distribution) is strictly #' positive or negative (whichever is the most probable). Although differently #' expressed, this index is fairly similar (*i.e.*, is strongly correlated) to #' the frequentist **p-value** (see details). #' #' @param x A vector representing a posterior distribution, a data frame of #' posterior draws (samples be parameter). Can also be a Bayesian model. #' @param method Can be `"direct"` or one of methods of [`estimate_density()`], #' such as `"kernel"`, `"logspline"` or `"KernSmooth"`. See details. #' @param null The value considered as a "null" effect. Traditionally 0, but #' could also be 1 in the case of ratios of change (OR, IRR, ...). #' @param as_p If `TRUE`, the p-direction (pd) values are converted to a #' frequentist p-value using [`pd_to_p()`]. #' @param remove_na Should missing values be removed before computation? Note #' that `Inf` (infinity) are *not* removed. #' @param rvar_col Name of an `rvar`-type column. If `NULL`, each column in the #' data frame is assumed to represent draws from a posterior distribution. #' @inheritParams hdi #' @inheritParams insight::get_parameters.BFBayesFactor #' #' @inheritSection hdi Model components #' #' @section What is the *pd*?: #' #' The Probability of Direction (pd) is an index of effect existence, representing #' the certainty with which an effect goes in a particular direction (i.e., is #' positive or negative / has a sign), typically ranging from 0.5 to 1 (but see #' next section for cases where it can range between 0 and 1). 
Beyond #' its simplicity of interpretation, understanding and computation, this index #' also presents other interesting properties: #' - Like other posterior-based indices, *pd* is solely based on the posterior #' distributions and does not require any additional information from the data #' or the model (e.g., such as priors, as in the case of Bayes factors). #' - It is robust to the scale of both the response variable and the predictors. #' - It is strongly correlated with the frequentist p-value, and can thus #' be used to draw parallels and give some reference to readers non-familiar #' with Bayesian statistics (Makowski et al., 2019). #' #' @section Relationship with the p-value: #' #' In most cases, it seems that the *pd* has a direct correspondence with the #' frequentist one-sided *p*-value through the formula (for two-sided *p*): #' \ifelse{html}{\out{p = 2 * (1 - pd)}}{\eqn{p = 2 \times (1 - p_d)}} #' Thus, a two-sided p-value of respectively `.1`, `.05`, `.01` and `.001` would #' correspond approximately to a *pd* of `95%`, `97.5%`, `99.5%` and `99.95%`. #' See [pd_to_p()] for details. #' #' @section Possible Range of Values: #' #' The largest value *pd* can take is 1 - the posterior is strictly directional. #' However, the smallest value *pd* can take depends on the parameter space #' represented by the posterior. #' #' **For a continuous parameter space**, exact values of 0 (or any point null #' value) are not possible, and so 100% of the posterior has _some_ sign, some #' positive, some negative. Therefore, the smallest the *pd* can be is 0.5 - #' with an equal posterior mass of positive and negative values. Values close to #' 0.5 _cannot_ be used to support the null hypothesis (that the parameter does #' _not_ have a direction) is a similar why to how large p-values cannot be used #' to support the null hypothesis (see [`pd_to_p()`]; Makowski et al., 2019). #' #' **For a discrete parameter space or a parameter space that is a mixture #' between discrete and continuous spaces**, exact values of 0 (or any point #' null value) _are_ possible! Therefore, the smallest the *pd* can be is 0 - #' with 100% of the posterior mass on 0. Thus values close to 0 can be used to #' support the null hypothesis (see van den Bergh et al., 2021). #' #' Examples of posteriors representing discrete parameter space: #' - When a parameter can only take discrete values. #' - When a mixture prior/posterior is used (such as the spike-and-slab prior; #' see van den Bergh et al., 2021). #' - When conducting Bayesian model averaging (e.g., [weighted_posteriors()] or #' `brms::posterior_average`). #' #' @section Methods of computation: #' #' The *pd* is defined as: #' \deqn{p_d = max({Pr(\hat{\theta} < \theta_{null}), Pr(\hat{\theta} > \theta_{null})})}{pd = max(mean(x < null), mean(x > null))} #' #' The most simple and direct way to compute the *pd* is to compute the #' proportion of positive (or larger than `null`) posterior samples, the #' proportion of negative (or smaller than `null`) posterior samples, and take #' the larger of the two. This "simple" method is the most straightforward, but #' its precision is directly tied to the number of posterior draws. #' #' The second approach relies on [density estimation][estimate_density]: It starts by #' estimating the continuous-smooth density function (for which many methods are #' available), and then computing the [area under the curve][area_under_curve] #' (AUC) of the density curve on either side of `null` and taking the maximum #' between them. 
Note the this approach assumes a continuous density function, #' and so **when the posterior represents a (partially) discrete parameter #' space, only the direct method _must_ be used** (see above). #' #' @return #' Values between 0.5 and 1 *or* between 0 and 1 (see above) corresponding to #' the probability of direction (pd). #' #' @seealso [pd_to_p()] to convert between Probability of Direction (pd) and p-value. #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @references #' - Makowski, D., Ben-Shachar, M. S., Chen, S. A., & Lüdecke, D. (2019). #' Indices of effect existence and significance in the Bayesian framework. #' Frontiers in psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767} #' - van den Bergh, D., Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers, E. J. #' (2021). A cautionary note on estimating effect size. Advances in Methods #' and Practices in Psychological Science, 4(1). \doi{10.1177/2515245921992035} #' #' @examplesIf requireNamespace("rstanarm", quietly = TRUE) && requireNamespace("emmeans", quietly = TRUE) && requireNamespace("brms", quietly = TRUE) && requireNamespace("BayesFactor", quietly = TRUE) #' library(bayestestR) #' #' # Simulate a posterior distribution of mean 1 and SD 1 #' # ---------------------------------------------------- #' posterior <- rnorm(1000, mean = 1, sd = 1) #' p_direction(posterior) #' p_direction(posterior, method = "kernel") #' #' # Simulate a dataframe of posterior distributions #' # ----------------------------------------------- #' df <- data.frame(replicate(4, rnorm(100))) #' p_direction(df) #' p_direction(df, method = "kernel") #' #' \donttest{ #' # rstanarm models #' # ----------------------------------------------- #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, #' data = mtcars, #' chains = 2, refresh = 0 #' ) #' p_direction(model) #' p_direction(model, method = "kernel") #' #' # emmeans #' # ----------------------------------------------- #' p_direction(emmeans::emtrends(model, ~1, "wt", data = mtcars)) #' #' # brms models #' # ----------------------------------------------- #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' p_direction(model) #' p_direction(model, method = "kernel") #' #' # BayesFactor objects #' # ----------------------------------------------- #' bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) #' p_direction(bf) #' p_direction(bf, method = "kernel") #' } #' #' @examplesIf requireNamespace("posterior", quietly = TRUE) #' # Using "rvar_col" #' x <- data.frame(mu = c(0, 0.5, 1), sigma = c(1, 0.5, 0.25)) #' x$my_rvar <- posterior::rvar_rng(rnorm, 3, mean = x$mu, sd = x$sigma) #' x #' p_direction(x, rvar_col = "my_rvar") #' #' @export p_direction <- function(x, ...) { UseMethod("p_direction") } #' @rdname p_direction #' @export pd <- p_direction #' @export p_direction.default <- function(x, ...) { insight::format_error(paste0("'p_direction()' is not yet implemented for objects of class '", class(x)[1], "'.")) } #' @rdname p_direction #' @export p_direction.numeric <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { obj_name <- insight::safe_deparse_symbol(substitute(x)) out <- p_direction( data.frame(Posterior = x), method = method, null = null, as_p = as_p, remove_na = remove_na, ... 
) attr(out, "object_name") <- obj_name out } #' @rdname p_direction #' @param rvar_col A single character - the name of an `rvar` column in the data #' frame to be processed. See example in [p_direction()]. #' @export p_direction.data.frame <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, rvar_col = NULL, ...) { obj_name <- insight::safe_deparse_symbol(substitute(x)) x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::p_direction cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x)) } x <- .select_nums(x) if (ncol(x) == 1) { pd <- .p_direction( x[[1]], method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) } else { pd <- sapply( x, .p_direction, method = method, null = null, as_p = as_p, remove_na = remove_na, simplify = TRUE, ... ) } out <- data.frame( Parameter = names(x), pd = pd, row.names = NULL, stringsAsFactors = FALSE ) # rename column if (as_p) { colnames(out)[2] <- "p" } attr(out, "object_name") <- obj_name attr(out, "as_p") <- as_p class(out) <- unique(c("p_direction", "see_p_direction", class(out))) out } #' @export p_direction.draws <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { p_direction( .posterior_draws_to_df(x), method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) } #' @export p_direction.rvar <- p_direction.draws #' @export p_direction.MCMCglmm <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { nF <- x$Fixed$nfl out <- p_direction(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_direction.mcmc <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { p_direction( as.data.frame(x), method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) } #' @export p_direction.BGGM <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { p_direction( as.data.frame(x), method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) } #' @export p_direction.bcplm <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { p_direction( insight::get_parameters(x), method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) } #' @export p_direction.mcmc.list <- p_direction.bcplm #' @export p_direction.blrm <- p_direction.bcplm #' @export p_direction.bayesQR <- p_direction.bcplm #' @export p_direction.bamlss <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, component = "all", ...) { out <- p_direction( insight::get_parameters(x, component = component), method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) out <- .add_clean_parameters_attribute(out, x) out } #' @export p_direction.emmGrid <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { xdf <- insight::get_parameters(x) out <- p_direction( xdf, method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_direction.emm_list <- p_direction.emmGrid #' @export p_direction.slopes <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) 
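# marginaleffects objects (slopes, comparisons, predictions) are handled by
# extracting their posterior draws as an rvar, computing the pd on those
# draws, and re-attaching the data grid. A hedged sketch (assuming a Bayesian
# model `m` fitted with rstanarm or brms):
#   p_direction(marginaleffects::avg_slopes(m))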
{ xrvar <- .get_marginaleffects_draws(x) out <- p_direction( xrvar, method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_direction.comparisons <- p_direction.slopes #' @export p_direction.predictions <- p_direction.slopes #' @keywords internal .p_direction_models <- function(x, effects, component, parameters, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { p_direction( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) } #' @export p_direction.sim.merMod <- function(x, effects = "fixed", parameters = NULL, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { out <- .p_direction_models( x = x, effects = effects, component = "conditional", parameters = parameters, method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) attr(out, "data") <- insight::get_parameters( x, effects = effects, parameters = parameters ) out } #' @export p_direction.sim <- function(x, parameters = NULL, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { out <- .p_direction_models( x = x, effects = "fixed", component = "conditional", parameters = parameters, method = method, null = null, as_p = as_p, remove_na = remove_na, ... ) attr(out, "data") <- insight::get_parameters(x, parameters = parameters) out } #' @export p_direction.stanreg <- function(x, effects = "fixed", component = "location", parameters = NULL, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { cleaned_parameters <- .get_cleaned_parameters(x, ...) out <- .prepare_output( p_direction( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), method = method, null = null, as_p = as_p, remove_na = remove_na, ... ), cleaned_parameters, inherits(x, "stanmvreg") ) attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("p_direction", "see_p_direction", class(out))) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) attr(out, "as_p") <- as_p out } #' @export p_direction.stanfit <- p_direction.stanreg #' @export p_direction.blavaan <- p_direction.stanreg #' @rdname p_direction #' @export p_direction.brmsfit <- function(x, effects = "fixed", component = "conditional", parameters = NULL, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { cleaned_parameters <- .get_cleaned_parameters(x, ...) out <- .prepare_output( p_direction( insight::get_parameters( x, effects = effects, component = component, parameters = parameters, ... ), method = method, null = null, as_p = as_p, remove_na = remove_na, ... ), cleaned_parameters ) attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("p_direction", "see_p_direction", class(out))) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) attr(out, "as_p") <- as_p out } #' @export p_direction.BFBayesFactor <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { out <- p_direction( insight::get_parameters(x), method = method, null = null, as_p = as_p, remove_na = remove_na, ... 
) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @rdname p_direction #' @export p_direction.get_predicted <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, use_iterations = FALSE, verbose = TRUE, ...) { if (isTRUE(use_iterations)) { if ("iterations" %in% names(attributes(x))) { out <- p_direction( as.data.frame(t(attributes(x)$iterations)), method = method, null = null, as_p = as_p, remove_na = remove_na, verbose = verbose, ... ) } else { insight::format_error("No iterations present in the output.") } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) } else { out <- p_direction(as.numeric(x), method = method, null = null, as_p = as_p, remove_na = remove_na, verbose = verbose, ... ) } out } #' @export p_direction.parameters_model <- function(x, ...) { out <- data.frame( Parameter = x$Parameter, pd = p_to_pd(p = x[["p"]]), row.names = NULL, stringsAsFactors = FALSE ) if (!is.null(x$Component)) { out$Component <- x$Component } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- unique(c("p_direction", "see_p_direction", class(out))) out } # Definition -------------------------------------------------------------- #' @keywords internal .p_direction <- function(x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ...) { # handle missing values if (remove_na) { x <- x[!is.na(x)] } # sanity check if (length(x) == 0) { insight::format_error("No valid values found. Maybe the data contains only missing values.") } # sanity check if (anyNA(x)) { return(NA_real_) } # any inf values? then warn... if (any(is.infinite(x))) { insight::format_warning("Infinite values detected. These are not removed. Please check your results carefully!") } if (method == "direct") { pdir <- max( length(x[x > null]), # pd positive length(x[x < null]) # pd negative ) / length(x) } else { dens <- estimate_density(x, method = method, precision = 2^10, extend = TRUE, ...) if (length(x[x > null]) > length(x[x < null])) { dens <- dens[dens$x > null, ] } else { dens <- dens[dens$x < null, ] } pdir <- area_under_curve(dens$x, dens$y, method = "spline") if (pdir >= 1) { # Enforce bounds pdir <- 1 } } # convert to frequentist p? if (as_p) { pdir <- pd_to_p(pdir) } pdir } # Methods ----------------------------------------------------------------- #' Convert to Numeric #' #' @inheritParams base::as.numeric #' @method as.numeric p_direction #' @export as.numeric.p_direction <- function(x, ...) { if (inherits(x, "data.frame")) { # check if we have frequentist p-values if (isTRUE(attributes(x)$as_p) && "p" %in% colnames(x)) { as.numeric(as.vector(x$p)) } else { as.numeric(as.vector(x$pd)) } } else { as.vector(x) } } #' @method as.double p_direction #' @export as.double.p_direction <- as.numeric.p_direction #' @method as.vector p_direction #' @export as.vector.p_direction <- as.numeric.p_direction bayestestR/R/bci.R0000644000176200001440000002016615052646230013470 0ustar liggesusers#' Bias Corrected and Accelerated Interval (BCa) #' #' Compute the **Bias Corrected and Accelerated Interval (BCa)** of posterior #' distributions. #' #' @inheritParams hdi #' @inherit ci return #' @inherit hdi details #' @inherit hdi seealso #' @family ci #' #' @inheritSection hdi Model components #' #' @references #' DiCiccio, T. J. and B. Efron. (1996). Bootstrap Confidence Intervals. #' Statistical Science. 11(3): 189–212. 
10.1214/ss/1032280214 #' #' @examples #' posterior <- rnorm(1000) #' bci(posterior) #' bci(posterior, ci = c(0.80, 0.89, 0.95)) #' @export bci <- function(x, ...) { UseMethod("bci") } #' @rdname bci #' @export bcai <- bci #' @rdname bci #' @export bci.numeric <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- do.call(rbind, lapply(ci, function(i) { .bci(x = x, ci = i, verbose = verbose) })) class(out) <- unique(c("bayestestR_eti", "see_eti", "bayestestR_ci", "see_ci", class(out))) attr(out, "data") <- x out } #' @rdname bci #' @inheritParams p_direction #' @export bci.data.frame <- function(x, ci = 0.95, rvar_col = NULL, verbose = TRUE, ...) { obj_name <- insight::safe_deparse_symbol(substitute(x)) x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::bci cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x, long = length(ci) > 1L)) } dat <- .compute_interval_dataframe(x = x, ci = ci, verbose = verbose, fun = "bci") attr(dat, "object_name") <- obj_name dat } #' @export bci.draws <- function(x, ci = 0.95, verbose = TRUE, ...) { dat <- .compute_interval_dataframe(x = .posterior_draws_to_df(x), ci = ci, verbose = verbose, fun = "bci") attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export bci.rvar <- bci.draws #' @export bci.MCMCglmm <- function(x, ci = 0.95, verbose = TRUE, ...) { nF <- x$Fixed$nfl d <- as.data.frame(x$Sol[, 1:nF, drop = FALSE]) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "bci") attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export bci.mcmc <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- as.data.frame(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "bci") attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export bci.bamlss <- function(x, ci = 0.95, component = "all", verbose = TRUE, ...) { d <- insight::get_parameters(x, component = component) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "bci") attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export bci.bcplm <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- insight::get_parameters(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "bci") attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export bci.bayesQR <- bci.bcplm #' @export bci.blrm <- bci.bcplm #' @export bci.mcmc.list <- bci.bcplm #' @export bci.BGGM <- bci.bcplm #' @export bci.sim.merMod <- function(x, ci = 0.95, effects = "fixed", parameters = NULL, verbose = TRUE, ...) { dat <- .compute_interval_simMerMod( x = x, ci = ci, effects = effects, parameters = parameters, verbose = verbose, fun = "bci" ) out <- dat$result attr(out, "data") <- dat$data out } #' @export bci.sim <- function(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) { dat <- .compute_interval_sim( x = x, ci = ci, parameters = parameters, verbose = verbose, fun = "bci" ) out <- dat$result attr(out, "data") <- dat$data out } #' @export bci.emmGrid <- function(x, ci = 0.95, verbose = TRUE, ...) { xdf <- insight::get_parameters(x) dat <- bci(xdf, ci = ci, verbose = verbose, ...) 
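  # A small sketch contrasting the BCa bounds returned by `bci()` with plain
  # equal-tailed quantiles; the log-normal draws are illustrative only:
  #   skewed <- rlnorm(4000, meanlog = 0, sdlog = 1)
  #   bci(skewed, ci = 0.95)   # bias- and skew-adjusted bounds
  #   eti(skewed, ci = 0.95)   # equal-tailed quantiles, for comparison
  # For skewed samples the two intervals should differ noticeably, which is
  # the point of the bias-correction and acceleration terms.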
dat <- .append_datagrid(dat, x, long = length(ci) > 1L) attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export bci.emm_list <- bci.emmGrid #' @export bci.slopes <- function(x, ci = 0.95, verbose = TRUE, ...) { xrvar <- .get_marginaleffects_draws(x) dat <- bci(xrvar, ci = ci, verbose = verbose, ...) dat <- .append_datagrid(dat, x, long = length(ci) > 1L) attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export bci.comparisons <- bci.slopes #' @export bci.predictions <- bci.slopes #' @export bci.stanreg <- function(x, ci = 0.95, effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ...) { out <- .prepare_output( bci( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), ci = ci, verbose = verbose, ... ), .get_cleaned_parameters(x, ...), inherits(x, "stanmvreg") ) class(out) <- unique(c("bayestestR_eti", "see_eti", class(out))) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export bci.stanfit <- bci.stanreg #' @export bci.blavaan <- bci.stanreg #' @rdname bci #' @export bci.brmsfit <- function(x, ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ...) { out <- .prepare_output( bci( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), ci = ci, verbose = verbose, ... ), .get_cleaned_parameters(x, ...) ) class(out) <- unique(c("bayestestR_eti", "see_eti", class(out))) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export bci.BFBayesFactor <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- bci(insight::get_parameters(x), ci = ci, verbose = verbose, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @rdname bci #' @export bci.get_predicted <- function(x, ci = 0.95, use_iterations = FALSE, verbose = TRUE, ...) { if (isTRUE(use_iterations)) { if ("iterations" %in% names(attributes(x))) { out <- bci(as.data.frame(t(attributes(x)$iterations)), ci = ci, verbose = verbose, ...) } else { insight::format_error("No iterations present in the output.") } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) } else { out <- bci(as.numeric(x), ci = ci, verbose = verbose, ...) } out } # Helper ------------------------------------------------------------------ .bci <- function(x, ci, verbose = TRUE) { check_ci <- .check_ci_argument(x, ci, verbose) if (!is.null(check_ci)) { return(check_ci) } low <- (1 - ci) / 2 high <- 1 - low sims <- length(x) z.inv <- length(x[x < mean(x, na.rm = TRUE)]) / sims z <- stats::qnorm(z.inv) U <- (sims - 1) * (mean(x, na.rm = TRUE) - x) top <- sum(U^3) under <- 6 * (sum(U^2))^1.5 a <- top / under lower.inv <- stats::pnorm(z + (z + stats::qnorm(low)) / (1 - a * (z + stats::qnorm(low)))) lower <- stats::quantile(x, lower.inv, names = FALSE, na.rm = TRUE) upper.inv <- stats::pnorm(z + (z + stats::qnorm(high)) / (1 - a * (z + stats::qnorm(high)))) upper <- stats::quantile(x, upper.inv, names = FALSE, na.rm = TRUE) data.frame( CI = ci, CI_low = lower, CI_high = upper ) } bayestestR/R/print.R0000644000176200001440000001677415052646230014101 0ustar liggesusers#' @rdname display.describe_posterior #' @export print.describe_posterior <- function(x, digits = 2, caption = "Summary of Posterior Distribution", ...) { .print_default( x = x, digits = digits, caption = caption, ... 
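  # The adjusted quantile levels used by the `.bci()` helper above can be
  # reproduced by hand; this sketch simply restates that computation for a
  # single ci level (illustrative draws, not a test):
  #   x <- rnorm(4000); ci <- 0.95; low <- (1 - ci) / 2; high <- 1 - low
  #   z0 <- qnorm(mean(x < mean(x)))                   # bias-correction term
  #   U  <- (length(x) - 1) * (mean(x) - x)
  #   a  <- sum(U^3) / (6 * sum(U^2)^1.5)              # acceleration term
  #   ql <- pnorm(z0 + (z0 + qnorm(low))  / (1 - a * (z0 + qnorm(low))))
  #   qh <- pnorm(z0 + (z0 + qnorm(high)) / (1 - a * (z0 + qnorm(high))))
  #   quantile(x, c(ql, qh))                           # ~ bci(x, ci = 0.95)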
) } #' @export print.point_estimate <- function(x, digits = 2, caption = "Point Estimate", ...) { .print_default( x = x, digits = digits, caption = caption, ... ) } #' @export print.p_direction <- function(x, digits = 2, caption = "Probability of Direction", ...) { .print_default( x = x, digits = digits, caption = caption, ... ) } #' @export print.p_map <- function(x, digits = 2, caption = "MAP-based p-value", ...) { .print_default( x = x, digits = digits, caption = caption, ... ) } #' @export print.map_estimate <- function(x, digits = 2, caption = "MAP Estimate", ...) { .print_default( x = x, digits = digits, caption = caption, ... ) } #' @export print.p_rope <- function(x, digits = 2, ...) { # check if we have multiple ROPE values if (insight::n_unique(x$ROPE_low) > 1) { caption <- "Proportion of samples inside the ROPE" } else { caption <- sprintf( "Proportion of samples inside the ROPE [%.*f, %.*f]", digits, x$ROPE_low[1], digits, x$ROPE_high[1] ) x$ROPE_low <- x$ROPE_high <- NULL } .print_default( x = x, digits = digits, caption = caption, ci_string = "ROPE", ... ) } #' @export print.p_significance <- function(x, digits = 2, ...) { threshold <- attributes(x)$threshold if (is.list(threshold)) { caption <- "Practical Significance" out <- as.data.frame(do.call(rbind, threshold)) colnames(out) <- c("ROPE_low", "ROPE_high") x$ROPE_low <- out$ROPE_low x$ROPE_high <- out$ROPE_high ci_string <- "ROPE" } else { caption <- sprintf( "Practical Significance (threshold: %s)", insight::format_value(attributes(x)$threshold, digits = digits) ) ci_string <- NULL } .print_default( x = x, digits = digits, caption = caption, ci_string = ci_string, ... ) } #' @export print.bayestestR_hdi <- function(x, digits = 2, caption = "Highest Density Interval", ...) { ci_string <- "HDI" if (inherits(x, "bayestestR_spi")) { caption <- "Shortest Probability Interval" ci_string <- "SPI" } .print_default( x = x, digits = digits, caption = caption, ci_string = ci_string, ... ) } #' @export print.bayestestR_eti <- function(x, digits = 2, caption = "Equal-Tailed Interval", ...) { .print_default( x = x, digits = digits, caption = caption, ci_string = "ETI", ... ) } #' @export print.bayestestR_si <- function(x, digits = 2, caption = "Support Interval", ...) { .print_default( x = x, digits = digits, caption = caption, ci_string = "SI", ... ) } # special handling for bayes factors ------------------ #' @export print.bayesfactor_models <- function(x, digits = 3, log = FALSE, show_names = TRUE, caption = "Bayes Factors for Model Comparison", ...) { show_names <- show_names & !attr(x, "unsupported_models") .print_bf_default( x = x, digits = digits, log = log, show_names = show_names, caption = caption, align = "llr", ... ) } #' @export print.bayesfactor_inclusion <- function(x, digits = 3, log = FALSE, caption = "Inclusion Bayes Factors (Model Averaged)", ...) { .print_bf_default( x = x, digits = digits, log = log, caption = caption, ... ) } #' @export print.bayesfactor_restricted <- function(x, digits = 3, log = FALSE, caption = "Bayes Factor (Order-Restriction)", ...) { .print_bf_default( x = x, digits = digits, log = log, caption = caption, ... ) } #' @export print.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, log = log, format = "text", ... 
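  # A brief sketch of how the `p_significance` print method above switches
  # its display: a single threshold is reported in the caption, while a list
  # of thresholds is shown as per-parameter ROPE columns. Illustrative
  # draws only:
  #   x <- data.frame(a = rnorm(1000), b = rnorm(1000, 1))
  #   print(p_significance(x, threshold = 0.1))
  #   print(p_significance(x, threshold = list(a = 0.1, b = c(-0.2, 0.2))))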
) cat(insight::export_table(formatted_table, format = "text")) invisible(x) } # util --------------------- .print_default <- function(x, digits = 2, caption = NULL, subtitles = NULL, ci_string = "CI", ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, format = "text", ci_string = ci_string, caption = caption, subtitles = subtitles, ... ) # check if we have a 1x1 data frame (i.e. a numeric input) if (is.data.frame(formatted_table) && nrow(formatted_table) == 1 && ncol(formatted_table) == 1) { # print for numeric caption <- attr(formatted_table, "table_caption") # if we have no useful column name and a caption, use caption if (!is.null(caption) && !endsWith(colnames(formatted_table), ci_string)) { cat(paste0(caption, ": ")) } else { cat(paste0(colnames(formatted_table), ": ")) } cat(formatted_table[1, 1]) cat("\n") } else { # print for data frame cat(insight::export_table( formatted_table, caption = caption )) } invisible(x) } .print_bf_default <- function(x, digits = 3, log = FALSE, caption = NULL, align = NULL, ...) { # format data frame and columns formatted_table <- format( x, digits = digits, log = log, format = "text", caption = caption, ... # pass show_names ) cat(insight::export_table( formatted_table, sep = " ", header = NULL, format = "text", align = align )) invisible(x) } bayestestR/R/check_prior.R0000644000176200001440000001547215052646230015227 0ustar liggesusers#' Check if Prior is Informative #' #' Performs a simple test to check whether the prior is informative to the #' posterior. This idea, and the accompanying heuristics, were discussed in #' _Gelman et al. 2017_. #' #' @param method Can be `"gelman"` or `"lakeland"`. For the #' `"gelman"` method, if the SD of the posterior is more than 0.1 times #' the SD of the prior, then the prior is considered as informative. For the #' `"lakeland"` method, the prior is considered as informative if the #' posterior falls within the `95%` HDI of the prior. #' @param simulate_priors Should prior distributions be simulated using #' [simulate_prior()] (default; faster) or sampled via #' [unupdate()] (slower, more accurate). #' @inheritParams effective_sample #' @inheritParams hdi #' #' @return A data frame with two columns: The parameter names and the quality #' of the prior (which might be `"informative"`, `"uninformative"`) #' or `"not determinable"` if the prior distribution could not be #' determined). #' #' @examplesIf require("rstanarm") && require("see") #' \donttest{ #' library(bayestestR) #' model <- rstanarm::stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) #' check_prior(model, method = "gelman") #' check_prior(model, method = "lakeland") #' #' # An extreme example where both methods diverge: #' model <- rstanarm::stan_glm(mpg ~ wt, #' data = mtcars[1:3, ], #' prior = normal(-3.3, 1, FALSE), #' prior_intercept = normal(0, 1000, FALSE), #' refresh = 0 #' ) #' check_prior(model, method = "gelman") #' check_prior(model, method = "lakeland") #' # can provide visual confirmation to the Lakeland method #' plot(si(model, verbose = FALSE)) #' } #' @references #' Gelman, A., Simpson, D., and Betancourt, M. (2017). The Prior Can Often Only #' Be Understood in the Context of the Likelihood. Entropy, 19(10), 555. #' \doi{10.3390/e19100555} #' #' @export check_prior <- function(model, method = "gelman", simulate_priors = TRUE, ...) 
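  # The "gelman" heuristic implemented below reduces to a single comparison
  # of standard deviations; a toy illustration (numbers are made up):
  #   post_sd  <- 0.4    # SD of the posterior draws for one parameter
  #   prior_sd <- 2.5    # SD of the matching (simulated) prior draws
  #   post_sd > 0.1 * prior_sd   # TRUE -> classified as "informative"
  # With `simulate_priors = FALSE`, the prior draws instead come from
  # `unupdate()`, i.e. the model is re-sampled with priors only (slower,
  # but closer to the actual prior), as described in the documentation above.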
{ UseMethod("check_prior") } #' @rdname check_prior #' @export check_prior.brmsfit <- function(model, method = "gelman", simulate_priors = TRUE, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ...) { posteriors <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) if (isTRUE(simulate_priors)) { priors <- simulate_prior( model, effects = effects, component = component, parameters = parameters, verbose = verbose ) } else { priors <- unupdate(model, verbose = FALSE) priors <- insight::get_parameters( priors, effects = effects, component = component, parameters = parameters ) } .check_prior(priors, posteriors, method, verbose = verbose, cleaned_parameters = .get_cleaned_parameters(model, ...) ) } #' @export check_prior.stanreg <- check_prior.brmsfit #' @export check_prior.blavaan <- check_prior.brmsfit #' @keywords internal .check_prior <- function(priors, posteriors, method = "gelman", verbose = TRUE, cleaned_parameters = NULL) { # validation check for matching parameters. Some weird priors like # rstanarm's R2 prior might cause problems if (!is.null(cleaned_parameters) && ncol(priors) != ncol(posteriors)) { ## TODO for now only fixed effects if ("Effects" %in% colnames(cleaned_parameters)) { cleaned_parameters <- cleaned_parameters[cleaned_parameters$Effects == "fixed", ] } # rename cleaned parameters, so they match name of prior parameter column cp <- cleaned_parameters$Cleaned_Parameter cp <- gsub("(.*)(\\.|\\[)\\d+(\\.|\\])", "\\1", cp) cp[cp == "Intercept"] <- "(Intercept)" cleaned_parameters$Cleaned_Parameter <- cp colnames(priors)[colnames(priors) == "Intercept"] <- "(Intercept)" # at this point, the colnames of "posteriors" should match "cp$Parameter", # while colnames of "priors" should match "cp$Cleaned_Parameter". To ensure # that ncol of priors is the same as ncol of posteriors, we now duplicate # prior columns and match them with the posteriors if (ncol(posteriors) > ncol(priors)) { matched_columns <- stats::na.omit(match(cleaned_parameters$Cleaned_Parameter, colnames(priors))) matched_column_names <- stats::na.omit(match(colnames(priors), cleaned_parameters$Cleaned_Parameter)) priors <- priors[matched_columns] } else { matched_columns <- stats::na.omit(match(colnames(priors), cleaned_parameters$Cleaned_Parameter)) matched_column_names <- stats::na.omit(match(cleaned_parameters$Cleaned_Parameter, colnames(priors))) priors <- priors[matched_columns] } colnames(priors) <- cleaned_parameters$Parameter[matched_column_names] } # still different ncols? if (ncol(priors) != ncol(posteriors)) { common_columns <- intersect(colnames(priors), colnames(posteriors)) priors <- priors[common_columns] posteriors <- posteriors[common_columns] if (verbose) { insight::format_warning( "Parameters and priors could not be fully matched. Only returning results for parameters with matching priors." ) } } # for priors whose distribution cannot be simulated, prior values are # all NA. 
Catch those, and warn user all_missing <- vapply(priors, function(i) all(is.na(i)), TRUE) if (any(all_missing) && verbose) { insight::format_warning("Some priors could not be simulated.") } .gelman <- function(prior, posterior) { if (all(is.na(prior))) { "not determinable" } else if (stats::sd(posterior, na.rm = TRUE) > 0.1 * stats::sd(prior, na.rm = TRUE)) { "informative" } else { "uninformative" } } .lakeland <- function(prior, posterior) { if (all(is.na(prior))) { "not determinable" } else { hdi <- hdi(prior, ci = 0.95) r <- rope(posterior, ci = 1, range = c(hdi$CI_low, hdi$CI_high)) if (as.numeric(r) > 0.99) { "informative" } else { "misinformative" } } } if (method == "gelman") { result <- mapply(.gelman, priors, posteriors) } else if (method == "lakeland") { result <- mapply(.lakeland, priors, posteriors) } else { insight::format_error("method should be 'gelman' or 'lakeland'.") } data.frame( Parameter = names(posteriors), Prior_Quality = unname(result), stringsAsFactors = FALSE ) } bayestestR/R/simulate_simpson.R0000644000176200001440000000317314677026462016340 0ustar liggesusers#' Simpson's paradox dataset simulation #' #' Simpson's paradox, or the Yule-Simpson effect, is a phenomenon in probability #' and statistics, in which a trend appears in several different groups of data #' but disappears or reverses when these groups are combined. #' #' @param n The number of observations for each group to be generated (minimum 4). #' @param groups Number of groups (groups can be participants, clusters, anything). #' @param difference Difference between groups. #' @param group_prefix The prefix of the group name (e.g., "G_1", "G_2", "G_3", ...). #' @inheritParams simulate_correlation #' #' @return A dataset. #' #' @examplesIf requireNamespace("MASS", quietly = TRUE) #' data <- simulate_simpson(n = 10, groups = 5, r = 0.5) #' #' if (require("ggplot2")) { #' ggplot(data, aes(x = V1, y = V2)) + #' geom_point(aes(color = Group)) + #' geom_smooth(aes(color = Group), method = "lm") + #' geom_smooth(method = "lm") #' } #' @export simulate_simpson <- function(n = 100, r = 0.5, groups = 3, difference = 1, group_prefix = "G_") { if (n <= 3) { insight::format_error("The number of observations `n` should be larger than 3.") } out <- data.frame() for (i in 1:groups) { dat <- simulate_correlation(n = n, r = r) dat$V1 <- dat$V1 + difference * i # (i * -sign(r)) dat$V2 <- dat$V2 + difference * (i * -sign(r)) dat$Group <- sprintf(paste0(group_prefix, "%0", nchar(trunc(abs(groups))), "d"), i) out <- rbind(out, dat) } out } bayestestR/R/weighted_posteriors.R0000644000176200001440000002353114765755711017042 0ustar liggesusers#' Generate posterior distributions weighted across models #' #' Extract posterior samples of parameters, weighted across models. Weighting is #' done by comparing posterior model probabilities, via [bayesfactor_models()]. #' #' @param ... Fitted models (see details), all fit on the same data, or a single #' `BFBayesFactor` object. #' @param missing An optional numeric value to use if a model does not contain a #' parameter that appears in other models. Defaults to 0. #' @param prior_odds Optional vector of prior odds for the models compared to #' the first model (or the denominator, for `BFBayesFactor` objects). For #' `data.frame`s, this will be used as the basis of weighting. #' @param iterations For `BayesFactor` models, how many posterior samples to draw. 
#' @inheritParams bayesfactor_models #' @inheritParams bayesfactor_parameters #' #' @details #' Note that across models some parameters might play different roles. For #' example, the parameter `A` plays a different role in the model `Y ~ A + B` #' (where it is a main effect) than it does in the model `Y ~ A + B + A:B` #' (where it is a simple effect). In many cases centering of predictors (mean #' subtracting for continuous variables, and effects coding via `contr.sum` or #' orthonormal coding via [`contr.equalprior_pairs`] for factors) can reduce this #' issue. In any case you should be mindful of this issue. #' #' See [bayesfactor_models()] details for more info on passed models. #' #' Note that for `BayesFactor` models, posterior samples cannot be generated #' from intercept only models. #' #' This function is similar in function to `brms::posterior_average`. #' #' @note For `BayesFactor < 0.9.12-4.3`, in some instances there might be #' some problems of duplicate columns of random effects in the resulting data #' frame. #' #' @return A data frame with posterior distributions (weighted across models) . #' #' @seealso [`bayesfactor_inclusion()`] for Bayesian model averaging. #' #' @examples #' \donttest{ #' if (require("rstanarm") && require("see") && interactive()) { #' stan_m0 <- suppressWarnings(stan_glm(extra ~ 1, #' data = sleep, #' family = gaussian(), #' refresh = 0, #' diagnostic_file = file.path(tempdir(), "df0.csv") #' )) #' #' stan_m1 <- suppressWarnings(stan_glm(extra ~ group, #' data = sleep, #' family = gaussian(), #' refresh = 0, #' diagnostic_file = file.path(tempdir(), "df1.csv") #' )) #' #' res <- weighted_posteriors(stan_m0, stan_m1, verbose = FALSE) #' #' plot(eti(res)) #' } #' #' ## With BayesFactor #' if (require("BayesFactor")) { #' extra_sleep <- ttestBF(formula = extra ~ group, data = sleep) #' #' wp <- weighted_posteriors(extra_sleep, verbose = FALSE) #' #' describe_posterior(extra_sleep, test = NULL, verbose = FALSE) #' # also considers the null #' describe_posterior(wp$delta, test = NULL, verbose = FALSE) #' } #' #' #' ## weighted prediction distributions via data.frames #' if (require("rstanarm") && interactive()) { #' m0 <- suppressWarnings(stan_glm( #' mpg ~ 1, #' data = mtcars, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df0.csv"), #' refresh = 0 #' )) #' #' m1 <- suppressWarnings(stan_glm( #' mpg ~ carb, #' data = mtcars, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df1.csv"), #' refresh = 0 #' )) #' #' # Predictions: #' pred_m0 <- data.frame(posterior_predict(m0)) #' pred_m1 <- data.frame(posterior_predict(m1)) #' #' BFmods <- bayesfactor_models(m0, m1, verbose = FALSE) #' #' wp <- weighted_posteriors( #' pred_m0, pred_m1, #' prior_odds = as.numeric(BFmods)[2], #' verbose = FALSE #' ) #' #' # look at first 5 prediction intervals #' hdi(pred_m0[1:5]) #' hdi(pred_m1[1:5]) #' hdi(wp[1:5]) # between, but closer to pred_m1 #' } #' } #' #' @references #' #' - Clyde, M., Desimone, H., & Parmigiani, G. (1996). Prediction via #' orthogonalized model mixing. Journal of the American Statistical #' Association, 91(435), 1197-1208. #' #' - Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. #' (2019, March 25). A conceptual introduction to Bayesian Model Averaging. #' \doi{10.31234/osf.io/wgb64} #' #' - Rouder, J. N., Haaf, J. M., & Vandekerckhove, J. (2018). Bayesian #' inference for psychology, part IV: Parameter estimation and Bayes factors. #' Psychonomic bulletin & review, 25(1), 102-113. 
#' #' - van den Bergh, D., Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers, #' E. J. (2019). A cautionary note on estimating effect size. #' #' @export weighted_posteriors <- function(..., prior_odds = NULL, missing = 0, verbose = TRUE) { UseMethod("weighted_posteriors") } #' @export #' @rdname weighted_posteriors weighted_posteriors.data.frame <- function(..., prior_odds = NULL, missing = 0, verbose = TRUE) { Mods <- list(...) mnames <- sapply(match.call(expand.dots = FALSE)$`...`, insight::safe_deparse) # find min nrow iterations <- min(vapply(Mods, nrow, numeric(1))) # make weights from prior_odds if (!is.null(prior_odds)) { prior_odds <- c(1, prior_odds) } else { if (verbose) { insight::format_warning( "'prior_odds = NULL'; Using uniform priors odds.\n", "For weighted data frame, 'prior_odds' should be specified as a numeric vector." ) } prior_odds <- rep(1, length(Mods)) } Probs <- prior_odds / sum(prior_odds) weighted_samps <- round(iterations * Probs) # pass to .weighted_posteriors res <- .weighted_posteriors(Mods, weighted_samps, missing) # make weights table attr(res, "weights") <- data.frame(Model = mnames, weights = weighted_samps) return(res) } #' @export #' @rdname weighted_posteriors weighted_posteriors.stanreg <- function(..., prior_odds = NULL, missing = 0, verbose = TRUE, effects = "fixed", component = "conditional", parameters = NULL) { Mods <- list(...) # Get Bayes factors BFMods <- bayesfactor_models(..., denominator = 1, verbose = verbose) # Compute posterior model probabilities model_tab <- .get_model_table(BFMods, priorOdds = prior_odds) postProbs <- model_tab$postProbs # Compute weighted number of samples iterations <- min(sapply(Mods, .total_samps)) weighted_samps <- round(iterations * postProbs) # extract parameters params <- lapply(Mods, insight::get_parameters, effects = effects, component = component, parameters = parameters ) res <- .weighted_posteriors(params, weighted_samps, missing) attr(res, "weights") <- data.frame(Model = BFMods$Model, weights = weighted_samps) return(res) } #' @export weighted_posteriors.brmsfit <- weighted_posteriors.stanreg #' @export weighted_posteriors.blavaan <- weighted_posteriors.stanreg #' @rdname weighted_posteriors #' @export weighted_posteriors.BFBayesFactor <- function(..., prior_odds = NULL, missing = 0, verbose = TRUE, iterations = 4000) { Mods <- c(...) # Get Bayes factors BFMods <- bayesfactor_models(Mods, verbose = verbose) # Compute posterior model probabilities model_tab <- .get_model_table(BFMods, priorOdds = prior_odds, add_effects_table = FALSE) postProbs <- model_tab$postProbs # Compute weighted number of samples weighted_samps <- round(iterations * postProbs) # extract parameters intercept_only <- which(BFMods$Model == "1") params <- vector(mode = "list", length = nrow(BFMods)) for (m in seq_along(params)) { if (length(intercept_only) && m == intercept_only) { # warning( # "Cannot sample from BFBayesFactor model with intercept only (model prob = ", # round(postProbs[m], 3) * 100, "%).\n", # "Omitting the intercept model.", # call. 
= FALSE # ) params[[m]] <- data.frame( mu = rep(NA, iterations), sig2 = rep(NA, iterations), g = rep(NA, iterations) ) } else if (m == 1) { # If the model is the "den" model params[[m]] <- BayesFactor::posterior(1 / Mods[1], iterations = iterations, progress = FALSE) } else { params[[m]] <- BayesFactor::posterior( Mods[m - 1], iterations = iterations, progress = FALSE ) } } params <- lapply(params, data.frame) res <- .weighted_posteriors(params, weighted_samps, missing) attr(res, "weights") <- data.frame(Model = BFMods$Model, weights = weighted_samps) return(res) } .weighted_posteriors <- function(params, weighted_samps, missing) { par_names <- unique(unlist(sapply(params, colnames), recursive = TRUE)) # remove empty (0 sample) models params <- params[weighted_samps != 0] weighted_samps <- weighted_samps[weighted_samps != 0] for (m in seq_along(weighted_samps)) { temp_params <- params[[m]] i <- sample(nrow(temp_params), size = weighted_samps[m]) temp_params <- temp_params[i, , drop = FALSE] # If any parameters not estimated in the model, they are assumed to be 0 (the default value of `missing`) missing_pars <- setdiff(par_names, colnames(temp_params)) temp_params[, missing_pars] <- missing params[[m]] <- temp_params } # combine all do.call("rbind", params) } #' @keywords internal .total_samps <- function(mod) { x <- insight::find_algorithm(mod) if (is.null(x$iterations)) x$iterations <- x$sample x$chains * (x$iterations - x$warmup) } bayestestR/R/unupdate.R0000644000176200001440000000753414765755711014603 0ustar liggesusers#' Un-update Bayesian models to their prior-to-data state #' #' As posteriors are priors that have been updated after observing some data, #' the goal of this function is to un-update the posteriors to obtain models #' representing the priors. These models can then be used to examine the prior #' predictive distribution, or to compare priors with posteriors. #' #' This function in used internally to compute Bayes factors. #' #' @param model A fitted Bayesian model. #' @param verbose Toggle warnings. #' @param newdata List of `data.frames` to update the model with new data. #' Required even if the original data should be used. #' @param ... Not used #' #' @return A model un-fitted to the data, representing the prior model. #' #' @keywords internal #' @export unupdate <- function(model, verbose = TRUE, ...) { UseMethod("unupdate") } #' @export unupdate.stanreg <- function(model, verbose = TRUE, ...) { insight::check_if_installed("rstanarm") prior_PD <- stats::getCall(model)$prior_PD if (!is.null(prior_PD) && isTRUE(eval(parse(text = prior_PD)))) { return(model) } if (verbose) { insight::format_alert("Sampling priors, please wait...") } prior_dists <- sapply(rstanarm::prior_summary(model), `[[`, "dist") if (anyNA(prior_dists)) { insight::format_error( "Cannot sample from flat priors (such as when priors are set to 'NULL' in a 'stanreg' model)." ) } model_prior <- suppressWarnings( stats::update(model, prior_PD = TRUE, refresh = 0) ) model_prior } #' @rdname unupdate #' @export unupdate.brmsfit <- function(model, verbose = TRUE, ...) 
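  # The resampling counts used by `weighted_posteriors()` (see the data.frame
  # method and `.weighted_posteriors()` earlier in this file) boil down to
  # rounding the model probabilities times the number of draws; a toy example:
  #   iterations <- 4000
  #   prior_odds <- c(1, 3, 1)                 # first model as reference
  #   probs <- prior_odds / sum(prior_odds)    # 0.2, 0.6, 0.2
  #   round(iterations * probs)                # 800, 2400, 800 draws per model
  # For fitted models, the probabilities come from `bayesfactor_models()`
  # rather than from user-supplied `prior_odds`.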
{ insight::check_if_installed("brms") if (isTRUE(attr(model$prior, "sample_prior") == "only")) { return(model) } if (verbose) { insight::format_alert("Sampling priors, please wait...") } utils::capture.output({ model_prior <- try(suppressMessages(suppressWarnings( stats::update(model, sample_prior = "only", refresh = 0) )), silent = TRUE) }) if (methods::is(model_prior, "try-error")) { if (grepl("proper priors", model_prior, fixed = TRUE)) { insight::format_error( "Cannot sample from flat priors (such as the default priors for fixed-effects in a 'brmsfit' model)." ) } else { insight::format_error(model_prior) } } model_prior } #' @rdname unupdate #' @export unupdate.brmsfit_multiple <- function(model, verbose = TRUE, newdata = NULL, ...) { insight::check_if_installed("brms") if (isTRUE(attr(model$prior, "sample_prior") == "only")) { return(model) } if (verbose) { insight::format_alert("Sampling priors, please wait...") } utils::capture.output({ model_prior <- try(suppressMessages(suppressWarnings( stats::update( model, sample_prior = "only", newdata = newdata, refresh = 0 ) )), silent = TRUE) }) if (methods::is(model_prior, "try-error")) { if (grepl("proper priors", model_prior, fixed = TRUE)) { insight::format_error( "Cannot sample from flat priors (such as the default priors for fixed-effects in a 'brmsfit' model)." ) } else { insight::format_error(model_prior) } } model_prior } #' @export unupdate.blavaan <- function(model, verbose = TRUE, ...) { insight::check_if_installed("blavaan") cl <- model@call if (isTRUE(eval(cl$prisamp))) { return(model) } if (verbose) { insight::format_alert("Sampling priors, please wait...") } cl$prisamp <- TRUE suppressMessages(suppressWarnings( utils::capture.output({ model_prior <- eval(cl) }) )) model_prior } bayestestR/R/diagnostic_posterior.R0000644000176200001440000002713015005147105017156 0ustar liggesusers#' Posteriors Sampling Diagnostic #' #' Extract diagnostic metrics (Effective Sample Size (`ESS`), `Rhat` and Monte #' Carlo Standard Error `MCSE`). #' #' @param posterior A `stanreg`, `stanfit`, `brmsfit`, or `blavaan` object. #' @param diagnostic Diagnostic metrics to compute. Character (vector) or list #' with one or more of these options: `"ESS"`, `"Rhat"`, `"MCSE"` or `"all"`. #' #' @inheritSection hdi Model components #' #' @details #' **Effective Sample (ESS)** should be as large as possible, although for #' most applications, an effective sample size greater than 1000 is sufficient #' for stable estimates (_Bürkner, 2017_). The ESS corresponds to the number of #' independent samples with the same estimation power as the N autocorrelated #' samples. It is is a measure of "how much independent information there is #' in autocorrelated chains" (_Kruschke 2015, p182-3_). #' #' **Rhat** should be the closest to 1. It should not be larger than 1.1 #' (_Gelman and Rubin, 1992_) or 1.01 (_Vehtari et al., 2019_). The split #' Rhat statistic quantifies the consistency of an ensemble of Markov chains. #' #' **Monte Carlo Standard Error (MCSE)** is another measure of accuracy of the #' chains. It is defined as standard deviation of the chains divided by their #' effective sample size (the formula for `mcse()` is from Kruschke 2015, p. #' 187). The MCSE "provides a quantitative suggestion of how big the estimation #' noise is". 
#' #' #' @examplesIf require("rstanarm") && require("brms") #' \donttest{ #' # rstanarm models #' # ----------------------------------------------- #' model <- suppressWarnings( #' rstanarm::stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' ) #' diagnostic_posterior(model) #' #' # brms models #' # ----------------------------------------------- #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' diagnostic_posterior(model) #' } #' @references #' - Gelman, A., & Rubin, D. B. (1992). Inference from iterative simulation #' using multiple sequences. Statistical science, 7(4), 457-472. #' - Vehtari, A., Gelman, A., Simpson, D., Carpenter, B., and Bürkner, P. C. #' (2019). Rank-normalization, folding, and localization: An improved Rhat #' for assessing convergence of MCMC. arXiv preprint arXiv:1903.08008. #' - Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, #' JAGS, and Stan. Academic Press. #' @export diagnostic_posterior <- function(posterior, ...) { UseMethod("diagnostic_posterior") } #' @rdname diagnostic_posterior #' @export diagnostic_posterior.default <- function(posterior, diagnostic = c("ESS", "Rhat"), ...) { insight::format_error("'diagnostic_posterior()' only works with rstanarm, brms or blavaan models.") } #' @inheritParams insight::get_parameters.BFBayesFactor #' @inheritParams insight::get_parameters #' @rdname diagnostic_posterior #' @export diagnostic_posterior.stanreg <- function(posterior, diagnostic = "all", effects = "fixed", component = "location", parameters = NULL, ...) { # Find parameters params <- insight::find_parameters( posterior, effects = effects, component = component, parameters = parameters, flatten = TRUE ) # If no diagnostic if (is.null(diagnostic)) { return(data.frame(Parameter = params)) } diagnostic <- match.arg( diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE ) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE", "khat") } else { diagnostic <- diagnostic if ("Rhat" %in% diagnostic) diagnostic <- c(diagnostic, "khat") } # Get indices and rename diagnostic_df <- as.data.frame(posterior$stan_summary) diagnostic_df$Parameter <- row.names(diagnostic_df) if ("n_eff" %in% names(diagnostic_df)) { diagnostic_df$ESS <- diagnostic_df$n_eff } # special handling for MCSE, due to some parameters (like lp__) missing in rows MCSE <- mcse(posterior, effects = "full") diagnostic_df <- merge(diagnostic_df, MCSE, by = "Parameter", all = FALSE) # Select columns available_columns <- intersect(colnames(diagnostic_df), c("Parameter", diagnostic)) diagnostic_df <- diagnostic_df[available_columns] names(diagnostic_df)[available_columns == "khat"] <- "Khat" row.names(diagnostic_df) <- NULL # Remove columns with all Nans diagnostic_df <- diagnostic_df[!sapply(diagnostic_df, function(x) all(is.na(x)))] # Select rows diagnostic_df[diagnostic_df$Parameter %in% params, ] } #' @inheritParams insight::get_parameters #' @export diagnostic_posterior.stanmvreg <- function(posterior, diagnostic = "all", effects = "fixed", parameters = NULL, ...) 
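  # A usage sketch for the methods above, assuming a small 'rstanarm' fit;
  # the cut-offs follow the references cited in the documentation
  # (ESS > 1000, Rhat <= 1.01):
  #   model <- rstanarm::stan_glm(mpg ~ wt + gear, data = mtcars, refresh = 0)
  #   dp <- diagnostic_posterior(model, diagnostic = c("ESS", "Rhat"))
  #   subset(dp, ESS < 1000 | Rhat > 1.01)   # parameters that may need longer chains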
{ # Find parameters all_params <- insight::find_parameters( posterior, effects = effects, parameters = parameters, flatten = FALSE ) params <- unlist(lapply(names(all_params), function(i) { all_params[[i]]$sigma <- NULL unlist(all_params[[i]], use.names = FALSE) }), use.names = FALSE) # If no diagnostic if (is.null(diagnostic)) { return(data.frame(Parameter = params)) } diagnostic <- match.arg( diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE ) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE", "khat") } else { diagnostic <- diagnostic if ("Rhat" %in% diagnostic) diagnostic <- c(diagnostic, "khat") } # Get indices and rename diagnostic_df <- as.data.frame(posterior$stan_summary) diagnostic_df$Parameter <- row.names(diagnostic_df) if ("n_eff" %in% names(diagnostic_df)) { diagnostic_df$ESS <- diagnostic_df$n_eff } # special handling for MCSE, due to some parameters (like lp__) missing in rows MCSE <- mcse(posterior, effects = effects) diagnostic_df <- merge(diagnostic_df, MCSE, by = "Parameter", all = FALSE) # Select columns available_columns <- intersect(colnames(diagnostic_df), c("Parameter", diagnostic)) diagnostic_df <- diagnostic_df[available_columns] names(diagnostic_df)[available_columns == "khat"] <- "Khat" row.names(diagnostic_df) <- NULL # Remove columns with all Nans diagnostic_df <- diagnostic_df[!sapply(diagnostic_df, function(x) all(is.na(x)))] diagnostic_df$Response <- gsub("(b\\[)*(.*)\\|(.*)", "\\2", diagnostic_df$Parameter) # Select rows diagnostic_df <- diagnostic_df[diagnostic_df$Parameter %in% params, ] # clean parameters for (i in unique(diagnostic_df$Response)) { diagnostic_df$Parameter <- gsub( sprintf("%s|", i), "", diagnostic_df$Parameter, fixed = TRUE ) } diagnostic_df } #' @inheritParams insight::get_parameters #' @export diagnostic_posterior.brmsfit <- function(posterior, diagnostic = "all", effects = "fixed", component = "conditional", parameters = NULL, ...) { # Find parameters params <- insight::find_parameters(posterior, effects = effects, component = component, parameters = parameters, flatten = TRUE ) # If no diagnostic if (is.null(diagnostic)) { return(data.frame(Parameter = params)) } # Get diagnostic diagnostic <- match.arg( diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE ) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE", "khat") # Add MCSE } else if ("Rhat" %in% diagnostic) { diagnostic <- c(diagnostic, "khat") } insight::check_if_installed("rstan") # Get indices and rename diagnostic_df <- as.data.frame(rstan::summary(posterior$fit)$summary) diagnostic_df$Parameter <- row.names(diagnostic_df) diagnostic_df$ESS <- diagnostic_df$n_eff # special handling for MCSE, due to some parameters (like lp__) missing in rows MCSE <- mcse(posterior, effects = "full", component = "all") diagnostic_df <- merge(diagnostic_df, MCSE, by = "Parameter", all = FALSE) # Select columns available_columns <- intersect(colnames(diagnostic_df), c("Parameter", diagnostic)) diagnostic_df <- diagnostic_df[available_columns] names(diagnostic_df)[available_columns == "khat"] <- "Khat" row.names(diagnostic_df) <- NULL # Remove columns with all Nans diagnostic_df <- diagnostic_df[!sapply(diagnostic_df, function(x) all(is.na(x)))] # Select rows diagnostic_df[diagnostic_df$Parameter %in% params, ] } #' @inheritParams insight::get_parameters #' @export diagnostic_posterior.stanfit <- function(posterior, diagnostic = "all", effects = "fixed", parameters = NULL, ...) 
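  # The MCSE column merged in above can be approximated by hand. Kruschke
  # (2015) defines the Monte Carlo standard error as the SD of the draws
  # divided by the square root of the ESS; treating `mcse()`'s internals this
  # way is an assumption here, and the sketch is only meant as a sanity check:
  #   model <- rstanarm::stan_glm(mpg ~ wt, data = mtcars, refresh = 0)
  #   draws <- as.data.frame(model)[["wt"]]      # posterior draws of one parameter
  #   ess   <- effective_sample(model)
  #   sd(draws) / sqrt(ess$ESS[ess$Parameter == "wt"])   # compare with mcse(model)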
{ # Find parameters params <- insight::find_parameters( posterior, effects = effects, parameters = parameters, flatten = TRUE ) # If no diagnostic if (is.null(diagnostic)) { return(data.frame(Parameter = params)) } # Get diagnostic diagnostic <- match.arg( diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE ) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE") } insight::check_if_installed("rstan") all_params <- insight::find_parameters(posterior, effects = effects, flatten = TRUE ) diagnostic_df <- data.frame( Parameter = all_params, stringsAsFactors = FALSE ) if ("ESS" %in% diagnostic) { diagnostic_df$ESS <- effective_sample(posterior, effects = effects)$ESS } if ("MCSE" %in% diagnostic) { diagnostic_df$MCSE <- mcse(posterior, effects = effects)$MCSE } if ("Rhat" %in% diagnostic) { s <- as.data.frame(rstan::summary(posterior)$summary) diagnostic_df$Rhat <- s[rownames(s) %in% all_params, ]$Rhat } # Remove columns with all Nans diagnostic_df <- diagnostic_df[!sapply(diagnostic_df, function(x) all(is.na(x)))] # Select rows diagnostic_df[diagnostic_df$Parameter %in% params, ] } #' @export diagnostic_posterior.blavaan <- function(posterior, diagnostic = "all", ...) { # Find parameters params <- suppressWarnings(insight::find_parameters(posterior, flatten = TRUE)) out <- data.frame(Parameter = params) # If no diagnostic if (is.null(diagnostic)) { return(out) } diagnostic <- match.arg( diagnostic, c("ESS", "Rhat", "MCSE", "all"), several.ok = TRUE ) if ("all" %in% diagnostic) { diagnostic <- c("ESS", "Rhat", "MCSE") } else { diagnostic <- diagnostic if ("Rhat" %in% diagnostic) diagnostic <- c(diagnostic, "khat") } # Get indices if ("Rhat" %in% diagnostic) { insight::check_if_installed("blavaan") Rhat <- blavaan::blavInspect(posterior, what = "psrf") Rhat <- data.frame( Parameter = colnames(insight::get_parameters(posterior)), Rhat = Rhat ) out <- merge(out, Rhat, by = "Parameter", all = TRUE) } if ("ESS" %in% diagnostic) { ESS <- effective_sample(posterior) out <- merge(out, ESS, by = "Parameter", all = TRUE) } if ("MCSE" %in% diagnostic) { MCSE <- mcse(posterior) out <- merge(out, MCSE, by = "Parameter", all = TRUE) } unique(out) } bayestestR/R/p_map.R0000644000176200001440000003033515052646230014026 0ustar liggesusers#' Bayesian p-value based on the density at the Maximum A Posteriori (MAP) #' #' Compute a Bayesian equivalent of the *p*-value, related to the odds that a #' parameter (described by its posterior distribution) has against the null #' hypothesis (*h0*) using Mills' (2014, 2017) *Objective Bayesian Hypothesis #' Testing* framework. It corresponds to the density value at the null (e.g., 0) #' divided by the density at the Maximum A Posteriori (MAP). #' #' @details Note that this method is sensitive to the density estimation `method` #' (see the section in the examples below). #' #' ## Strengths and Limitations #' #' **Strengths:** Straightforward computation. Objective property of the posterior #' distribution. #' #' **Limitations:** Limited information favoring the null hypothesis. Relates #' on density approximation. Indirect relationship between mathematical #' definition and interpretation. Only suitable for weak / very diffused priors. 
#' #' @inheritParams hdi #' @inheritParams density_at #' @inheritParams pd #' #' @inheritSection hdi Model components #' #' @examplesIf require("rstanarm") && require("emmeans") && require("brms") && require("BayesFactor") #' library(bayestestR) #' #' p_map(rnorm(1000, 0, 1)) #' p_map(rnorm(1000, 10, 1)) #' \donttest{ #' model <- suppressWarnings( #' rstanarm::stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' ) #' p_map(model) #' #' p_map(suppressWarnings( #' emmeans::emtrends(model, ~1, "wt", data = mtcars) #' )) #' #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' p_map(model) #' #' bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) #' p_map(bf) #' #' # --------------------------------------- #' # Robustness to density estimation method #' set.seed(333) #' data <- data.frame() #' for (iteration in 1:250) { #' x <- rnorm(1000, 1, 1) #' result <- data.frame( #' Kernel = as.numeric(p_map(x, method = "kernel")), #' KernSmooth = as.numeric(p_map(x, method = "KernSmooth")), #' logspline = as.numeric(p_map(x, method = "logspline")) #' ) #' data <- rbind(data, result) #' } #' data$KernSmooth <- data$Kernel - data$KernSmooth #' data$logspline <- data$Kernel - data$logspline #' #' summary(data$KernSmooth) #' summary(data$logspline) #' boxplot(data[c("KernSmooth", "logspline")]) #' } #' @seealso [Jeff Mill's talk](https://www.youtube.com/watch?v=Ip8Ci5KUVRc) #' #' @references #' - Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} #' - Mills, J. A. (2018). Objective Bayesian Precise Hypothesis Testing. University of Cincinnati. #' #' @export p_map <- function(x, ...) { UseMethod("p_map") } #' @rdname p_map #' @export p_pointnull <- p_map #' @rdname p_map #' @export p_map.numeric <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { p_map(data.frame(Posterior = x), null = null, precision = precision, method = method, ...) } #' @rdname p_map #' @export p_map.get_predicted <- function(x, null = 0, precision = 2^10, method = "kernel", use_iterations = FALSE, verbose = TRUE, ...) { if (isTRUE(use_iterations)) { if ("iterations" %in% names(attributes(x))) { out <- p_map( as.data.frame(t(attributes(x)$iterations)), null = null, precision = precision, method = method, verbose = verbose, ... ) } else { insight::format_error("No iterations present in the output.") } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) } else { out <- p_map(as.numeric(x), null = null, precision = precision, method = method, verbose = verbose, ... ) } out } #' @export #' @rdname p_map #' @inheritParams p_direction p_map.data.frame <- function(x, null = 0, precision = 2^10, method = "kernel", rvar_col = NULL, ...) { x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::p_map cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) obj_name <- insight::safe_deparse_symbol(substitute(x)) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x)) } x <- .select_nums(x) if (ncol(x) == 1) { p_MAP <- .p_map(x[, 1], null = null, precision = precision, method = method, ...) } else { p_MAP <- sapply(x, .p_map, null = null, precision = precision, method = method, simplify = TRUE, ...) 
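  # For intuition about the ratio computed by `.p_map()`: with draws from a
  # N(1, 1) posterior, the density at the null (0) relative to the density at
  # the mode (1) is dnorm(0, 1, 1) / dnorm(1, 1, 1) = exp(-0.5), about 0.61,
  # so p_map(rnorm(10000, 1, 1)) should return a value near 0.61 (up to
  # density-estimation noise, see the examples in the documentation above).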
} out <- data.frame( Parameter = names(x), p_MAP = p_MAP, row.names = NULL, stringsAsFactors = FALSE ) class(out) <- c("p_map", class(out)) out } #' @export p_map.draws <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { p_map(.posterior_draws_to_df(x), null = null, precision = precision, method = method, ...) } #' @export p_map.rvar <- p_map.draws #' @export p_map.emmGrid <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { xdf <- insight::get_parameters(x) out <- p_map(xdf, null = null, precision = precision, method = method, ...) out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_map.emm_list <- p_map.emmGrid #' @export p_map.slopes <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { xrvar <- .get_marginaleffects_draws(x) out <- p_map(xrvar, null = null, precision = precision, method = method, ...) out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_map.comparisons <- p_map.slopes #' @export p_map.predictions <- p_map.slopes #' @keywords internal .p_map_models <- function(x, null, precision, method, effects, component, parameters, ...) { p_map( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), null = null, precision = precision, method = method, ... ) } #' @export p_map.mcmc <- function(x, null = 0, precision = 2^10, method = "kernel", parameters = NULL, ...) { out <- .p_map_models( x = x, null = null, precision = precision, method = method, effects = "fixed", component = "conditional", parameters = parameters, ... ) attr(out, "data") <- insight::get_parameters(x, parameters = parameters) out } #' @export p_map.bcplm <- p_map.mcmc #' @export p_map.blrm <- p_map.mcmc #' @export p_map.mcmc.list <- p_map.mcmc #' @export p_map.BGGM <- p_map.mcmc #' @export p_map.bamlss <- function(x, null = 0, precision = 2^10, method = "kernel", component = "all", parameters = NULL, ...) { out <- .p_map_models( x = x, null = null, precision = precision, method = method, effects = "all", component = component, parameters = parameters, ... ) out <- .add_clean_parameters_attribute(out, x) attr(out, "data") <- insight::get_parameters(x, parameters = parameters) out } #' @export p_map.sim.merMod <- function(x, null = 0, precision = 2^10, method = "kernel", effects = "fixed", parameters = NULL, ...) { out <- .p_map_models( x = x, null = null, precision = precision, method = method, effects = effects, component = "conditional", parameters = parameters, ... ) attr(out, "data") <- insight::get_parameters( x, effects = effects, parameters = parameters ) out } #' @export p_map.sim <- function(x, null = 0, precision = 2^10, method = "kernel", parameters = NULL, ...) { out <- .p_map_models( x = x, null = null, precision = precision, method = method, effects = "fixed", component = "conditional", parameters = parameters, ... ) attr(out, "data") <- insight::get_parameters(x, parameters = parameters) out } #' @export p_map.stanreg <- function(x, null = 0, precision = 2^10, method = "kernel", effects = "fixed", component = "location", parameters = NULL, ...) { cleaned_parameters <- .get_cleaned_parameters(x, ...) 
out <- .prepare_output( p_map( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), null = null, precision = precision, method = method ), cleaned_parameters, inherits(x, "stanmvreg") ) attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("p_map", class(out))) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_map.stanfit <- p_map.stanreg #' @export p_map.blavaan <- p_map.stanreg #' @rdname p_map #' @export p_map.brmsfit <- function(x, null = 0, precision = 2^10, method = "kernel", effects = "fixed", component = "conditional", parameters = NULL, ...) { cleaned_parameters <- .get_cleaned_parameters(x, ...) out <- .prepare_output( p_map( insight::get_parameters( x, effects = effects, component = component, parameters = parameters, ... ), null = null, precision = precision, method = method, ... ), cleaned_parameters ) attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("p_map", class(out))) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_map.BFBayesFactor <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { out <- p_map(insight::get_parameters(x), null = null, precision = precision, method = method, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_map.MCMCglmm <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { nF <- x$Fixed$nfl out <- p_map(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), null = null, precision = precision, method = method, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_map.bayesQR <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { out <- p_map(insight::get_parameters(x), null = null, precision = precision, method = method, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @keywords internal .p_map <- function(x, null = 0, precision = 2^10, method = "kernel", ...) { # Density at MAP map <- attributes(map_estimate(x, precision = precision, method = method, ...))$MAP_density # Density at 0 d_0 <- density_at(x, null, precision = precision, method = method, ...) if (is.na(d_0)) d_0 <- 0 # Odds p <- d_0 / map p } #' @rdname as.numeric.p_direction #' @method as.numeric p_map #' @export as.numeric.p_map <- function(x, ...) { if (inherits(x, "data.frame")) { return(as.numeric(as.vector(x$p_MAP))) } else { return(as.vector(x)) } } #' @method as.double p_map #' @export as.double.p_map <- as.numeric.p_map bayestestR/R/p_significance.R0000644000176200001440000003673015052646230015700 0ustar liggesusers#' Practical Significance (ps) #' #' Compute the probability of **Practical Significance** (***ps***), which can #' be conceptualized as a unidirectional equivalence test. It returns the #' probability that effect is above a given threshold corresponding to a #' negligible effect in the median's direction. Mathematically, it is defined as #' the proportion of the posterior distribution of the median sign above the #' threshold. #' #' @param threshold The threshold value that separates significant from #' negligible effect, which can have following possible values: #' - `"default"`, in which case the range is set to `0.1` if input is a vector, #' and based on [`rope_range()`] if a (Bayesian) model is provided. #' - a single numeric value (e.g., 0.1), which is used as range around zero #' (i.e. the threshold range is set to -0.1 and 0.1, i.e. 
reflects a symmetric #' interval) #' - a numeric vector of length two (e.g., `c(-0.2, 0.1)`), useful for #' asymmetric intervals #' - a list of numeric vectors, where each vector corresponds to a parameter #' - a list of *named* numeric vectors, where names correspond to parameter #' names. In this case, all parameters that have no matching name in `threshold` #' will be set to `"default"`. #' @inheritParams rope #' @inheritParams hdi #' #' @inheritSection hdi Model components #' #' @return Values between 0 and 1 corresponding to the probability of practical significance (ps). #' #' @details `p_significance()` returns the proportion of a probability #' distribution (`x`) that is outside a certain range (the negligible #' effect, or ROPE, see argument `threshold`). If there are values of the #' distribution both below and above the ROPE, `p_significance()` returns #' the higher probability of a value being outside the ROPE. Typically, this #' value should be larger than 0.5 to indicate practical significance. However, #' if the range of the negligible effect is rather large compared to the #' range of the probability distribution `x`, `p_significance()` #' will be less than 0.5, which indicates no clear practical significance. #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @examplesIf require("rstanarm") #' library(bayestestR) #' #' # Simulate a posterior distribution of mean 1 and SD 1 #' # ---------------------------------------------------- #' posterior <- rnorm(1000, mean = 1, sd = 1) #' p_significance(posterior) #' #' # Simulate a dataframe of posterior distributions #' # ----------------------------------------------- #' df <- data.frame(replicate(4, rnorm(100))) #' p_significance(df) #' \donttest{ #' # rstanarm models #' # ----------------------------------------------- #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, #' data = mtcars, #' chains = 2, refresh = 0 #' ) #' p_significance(model) #' # multiple thresholds - asymmetric, symmetric, default #' p_significance(model, threshold = list(c(-10, 5), 0.2, "default")) #' # named thresholds #' p_significance(model, threshold = list(wt = 0.2, `(Intercept)` = c(-10, 5))) #' } #' @export p_significance <- function(x, ...) { UseMethod("p_significance") } #' @export p_significance.default <- function(x, ...) { insight::format_error( paste0("'p_significance()' is not yet implemented for objects of class '", class(x)[1], "'.") ) } #' @rdname p_significance #' @export p_significance.numeric <- function(x, threshold = "default", ...) { threshold <- .select_threshold_ps(threshold = threshold) out <- p_significance(data.frame(Posterior = x), threshold = threshold) attr(out, "data") <- x out } #' @rdname p_significance #' @export p_significance.get_predicted <- function(x, threshold = "default", use_iterations = FALSE, verbose = TRUE, ...) { if (isTRUE(use_iterations)) { if ("iterations" %in% names(attributes(x))) { out <- p_significance( as.data.frame(t(attributes(x)$iterations)), threshold = threshold, verbose = verbose, ... ) } else { insight::format_error("No iterations present in the output.") } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) } else { out <- p_significance(as.numeric(x), threshold = threshold, verbose = verbose, ... 
) } out } #' @export #' @rdname p_significance #' @inheritParams p_direction p_significance.data.frame <- function(x, threshold = "default", rvar_col = NULL, ...) { obj_name <- insight::safe_deparse_symbol(substitute(x)) x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::p_significance cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x)) } threshold <- .select_threshold_ps(threshold = threshold, params = x) x <- .select_nums(x) if (ncol(x) == 1) { ps <- .p_significance(x[, 1], threshold = threshold, ...) } else if (is.list(threshold)) { # check if list of values contains only valid values threshold <- .check_list_range(threshold, x, larger_two = TRUE) # apply thresholds to each column ps <- mapply( function(p, thres) { .p_significance( p, threshold = thres ) }, x, threshold, SIMPLIFY = FALSE ) } else { ps <- sapply(x, .p_significance, threshold = threshold, simplify = TRUE, ...) } out <- data.frame( Parameter = names(x), ps = as.numeric(ps), row.names = NULL, stringsAsFactors = FALSE ) attr(out, "threshold") <- threshold attr(out, "object_name") <- obj_name class(out) <- unique(c("p_significance", "see_p_significance", class(out))) out } #' @export p_significance.draws <- function(x, threshold = "default", ...) { p_significance(.posterior_draws_to_df(x), threshold = threshold, ...) } #' @export p_significance.rvar <- p_significance.draws #' @export p_significance.parameters_simulate_model <- function(x, threshold = "default", ...) { obj_name <- attr(x, "object_name") if (!is.null(obj_name)) { # first try, parent frame model <- .safe(get(obj_name, envir = parent.frame())) if (is.null(model)) { # second try, global env model <- .safe(get(obj_name, envir = globalenv())) } } threshold <- .select_threshold_ps(model = model, threshold = threshold) out <- p_significance.data.frame(x, threshold = threshold) attr(out, "object_name") <- obj_name out } #' @export p_significance.MCMCglmm <- function(x, threshold = "default", ...) { nF <- x$Fixed$nfl out <- p_significance(as.data.frame(x$Sol[, 1:nF, drop = FALSE]), threshold = threshold, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_significance.BFBayesFactor <- function(x, threshold = "default", ...) { out <- p_significance(insight::get_parameters(x), threshold = threshold, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_significance.mcmc <- function(x, threshold = "default", ...) { p_significance(as.data.frame(x), threshold = threshold, ...) } #' @export p_significance.bamlss <- function(x, threshold = "default", component = "all", ...) { out <- p_significance( insight::get_parameters(x, component = component), threshold = threshold, ... ) out <- .add_clean_parameters_attribute(out, x) out } #' @export p_significance.bcplm <- function(x, threshold = "default", ...) { p_significance(insight::get_parameters(x), threshold = threshold, ...) } #' @export p_significance.mcmc.list <- p_significance.bcplm #' @export p_significance.bayesQR <- p_significance.bcplm #' @export p_significance.blrm <- p_significance.bcplm #' @export p_significance.BGGM <- p_significance.bcplm #' @export p_significance.emmGrid <- function(x, threshold = "default", ...) { xdf <- insight::get_parameters(x) out <- p_significance(xdf, threshold = threshold, ...) 
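  # add the reference grid information from the emmGrid object back to the results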
out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_significance.emm_list <- p_significance.emmGrid #' @export p_significance.slopes <- function(x, threshold = "default", ...) { xrvar <- .get_marginaleffects_draws(x) out <- p_significance(xrvar, threshold = threshold, ...) out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_significance.comparisons <- p_significance.slopes #' @export p_significance.predictions <- p_significance.slopes #' @export p_significance.stanreg <- function(x, threshold = "default", effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ...) { params <- insight::get_parameters( x, effects = effects, component = component, parameters = parameters ) threshold <- .select_threshold_ps( model = x, threshold = threshold, params = params, verbose = verbose ) result <- p_significance(params, threshold = threshold) cleaned_parameters <- .get_cleaned_parameters(x, ...) out <- .prepare_output(result, cleaned_parameters, inherits(x, "stanmvreg")) attr(out, "clean_parameters") <- cleaned_parameters attr(out, "threshold") <- threshold attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- class(result) out } #' @export p_significance.stanfit <- p_significance.stanreg #' @export p_significance.blavaan <- p_significance.stanreg #' @rdname p_significance #' @export p_significance.brmsfit <- function(x, threshold = "default", effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ...) { params <- insight::get_parameters( x, effects = effects, component = component, parameters = parameters, ... ) threshold <- .select_threshold_ps( model = x, threshold = threshold, params = params, verbose = verbose ) result <- p_significance(params, threshold = threshold) cleaned_parameters <- .get_cleaned_parameters(x, ...) out <- .prepare_output(result, cleaned_parameters) attr(out, "clean_parameters") <- cleaned_parameters attr(out, "threshold") <- threshold attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- class(result) out } .p_significance <- function(x, threshold, ...) { if (length(threshold) == 1) { psig <- max( c( length(x[x > abs(threshold)]) / length(x), # ps positive length(x[x < -abs(threshold)]) / length(x) # ps negative ) ) } else { psig <- max( c( length(x[x > threshold[2]]) / length(x), # ps positive length(x[x < threshold[1]]) / length(x) # ps negative ) ) } psig } # methods --------------------------- #' @rdname as.numeric.p_direction #' @export as.numeric.p_significance <- function(x, ...) { if (inherits(x, "data.frame")) { as.numeric(as.vector(x$ps)) } else { as.vector(x) } } #' @method as.double p_significance #' @export as.double.p_significance <- as.numeric.p_significance # helpers -------------------------- #' @keywords internal .select_threshold_ps <- function(model = NULL, threshold = "default", params = NULL, verbose = TRUE) { if (is.list(threshold)) { # if we have named elements, complete list if (!is.null(params)) { named_threshold <- names(threshold) if (!is.null(named_threshold)) { # find out which name belongs to which parameter pos <- match(named_threshold, colnames(params)) # if not all element names were found, error if (anyNA(pos)) { insight::format_error(paste( "Not all elements of `threshold` were found in the parameters. 
Please check following names:", toString(named_threshold[is.na(pos)]) )) } # now "fill" non-specified elements with "default" out <- as.list(rep("default", ncol(params))) out[pos] <- threshold # overwrite former threshold threshold <- out } } lapply(threshold, function(i) { out <- .select_threshold_list(model = model, threshold = i, verbose = verbose) if (length(out) == 1) { out <- c(-1 * abs(out), abs(out)) } out }) } else { .select_threshold_list(model = model, threshold = threshold, verbose = verbose) } } #' @keywords internal .select_threshold_list <- function(model = NULL, threshold = "default", verbose = TRUE) { # If default if (all(threshold == "default")) { if (is.null(model)) { threshold <- 0.1 } else { threshold <- rope_range(model, verbose = verbose)[2] } } else if (!is.list(threshold) && (!all(is.numeric(threshold)) || length(threshold) > 2)) { insight::format_error( "`threshold` should be one of the following values:", "- \"default\", in which case the threshold is based on `rope_range()`", "- a single numeric value (e.g., 0.1), which is used as range around zero (i.e. the threshold range is set to -0.1 and 0.1)", # nolint "- a numeric vector of length two (e.g., `c(-0.2, 0.1)`)" ) } threshold } #' @keywords internal .check_list_range <- function(range, params, larger_two = FALSE) { # if we have named elements, complete list named_range <- names(range) if (!is.null(named_range)) { # find out which name belongs to which parameter pos <- match(named_range, colnames(params)) # if not all element names were found, error if (anyNA(pos)) { insight::format_error(paste( "Not all elements of `range` were found in the parameters. Please check following names:", toString(named_range[is.na(pos)]) )) } # now "fill" non-specified elements with "default" out <- as.list(rep("default", ncol(params))) out[pos] <- range # overwrite former range range <- out } if (length(range) != ncol(params)) { insight::format_error("Length of `range` (i.e. number of ROPE limits) should match the number of parameters.") } # check if list of values contains only valid values checks <- vapply(range, function(r) { if (larger_two) { !all(r == "default") || !all(is.numeric(r)) || length(r) > 2 } else { !all(r == "default") || !all(is.numeric(r)) || length(r) != 2 } }, logical(1)) if (!all(checks)) { insight::format_error("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } range } bayestestR/R/print_html.R0000644000176200001440000001370015052646230015107 0ustar liggesusers#' @rdname display.describe_posterior #' @export print_html.describe_posterior <- function(x, digits = 2, caption = "Summary of Posterior Distribution", ...) { .print_html_default(x = x, digits = digits, caption = caption, ...) } #' @export print_html.point_estimate <- function(x, digits = 2, caption = "Point Estimate", ...) { .print_html_default(x = x, digits = digits, caption = caption, ...) } #' @export print_html.map_estimate <- function(x, digits = 2, caption = "MAP Estimate", ...) { .print_html_default(x = x, digits = digits, caption = caption, ...) } #' @export print_html.p_direction <- function(x, digits = 2, caption = "Probability of Direction (pd)", ...) { .print_html_default(x = x, digits = digits, caption = caption, ...) } #' @export print_html.p_map <- function(x, digits = 2, caption = "MAP-based p-value", ...) { .print_html_default(x = x, digits = digits, caption = caption, ...) } #' @export print_html.p_rope <- function(x, digits = 2, ...) 
{ # check if we have multiple ROPE values if (insight::n_unique(x$ROPE_low) > 1) { caption <- "Proportion of samples inside the ROPE" } else { caption <- sprintf( "Proportion of samples inside the ROPE [%.*f, %.*f]", digits, x$ROPE_low[1], digits, x$ROPE_high[1] ) x$ROPE_low <- x$ROPE_high <- NULL } .print_html_default(x = x, digits = digits, caption = caption, ci_string = "ROPE", ...) } #' @export print_html.p_significance <- function(x, digits = 2, ...) { threshold <- attributes(x)$threshold if (is.list(threshold)) { caption <- "Practical Significance" out <- as.data.frame(do.call(rbind, threshold)) colnames(out) <- c("ROPE_low", "ROPE_high") x$ROPE_low <- out$ROPE_low x$ROPE_high <- out$ROPE_high ci_string <- "ROPE" } else { caption <- sprintf( "Practical Significance (threshold: %s)", insight::format_value(attributes(x)$threshold, digits = digits) ) ci_string <- NULL } .print_html_default(x = x, digits = digits, caption = caption, ci_string = ci_string, ...) } #' @export print_html.bayestestR_hdi <- function(x, digits = 2, caption = "Highest Density Interval", ...) { .print_html_default(x = x, digits = digits, caption = caption, ci_string = "HDI", ...) } #' @export print_html.bayestestR_eti <- function(x, digits = 2, caption = "Equal-Tailed Interval", ...) { .print_html_default(x = x, digits = digits, caption = caption, ci_string = "ETI", ...) } #' @export print_html.bayestestR_si <- function(x, digits = 2, caption = "Support Interval", ...) { .print_html_default(x = x, digits = digits, caption = caption, ci_string = "SI", ...) } # special handling for bayes factors ------------------ #' @export print_html.bayesfactor_models <- function(x, digits = 3, log = FALSE, show_names = TRUE, caption = "Bayes Factors for Model Comparison", ...) { .print_bf_html_default( x = x, digits = digits, log = log, show_names = show_names, caption = caption, align = "llr", ... ) } #' @export print_html.bayesfactor_inclusion <- function(x, digits = 3, log = FALSE, caption = "Inclusion Bayes Factors (Model Averaged)", ...) { .print_bf_html_default( x = x, digits = digits, log = log, caption = caption, align = "lrrr", ... ) } #' @export print_html.bayesfactor_restricted <- function(x, digits = 3, log = FALSE, caption = "Bayes Factor (Order-Restriction)", ...) { .print_bf_html_default(x = x, digits = digits, log = log, caption = caption, ...) } #' @export print_html.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, log = log, format = "html", ... ) insight::export_table(formatted_table, format = .check_format_backend(...), ...) } # util --------------- .print_html_default <- function(x, digits = 2, caption = NULL, subtitles = NULL, ci_string = "CI", ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, format = "html", ci_string = ci_string, caption = caption, subtitles = subtitles, ... ) # print for data frame - I don't think we need a special handling for # numeric values to have a markdown-table output insight::export_table( formatted_table, caption = caption, format = .check_format_backend(...), ... ) } .print_bf_html_default <- function(x, digits = 3, log = FALSE, show_names = NULL, caption = NULL, align = NULL, ...) 
{ formatted_table <- format( x, digits = digits, log = log, show_names = show_names, caption = caption, format = "html", ... ) insight::export_table( formatted_table, align = align, caption = caption, format = .check_format_backend(...), ... ) } bayestestR/R/utils_check_collinearity.R0000644000176200001440000000520114542333405017777 0ustar liggesusers#' @keywords internal .check_multicollinearity <- function(model, method = "equivalence_test", threshold = 0.7, ...) { valid_parameters <- insight::find_parameters( model, parameters = "^(?!(r_|sd_|prior_|cor_|lp__|b\\[))", flatten = TRUE ) if (inherits(model, "stanfit")) { dat <- insight::get_parameters(model)[, valid_parameters, drop = FALSE] } else { dat <- as.data.frame(model, optional = FALSE)[, valid_parameters, drop = FALSE] } # need at least three columns, one is removed anyway... if (ncol(dat) > 2) { dat <- dat[, -1, drop = FALSE] if (ncol(dat) > 1) { parameter_correlation <- stats::cor(dat) parameter <- expand.grid(colnames(dat), colnames(dat), stringsAsFactors = FALSE) results <- cbind( parameter, corr = abs(as.vector(expand.grid(parameter_correlation)[[1]])), pvalue = apply(parameter, 1, function(r) stats::cor.test(dat[[r[1]]], dat[[r[2]]])$p.value) ) # Filter results <- results[results$pvalue < 0.05 & results$Var1 != results$Var2, ] if (nrow(results) > 0) { # Remove duplicates results$where <- paste0(results$Var1, " and ", results$Var2) results$where2 <- paste0(results$Var2, " and ", results$Var1) to_remove <- NULL for (i in seq_len(nrow(results))) { if (results$where2[i] %in% results$where[1:i]) { to_remove <- c(to_remove, i) } } results <- results[-to_remove, ] # Filter by first threshold threshold <- pmin(threshold, 0.9) results <- results[results$corr > threshold & results$corr <= 0.9, ] if (nrow(results) > 0) { where <- paste0("between ", toString(paste0(results$where, " (r = ", round(results$corr, 2), ")")), "") insight::format_alert(paste0( "Possible multicollinearity ", where, ". This might lead to inappropriate results. See 'Details' in '?", method, "'." )) } # Filter by second threshold results <- results[results$corr > 0.9, ] if (nrow(results) > 0) { where <- paste0("between ", toString(paste0(results$where, " (r = ", round(results$corr, 2), ")")), "") insight::format_alert(paste0( "Probable multicollinearity ", where, ". This might lead to inappropriate results. See 'Details' in '?", method, "'." )) } } } } } bayestestR/R/map_estimate.R0000644000176200001440000002140615052646230015401 0ustar liggesusers#' Maximum A Posteriori probability estimate (MAP) #' #' Find the **Highest Maximum A Posteriori probability estimate (MAP)** of a #' posterior, i.e., the value associated with the highest probability density #' (the "peak" of the posterior distribution). In other words, it is an estimation #' of the *mode* for continuous parameters. Note that this function relies on #' [`estimate_density()`], which by default uses a different smoothing bandwidth #' (`"SJ"`) compared to the legacy default implemented the base R [`density()`] #' function (`"nrd0"`). #' #' @inheritParams hdi #' @inheritParams estimate_density #' #' @inheritSection hdi Model components #' #' @return A numeric value if `x` is a vector. If `x` is a model-object, #' returns a data frame with following columns: #' #' - `Parameter`: The model parameter(s), if `x` is a model-object. If `x` is a #' vector, this column is missing. #' - `MAP_Estimate`: The MAP estimate for the posterior or each model parameter. 
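#'
#' @details Conceptually, the MAP estimate is the value at which the estimated
#' posterior density reaches its maximum. A minimal sketch of this idea (the
#' actual implementation relies on [`estimate_density()`], so results may
#' differ slightly from the base R `density()` used here):
#'
#' ```r
#' posterior <- rnorm(10000, mean = 1, sd = 1)
#' d <- density(posterior) # kernel density estimate of the posterior
#' d$x[which.max(d$y)] # value with the highest density, i.e. the MAP estimate
#' ```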
#' #' @examplesIf require("rstanarm") && require("brms") #' \donttest{ #' library(bayestestR) #' #' posterior <- rnorm(10000) #' map_estimate(posterior) #' #' plot(density(posterior)) #' abline(v = as.numeric(map_estimate(posterior)), col = "red") #' #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' map_estimate(model) #' #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' map_estimate(model) #' } #' #' @export map_estimate <- function(x, ...) { UseMethod("map_estimate") } # numeric ----------------------- #' @rdname map_estimate #' @export map_estimate.numeric <- function(x, precision = 2^10, method = "kernel", verbose = TRUE, ...) { out <- map_estimate( data.frame(x = x), precision, method = method, verbose = verbose, ... ) attr(out, "data") <- x out } .map_estimate <- function(x, precision = 2^10, method = "kernel", verbose = TRUE, ...) { # sanity check - if we have only one unique value (a vector of constant values) # density estimation doesn't work if (insight::n_unique(x) == 1) { if (verbose) { insight::format_alert("Data is singular, MAP estimate equals the unique value of the data.") } out <- stats::na.omit(x)[1] attr(out, "MAP_density") <- 1 } else { d <- try(estimate_density(x, precision = precision, method = method, ...), silent = TRUE) if (inherits(d, "try-error")) { if (verbose) { msg <- "Could not calculate MAP estimate." if (grepl("too sparse", d, fixed = TRUE)) { msg <- paste(msg, "The provided data is probably too sparse to calculate the density.") } insight::format_alert(msg) } return(NA) } out <- d$x[which.max(d$y)] attr(out, "MAP_density") <- max(d$y) } out } # other models ----------------------- #' @export map_estimate.bayesQR <- function(x, precision = 2^10, method = "kernel", ...) { x <- insight::get_parameters(x) map_estimate(x, precision = precision, method = method, ...) } #' @export map_estimate.BGGM <- map_estimate.bayesQR #' @export map_estimate.mcmc <- map_estimate.bayesQR #' @export map_estimate.bamlss <- map_estimate.bayesQR #' @export map_estimate.bcplm <- map_estimate.bayesQR #' @export map_estimate.blrm <- map_estimate.bayesQR #' @export map_estimate.mcmc.list <- map_estimate.bayesQR # stan / posterior models ----------------------- #' @keywords internal .map_estimate_models <- function(x, precision, method, verbose = TRUE, ...) { l <- sapply( x, .map_estimate, precision = precision, method = method, verbose = verbose, simplify = FALSE, ... ) out <- data.frame( Parameter = colnames(x), MAP_Estimate = unlist(l, use.names = FALSE), stringsAsFactors = FALSE, row.names = NULL ) out <- .add_clean_parameters_attribute(out, x, ...) attr(out, "MAP_density") <- sapply(l, attr, "MAP_density") attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) attr(out, "centrality") <- "map" class(out) <- unique(c("map_estimate", "see_point_estimate", class(out))) out } #' @export map_estimate.stanreg <- function(x, precision = 2^10, method = "kernel", effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ...) { .map_estimate_models( x = insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), precision = precision, method = method, verbose = verbose, ... ) } #' @export map_estimate.stanfit <- map_estimate.stanreg #' @export map_estimate.blavaan <- map_estimate.stanreg #' @rdname map_estimate #' @export map_estimate.brmsfit <- function(x, precision = 2^10, method = "kernel", effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ...) 
{ .map_estimate_models( x = insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), precision = precision, method = method, verbose = verbose, ... ) } #' @rdname map_estimate #' @inheritParams p_direction #' @export map_estimate.data.frame <- function(x, precision = 2^10, method = "kernel", rvar_col = NULL, verbose = TRUE, ...) { x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::map_estimate cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) obj_name <- insight::safe_deparse_symbol(substitute(x)) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x)) } .map_estimate_models(x, precision = precision, method = method, verbose = verbose, ...) } #' @export map_estimate.draws <- function(x, precision = 2^10, method = "kernel", ...) { .map_estimate_models(.posterior_draws_to_df(x), precision = precision, method = method, ...) } #' @export map_estimate.rvar <- map_estimate.draws #' @export map_estimate.emmGrid <- function(x, precision = 2^10, method = "kernel", ...) { xdf <- insight::get_parameters(x) out <- .map_estimate_models(xdf, precision = precision, method = method, ...) .append_datagrid(out, x) } #' @export map_estimate.emm_list <- map_estimate.emmGrid #' @export map_estimate.slopes <- function(x, precision = 2^10, method = "kernel", ...) { xrvar <- .get_marginaleffects_draws(x) out <- map_estimate(xrvar, precision = precision, method = method, ...) .append_datagrid(out, x) } #' @export map_estimate.comparisons <- map_estimate.slopes #' @export map_estimate.predictions <- map_estimate.slopes #' @rdname map_estimate #' @export map_estimate.get_predicted <- function(x, precision = 2^10, method = "kernel", use_iterations = FALSE, verbose = TRUE, ...) { if (isTRUE(use_iterations)) { if ("iterations" %in% names(attributes(x))) { out <- map_estimate( as.data.frame(t(attributes(x)$iterations)), precision = precision, method = method, verbose = verbose, ... ) } else { insight::format_error("No iterations present in the output.") } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) } else { out <- map_estimate( as.numeric(x), precision = precision, method = method, verbose = verbose, ... ) } out } # Methods ----------------------------------------------------------------- #' @rdname as.numeric.p_direction #' @method as.numeric map_estimate #' @export as.numeric.map_estimate <- function(x, ...) { if (inherits(x, "data.frame")) { me <- as.numeric(as.vector(x$MAP_Estimate)) names(me) <- x$Parameter me } else { as.vector(x) } } #' @method as.double map_estimate #' @export as.double.map_estimate <- as.numeric.map_estimate bayestestR/R/si.R0000644000176200001440000002550014765755711013362 0ustar liggesusers#' Compute Support Intervals #' #' A support interval contains only the values of the parameter that predict the observed data better #' than average, by some degree *k*; these are values of the parameter that are associated with an #' updating factor greater or equal than *k*. From the perspective of the Savage-Dickey Bayes factor, testing #' against a point null hypothesis for any value within the support interval will yield a Bayes factor smaller #' than *1/k*. 
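#'
#' Conceptually, a parameter value lies inside the support interval if its
#' posterior density is at least `BF` times its prior density (i.e., its
#' updating factor is at least `BF`). A rough illustration of this idea for a
#' normal prior updated to a normal posterior (the actual implementation
#' estimates these densities from the prior and posterior samples):
#'
#' ```r
#' x <- seq(-2, 2, length.out = 500)
#' updating_factor <- dnorm(x, mean = 0.5, sd = 0.3) / dnorm(x, mean = 0, sd = 1)
#' range(x[updating_factor >= 1]) # approximate support interval for BF = 1
#' ```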
#' #' **For more info, in particular on specifying correct priors for factors with more than 2 levels, #' see [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html).** #' #' @param BF The amount of support required to be included in the support interval. #' @inheritParams bayesfactor_parameters #' @inheritParams hdi #' @inherit hdi seealso #' @family ci #' #' @details This method is used to compute support intervals based on prior and posterior distributions. #' For the computation of support intervals, the model priors must be proper priors (at the very least #' they should be *not flat*, and it is preferable that they be *informative* - note #' that by default, `brms::brm()` uses flat priors for fixed-effects; see example below). #' #' @section Choosing a value of `BF`: #' The choice of `BF` (the level of support) depends on what we want our interval #' to represent: #' #' - A `BF` = 1 contains values whose credibility is not decreased by observing the data. #' - A `BF` > 1 contains values who received more impressive support from the data. #' - A `BF` < 1 contains values whose credibility has *not* been impressively #' decreased by observing the data. Testing against values outside this interval #' will produce a Bayes factor larger than 1/`BF` in support of the alternative. #' E.g., if an SI (BF = 1/3) excludes 0, the Bayes factor against the point-null #' will be larger than 3. #' #' @inheritSection bayesfactor_parameters Setting the correct `prior` #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @return #' A data frame containing the lower and upper bounds of the SI. #' #' Note that if the level of requested support is higher than observed in the data, the #' interval will be `[NA,NA]`. #' #' @examplesIf require("logspline") && require("rstanarm") && require("brms") && require("emmeans") #' library(bayestestR) #' #' prior <- distribution_normal(1000, mean = 0, sd = 1) #' posterior <- distribution_normal(1000, mean = 0.5, sd = 0.3) #' #' si(posterior, prior, verbose = FALSE) #' \donttest{ #' # rstanarm models #' # --------------- #' library(rstanarm) #' contrasts(sleep$group) <- contr.equalprior_pairs # see vignette #' stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep) #' si(stan_model, verbose = FALSE) #' si(stan_model, BF = 3, verbose = FALSE) #' #' # emmGrid objects #' # --------------- #' library(emmeans) #' group_diff <- pairs(emmeans(stan_model, ~group)) #' si(group_diff, prior = stan_model, verbose = FALSE) #' #' # brms models #' # ----------- #' library(brms) #' contrasts(sleep$group) <- contr.equalprior_pairs # see vingette #' my_custom_priors <- #' set_prior("student_t(3, 0, 1)", class = "b") + #' set_prior("student_t(3, 0, 1)", class = "sd", group = "ID") #' #' brms_model <- suppressWarnings(brm(extra ~ group + (1 | ID), #' data = sleep, #' prior = my_custom_priors, #' refresh = 0 #' )) #' si(brms_model, verbose = FALSE) #' } #' @references #' Wagenmakers, E., Gronau, Q. F., Dablander, F., & Etz, A. (2018, November 22). #' The Support Interval. \doi{10.31234/osf.io/zwnxb} #' #' @export si <- function(posterior, ...) { UseMethod("si") } #' @rdname si #' @export si.numeric <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) 
{ if (is.null(prior)) { prior <- posterior if (verbose) { insight::format_warning( "Prior not specified!", "Support intervals ('si') can only be computed for Bayesian models with proper priors.", "Please specify priors (with column order matching 'posterior')." ) } } prior <- data.frame(X = prior) posterior <- data.frame(X = posterior) # Get SIs out <- si.data.frame( posterior = posterior, prior = prior, BF = BF, verbose = verbose, ... ) out$Parameter <- NULL out } #' @rdname si #' @export si.stanreg <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, effects = "fixed", component = "location", parameters = NULL, ...) { cleaned_parameters <- insight::clean_parameters(posterior) samps <- .clean_priors_and_posteriors(posterior, prior, effects = effects, component = component, parameters = parameters, verbose = verbose ) # Get SIs temp <- si.data.frame( posterior = samps$posterior, prior = samps$prior, BF = BF, verbose = verbose, ... ) out <- .prepare_output(temp, cleaned_parameters, inherits(posterior, "stanmvreg")) attr(out, "ci_method") <- "SI" attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) class(out) <- class(temp) attr(out, "plot_data") <- attr(temp, "plot_data") out } #' @export si.brmsfit <- si.stanreg #' @export si.blavaan <- si.stanreg #' @export si.emmGrid <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) { samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose) # Get SIs out <- si.data.frame( posterior = samps$posterior, prior = samps$prior, BF = BF, verbose = verbose, ... ) out <- .append_datagrid(out, posterior, long = length(BF) > 1L) attr(out, "ci_method") <- "SI" attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) out } #' @export si.emm_list <- si.emmGrid #' @export si.slopes <- si.emmGrid #' @export si.comparisons <- si.emmGrid #' @export si.predictions <- si.emmGrid #' @export si.stanfit <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, effects = "fixed", ...) { out <- si(insight::get_parameters(posterior, effects = effects), prior = prior, BF = BF, verbose = verbose ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) out } #' @rdname si #' @export si.get_predicted <- function(posterior, prior = NULL, BF = 1, use_iterations = FALSE, verbose = TRUE, ...) { if (isTRUE(use_iterations)) { if ("iterations" %in% names(attributes(posterior))) { out <- si( as.data.frame(t(attributes(posterior)$iterations)), prior = prior, BF = BF, verbose = verbose, ... ) } else { insight::format_error("No iterations present in the output.") } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) } else { out <- si(insight::get_parameters(posterior), prior = prior, BF = BF, verbose = verbose, ...) } out } #' @rdname si #' @inheritParams p_direction #' @export si.data.frame <- function(posterior, prior = NULL, BF = 1, rvar_col = NULL, verbose = TRUE, ...) 
{ x_rvar <- .possibly_extract_rvar_col(posterior, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::si cl$posterior <- x_rvar cl$rvar_col <- NULL prior_rvar <- .possibly_extract_rvar_col(posterior, prior) if (length(prior_rvar) > 0L) { cl$prior <- prior_rvar } out <- eval.parent(cl) obj_name <- insight::safe_deparse_symbol(substitute(posterior)) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, posterior, long = length(BF) > 1L)) } if (is.null(prior)) { prior <- posterior insight::format_warning( "Prior not specified!", "Support intervals ('si') can only be computed for Bayesian models with proper priors.", "Please specify priors (with column order matching 'posterior')." ) } if (verbose && (nrow(posterior) < 4e4 || nrow(prior) < 4e4)) { insight::format_warning( "Support intervals might not be precise.", "For precise support intervals, sampling at least 40,000 posterior samples is recommended." ) } out <- lapply(BF, function(BFi) { .si.data.frame(posterior, prior, BFi, verbose = verbose) }) out <- do.call(rbind, out) attr(out, "ci_method") <- "SI" attr(out, "ci") <- BF attr(out, "plot_data") <- .make_BF_plot_data(posterior, prior, 0, 0, ...)$plot_data class(out) <- unique(c("bayestestR_si", "see_si", "bayestestR_ci", "see_ci", class(out))) out } #' @export si.draws <- function(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) { si(.posterior_draws_to_df(posterior), prior = if (!is.null(prior)) .posterior_draws_to_df(prior), BF = BF, verbose = verbose, ... ) } #' @export si.rvar <- si.draws # Helper ------------------------------------------------------------------ .si.data.frame <- function(posterior, prior, BF, verbose = TRUE, ...) { sis <- matrix(NA, nrow = ncol(posterior), ncol = 2) for (par in seq_along(posterior)) { sis[par, ] <- .si(posterior[[par]], prior[[par]], BF = BF, verbose = verbose, ... ) } data.frame( Parameter = colnames(posterior), CI = BF, CI_low = sis[, 1], CI_high = sis[, 2], stringsAsFactors = FALSE ) } #' @keywords internal .si <- function(posterior, prior, BF = 1, extend_scale = 0.05, precision = 2^8, verbose = TRUE, ...) { insight::check_if_installed("logspline") if (isTRUE(all.equal(prior, posterior))) { return(c(NA, NA)) } x <- c(prior, posterior) x_range <- range(x) x_rangex <- stats::median(x) + 7 * stats::mad(x) * c(-1, 1) x_range <- c( max(c(x_range[1], x_rangex[1])), min(c(x_range[2], x_rangex[2])) ) extension_scale <- diff(x_range) * extend_scale x_range <- x_range + c(-1, 1) * extension_scale x_axis <- seq(x_range[1], x_range[2], length.out = precision) f_prior <- .logspline(prior, ...) f_posterior <- .logspline(posterior, ...) d_prior <- logspline::dlogspline(x_axis, f_prior, log = TRUE) d_posterior <- logspline::dlogspline(x_axis, f_posterior, log = TRUE) relative_d <- d_posterior - d_prior crit <- relative_d >= log(BF) cp <- rle(stats::na.omit(crit)) if (length(cp$lengths) > 3 && verbose) { insight::format_warning("More than 1 SI detected. Plot the result to investigate.") } x_supported <- stats::na.omit(x_axis[crit]) if (length(x_supported) < 2) { return(c(NA, NA)) } range(x_supported) } bayestestR/R/rope.R0000644000176200001440000006447615052646230013714 0ustar liggesusers#' Region of Practical Equivalence (ROPE) #' #' Compute the proportion of the HDI (default to the `89%` HDI) of a posterior #' distribution that lies within a region of practical equivalence. #' #' @param x Vector representing a posterior distribution. Can also be a #' `stanreg` or `brmsfit` model. 
#' @param range ROPE's lower and higher bounds. Should be `"default"` or #' depending on the number of outcome variables a vector or a list. For models #' with one response, `range` can be: #' #' - a vector of length two (e.g., `c(-0.1, 0.1)`), #' - a list of numeric vector of the same length as numbers of parameters (see #' 'Examples'). #' - a list of *named* numeric vectors, where names correspond to parameter #' names. In this case, all parameters that have no matching name in `range` #' will be set to `"default"`. #' #' In multivariate models, `range` should be a list with another list (one for #' each response variable) of numeric vectors . Vector names should correspond to #' the name of the response variables. If `"default"` and input is a vector, the #' range is set to `c(-0.1, 0.1)`. If `"default"` and input is a Bayesian model, #' [`rope_range()`] is used. See 'Examples'. #' @param ci The Credible Interval (CI) probability, corresponding to the #' proportion of HDI, to use for the percentage in ROPE. #' @param ci_method The type of interval to use to quantify the percentage in #' ROPE. Can be 'HDI' (default) or 'ETI'. See [`ci()`]. #' @param complement Should the probabilities above/below the ROPE (the #' _complementary_ probabilities) be returned as well? See #' [equivalence_test()] as well. #' #' @inheritParams hdi #' #' @inheritSection hdi Model components #' #' @section ROPE: #' #' Statistically, the probability of a posterior distribution of being different #' from 0 does not make much sense (the probability of a single value null #' hypothesis in a continuous distribution is 0). Therefore, the idea #' underlining ROPE is to let the user define an area around the null value #' enclosing values that are *equivalent to the null* value for practical #' purposes (_Kruschke 2010, 2011, 2014_). #' #' Kruschke (2018) suggests that such null value could be set, by default, to #' the -0.1 to 0.1 range of a standardized parameter (negligible effect size #' according to Cohen, 1988). This could be generalized: For instance, for #' linear models, the ROPE could be set as `0 +/- .1 * sd(y)`. This ROPE range #' can be automatically computed for models using the [`rope_range()`] function. #' #' Kruschke (2010, 2011, 2014) suggests using the proportion of the `95%` (or #' `89%`, considered more stable) [HDI][hdi] that falls within the ROPE as an #' index for "null-hypothesis" testing (as understood under the Bayesian #' framework, see [`equivalence_test()`]). #' #' @section Sensitivity to parameter's scale: #' #' It is important to consider the unit (i.e., the scale) of the predictors when #' using an index based on the ROPE, as the correct interpretation of the ROPE #' as representing a region of practical equivalence to zero is dependent on the #' scale of the predictors. Indeed, the percentage in ROPE depend on the unit of #' its parameter. In other words, as the ROPE represents a fixed portion of the #' response's scale, its proximity with a coefficient depends on the scale of #' the coefficient itself. #' #' @section Multicollinearity - Non-independent covariates: #' #' When parameters show strong correlations, i.e. when covariates are not #' independent, the joint parameter distributions may shift towards or away from #' the ROPE. Collinearity invalidates ROPE and hypothesis testing based on #' univariate marginals, as the probabilities are conditional on independence. #' Most problematic are parameters that only have partial overlap with the ROPE #' region. 
In case of collinearity, the (joint) distributions of these #' parameters may either get an increased or decreased ROPE, which means that #' inferences based on `rope()` are inappropriate (_Kruschke 2014, 340f_). #' #' `rope()` performs a simple check for pairwise correlations between #' parameters, but as there can be collinearity between more than two variables, #' a first step to check the assumptions of this hypothesis testing is to look #' at different pair plots. An even more sophisticated check is the projection #' predictive variable selection (_Piironen and Vehtari 2017_). #' #' @section Strengths and Limitations: #' **Strengths:** Provides information related to the practical relevance of #' the effects. #' #' **Limitations:** A ROPE range needs to be arbitrarily defined. Sensitive to #' the scale (the unit) of the predictors. Not sensitive to highly significant #' effects. #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @references #' - Cohen, J. (1988). Statistical power analysis for the behavioural sciences. #' - Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. #' Trends in cognitive sciences, 14(7), 293-300. \doi{10.1016/j.tics.2010.05.001}. #' - Kruschke, J. K. (2011). Bayesian assessment of null values via parameter #' estimation and model comparison. Perspectives on Psychological Science, #' 6(3), 299-312. \doi{10.1177/1745691611406925}. #' - Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, #' JAGS, and Stan. Academic Press. \doi{10.1177/2515245918771304}. #' - Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian #' estimation. Advances in Methods and Practices in Psychological Science, #' 1(2), 270-280. \doi{10.1177/2515245918771304}. #' - Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect #' Existence and Significance in the Bayesian Framework. Frontiers in #' Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} #' - Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive #' methods for model selection. Statistics and Computing, 27(3), 711–735. #' \doi{10.1007/s11222-016-9649-y} #' #' @examplesIf all(insight::check_if_installed(c("rstanarm", "emmeans", "brms", "BayesFactor"), quietly = TRUE)) #' library(bayestestR) #' #' rope(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) #' rope(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1)) #' rope(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1)) #' rope(x = rnorm(1000, 1, 1), ci = c(0.90, 0.95)) #' \donttest{ #' model <- suppressWarnings( #' rstanarm::stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' ) #' rope(model) #' rope(model, ci = c(0.90, 0.95)) #' #' # multiple ROPE ranges #' rope(model, range = list(c(-10, 5), c(-0.2, 0.2), "default")) #' #' # named ROPE ranges #' rope(model, range = list(gear = c(-3, 2), wt = c(-0.2, 0.2))) #' #' rope(emmeans::emtrends(model, ~1, "wt"), ci = c(0.90, 0.95)) #' #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars, refresh = 0) #' rope(model) #' rope(model, ci = c(0.90, 0.95)) #' #' model <- brms::brm( #' brms::bf(brms::mvbind(mpg, disp) ~ wt + cyl) + brms::set_rescor(rescor = TRUE), #' data = mtcars, #' refresh = 0 #' ) #' rope(model) #' rope(model, ci = c(0.90, 0.95)) #' #' # different ROPE ranges for model parameters. 
For each response, a named #' # list (with the name of the response variable) is required as list-element #' # for the `range` argument. #' rope( #' model, #' range = list( #' mpg = list(b_mpg_wt = c(-1, 1), b_mpg_cyl = c(-2, 2)), #' disp = list(b_disp_wt = c(-5, 5), b_disp_cyl = c(-4, 4)) #' ) #' ) #' #' bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) #' rope(bf) #' rope(bf, ci = c(0.90, 0.95)) #' } #' @export rope <- function(x, ...) { UseMethod("rope") } #' @method as.double rope #' @export as.double.rope <- function(x, ...) { x$ROPE_Percentage } #' @export rope.default <- function(x, ...) { NULL } #' @rdname rope #' @export rope.numeric <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, verbose = TRUE, ...) { if (all(range == "default")) { range <- c(-0.1, 0.1) } else if (!all(is.numeric(range)) || length(range) != 2) { insight::format_error("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } rope_values <- lapply(ci, function(i) { .rope( x, range = range, ci = i, ci_method = ci_method, complement = complement, verbose = verbose ) }) # "do.call(rbind)" does not bind attribute values together # so we need to capture the information about HDI separately out <- do.call(rbind, rope_values) if (nrow(out) > 1) { iv <- intersect( colnames(out), c("ROPE_Percentage", "Superiority_Percentage", "Inferiority_Percentage") ) out[iv] <- lapply(out[iv], as.numeric) } # Attributes hdi_area <- cbind( CI = ci, data.frame(do.call(rbind, lapply(rope_values, attr, "HDI_area"))) ) names(hdi_area) <- c("CI", "CI_low", "CI_high") attr(out, "HDI_area") <- hdi_area attr(out, "data") <- x class(out) <- unique(c("rope", "see_rope", class(out))) out } #' @export rope.get_predicted <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, use_iterations = FALSE, verbose = TRUE, ...) { if (isTRUE(use_iterations)) { if ("iterations" %in% names(attributes(x))) { out <- rope( as.data.frame(t(attributes(x)$iterations)), range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... ) } else { insight::format_error("No iterations present in the output.") } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) } else { out <- rope( as.numeric(x), range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... ) } out } #' @export #' @rdname rope #' @inheritParams p_direction rope.data.frame <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, rvar_col = NULL, verbose = TRUE, ...) { obj_name <- insight::safe_deparse_symbol(substitute(x)) x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::rope cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x)) } out <- .prepare_rope_df(x, range, ci, ci_method, complement, verbose) HDI_area_attributes <- insight::compact_list(out$HDI_area) dat <- data.frame( Parameter = rep(names(HDI_area_attributes), each = length(ci)), out$tmp, stringsAsFactors = FALSE ) row.names(dat) <- NULL attr(dat, "HDI_area") <- HDI_area_attributes attr(dat, "object_name") <- obj_name class(dat) <- c("rope", "see_rope", "data.frame") dat } #' @export rope.draws <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, verbose = TRUE, ...) 
{ rope( .posterior_draws_to_df(x), range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... ) } #' @export rope.rvar <- rope.draws #' @export rope.emmGrid <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, verbose = TRUE, ...) { xdf <- insight::get_parameters(x) dat <- rope( xdf, range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... ) dat <- .append_datagrid(dat, x) attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export rope.emm_list <- rope.emmGrid #' @export rope.slopes <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, verbose = TRUE, ...) { xrvar <- .get_marginaleffects_draws(x) dat <- rope( xrvar, range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... ) dat <- .append_datagrid(dat, x) attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export rope.comparisons <- rope.slopes #' @export rope.predictions <- rope.slopes #' @export rope.BFBayesFactor <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, verbose = TRUE, ...) { if (all(range == "default")) { range <- rope_range(x, verbose = verbose) } out <- rope( insight::get_parameters(x), range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export rope.bamlss <- rope.BFBayesFactor #' @export rope.MCMCglmm <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, verbose = TRUE, ...) { nF <- x$Fixed$nfl out <- rope( as.data.frame(x$Sol[, 1:nF, drop = FALSE]), range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export rope.mcmc <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, verbose = TRUE, ...) { out <- rope( as.data.frame(x), range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... ) attr(out, "object_name") <- NULL attr(out, "data") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export rope.bcplm <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, verbose = TRUE, ...) { out <- rope( insight::get_parameters(x), range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... ) attr(out, "object_name") <- NULL attr(out, "data") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export rope.bayesQR <- rope.bcplm #' @export rope.blrm <- rope.bcplm #' @export rope.BGGM <- rope.bcplm #' @export rope.mcmc.list <- rope.bcplm #' @rdname rope #' @export rope.stanreg <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ...) 
{ if (all(range == "default")) { range <- rope_range(x, verbose = verbose) } else if (!is.list(range) && (!all(is.numeric(range)) || length(range) != 2)) { insight::format_error("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } # check for possible collinearity that might bias ROPE if (verbose && !inherits(x, "blavaan")) .check_multicollinearity(x, "rope") rope_data <- rope( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... ) out <- .prepare_output( rope_data, .get_cleaned_parameters(x, ...), inherits(x, "stanmvreg") ) attr(out, "HDI_area") <- attr(rope_data, "HDI_area") attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- class(rope_data) out } #' @export rope.stanfit <- rope.stanreg #' @export rope.blavaan <- rope.stanreg #' @rdname rope #' @export rope.brmsfit <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ...) { # check range argument if (all(range == "default")) { range <- rope_range(x, verbose = verbose) # we expect a list with named vectors (length two) in the multivariate case. # Names state the response variable. } else if (insight::is_multivariate(x)) { if ( !is.list(range) || length(range) < length(insight::find_response(x)) || !all(names(range) %in% insight::find_response(x)) ) { insight::format_error( "With a multivariate model, `range` should be 'default' or a list with multiple lists (one for each response) of named numeric vectors with length 2." ) } } else if (!is.list(range) && (!all(is.numeric(range)) || length(range) != 2)) { insight::format_error( "`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1))." ) } # check for possible collinearity that might bias ROPE and print a warning if (verbose) .check_multicollinearity(x, "rope") # calc rope if (insight::is_multivariate(x)) { dv <- insight::find_response(x) # ROPE range / width differs between response varialbe. Thus ROPE is # calculated for every variable on its own. rope_data <- lapply( dv, function(dv_item) { ret <- rope( insight::get_parameters( x, effects = effects, component = component, parameters = parameters, ... ), range = range[[dv_item]], ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... ) # It's a waste of performance to calculate ROPE for all parameters # with the ROPE width of a specific response variable and to throw # away the unwanted results. However, performance impact should not be # too high and this way it is much easier to handle the `parameters` # argument. ret[grepl(paste0("(.*)", dv_item), ret$Parameter), ] } ) rope_data <- do.call(rbind, rope_data) out <- .prepare_output( rope_data, .get_cleaned_parameters(x, ...), is_brms_mv = TRUE ) } else { rope_data <- rope( insight::get_parameters(x, effects = effects, component = component, parameters = parameters), range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, ... 
) out <- .prepare_output(rope_data, .get_cleaned_parameters(x, ...)) } attr(out, "HDI_area") <- attr(rope_data, "HDI_area") attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- class(rope_data) out } #' @export rope.sim.merMod <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, effects = "fixed", parameters = NULL, verbose = TRUE, ...) { if (all(range == "default")) { range <- rope_range(x, verbose = verbose) } else if (!is.list(range) && (!all(is.numeric(range)) || length(range) != 2)) { insight::format_error("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } rope_list <- lapply(c("fixed", "random"), function(.x) { parms <- insight::get_parameters(x, effects = .x, parameters = parameters) getropedata <- .prepare_rope_df(parms, range, ci, ci_method, complement, verbose) tmp <- getropedata$tmp HDI_area <- getropedata$HDI_area if (insight::is_empty_object(tmp)) { tmp <- NULL } else { tmp <- .clean_up_tmp_stanreg( tmp, group = .x, cols = c("CI", "ROPE_low", "ROPE_high", "ROPE_Percentage", "Group"), parms = names(parms) ) if (!insight::is_empty_object(HDI_area)) { attr(tmp, "HDI_area") <- HDI_area } } tmp }) dat <- do.call( rbind, args = c(insight::compact_list(rope_list), make.row.names = FALSE) ) dat <- switch(effects, fixed = .select_rows(dat, "Group", "fixed"), random = .select_rows(dat, "Group", "random"), dat ) if (all(dat$Group == dat$Group[1])) { dat <- datawizard::data_remove(dat, "Group", verbose = FALSE) } HDI_area_attributes <- lapply(insight::compact_list(rope_list), attr, "HDI_area") if (effects != "all") { HDI_area_attributes <- HDI_area_attributes[[1]] } else { names(HDI_area_attributes) <- c("fixed", "random") } attr(dat, "HDI_area") <- HDI_area_attributes attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export rope.sim <- function(x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, parameters = NULL, verbose = TRUE, ...) 
{ if (all(range == "default")) { range <- rope_range(x, verbose = verbose) } else if (!is.list(range) && (!all(is.numeric(range)) || length(range) != 2)) { insight::format_error("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } parms <- insight::get_parameters(x, parameters = parameters) getropedata <- .prepare_rope_df(parms, range, ci, ci_method, complement, verbose) dat <- getropedata$tmp HDI_area <- getropedata$HDI_area if (insight::is_empty_object(dat)) { dat <- NULL } else { dat <- .clean_up_tmp_stanreg( dat, group = "fixed", cols = c("CI", "ROPE_low", "ROPE_high", "ROPE_Percentage"), parms = names(parms) ) if (!insight::is_empty_object(HDI_area)) { attr(dat, "HDI_area") <- HDI_area } } attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } # helper ------------------------------------------------------------------- #' @keywords internal .rope <- function(x, range = c(-0.1, 0.1), ci = 0.95, ci_method = "ETI", complement = FALSE, verbose = TRUE) { ci_bounds <- ci(x, ci = ci, method = ci_method, verbose = verbose) if (anyNA(ci_bounds)) { inferiority_percentage <- superiority_percentage <- rope_percentage <- NA } else { HDI_area <- x[x >= ci_bounds$CI_low & x <= ci_bounds$CI_high] rope_percentage <- mean(HDI_area >= min(range) & HDI_area <= max(range)) superiority_percentage <- mean(HDI_area > max(range)) inferiority_percentage <- mean(HDI_area < min(range)) } rope <- data.frame( CI = ci, ROPE_low = range[1], ROPE_high = range[2], ROPE_Percentage = rope_percentage ) if (isTRUE(complement)) { rope[["Superiority_Percentage"]] <- superiority_percentage rope[["Inferiority_Percentage"]] <- inferiority_percentage } attr(rope, "HDI_area") <- c(ci_bounds$CI_low, ci_bounds$CI_high) attr(rope, "CI_bounds") <- c(ci_bounds$CI_low, ci_bounds$CI_high) class(rope) <- unique(c("rope", "see_rope", class(rope))) rope } #' @keywords internal .prepare_rope_df <- function(parms, range, ci, ci_method, complement, verbose) { if (is.list(range)) { # check if list of values contains only valid values range <- .check_list_range(range, parms) # apply thresholds to each column tmp <- mapply( function(p, r) { rope( p, range = r, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose ) }, parms, range, SIMPLIFY = FALSE ) } else { tmp <- sapply( parms, rope, range = range, ci = ci, ci_method = ci_method, complement = complement, verbose = verbose, simplify = FALSE ) } HDI_area <- lapply(tmp, attr, which = "HDI_area") # HDI_area <- lapply(HDI_area, function(.x) { # dat <- cbind(CI = ci, data.frame(do.call(rbind, .x))) # colnames(dat) <- c("CI", "HDI_low", "HDI_high") # dat # }) list( tmp = do.call(rbind, tmp), HDI_area = HDI_area ) } bayestestR/R/bayesfactor_restricted.R0000644000176200001440000002307314765755711017504 0ustar liggesusers#' Bayes Factors (BF) for Order Restricted Models #' #' This method computes Bayes factors for comparing a model with an order restrictions on its parameters #' with the fully unrestricted model. *Note that this method should only be used for confirmatory analyses*. #' \cr \cr #' The `bf_*` function is an alias of the main function. #' \cr \cr #' \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, #' see [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html).} #' #' @param posterior A `stanreg` / `brmsfit` object, `emmGrid` or a data frame - representing #' a posterior distribution(s) from (see Details). 
#' @param hypothesis A character vector specifying the restrictions as logical conditions (see examples below). #' @param prior An object representing a prior distribution (see Details). #' @inheritParams hdi #' #' @details This method is used to compute Bayes factors for order-restricted models vs un-restricted #' models by setting an order restriction on the prior and posterior distributions #' (\cite{Morey & Wagenmakers, 2013}). #' \cr\cr #' (Though it is possible to use `bayesfactor_restricted()` to test interval restrictions, #' it is more suitable for testing order restrictions; see examples). #' #' @inheritSection bayesfactor_parameters Setting the correct `prior` #' #' @inheritSection bayesfactor_parameters Interpreting Bayes Factors #' #' @return A data frame containing the (log) Bayes factor representing evidence #' *against* the un-restricted model (Use `as.numeric()` to extract the #' non-log Bayes factors; see examples). (A `bool_results` attribute contains #' the results for each sample, indicating if they are included or not in the #' hypothesized restriction.) #' #' @examples #' set.seed(444) #' library(bayestestR) #' prior <- data.frame( #' A = rnorm(500), #' B = rnorm(500), #' C = rnorm(500) #' ) #' #' posterior <- data.frame( #' A = rnorm(500, .4, 0.7), #' B = rnorm(500, -.2, 0.4), #' C = rnorm(500, 0, 0.5) #' ) #' #' hyps <- c( #' "A > B & B > C", #' "A > B & A > C", #' "C > A" #' ) #' #' #' (b <- bayesfactor_restricted(posterior, hypothesis = hyps, prior = prior)) #' #' bool <- as.logical(b, which = "posterior") #' head(bool) #' #' @examplesIf require("see") && require("patchwork") #' #' see::plots( #' plot(estimate_density(posterior)), #' # distribution **conditional** on the restrictions #' plot(estimate_density(posterior[bool[, hyps[1]], ])) + ggplot2::ggtitle(hyps[1]), #' plot(estimate_density(posterior[bool[, hyps[2]], ])) + ggplot2::ggtitle(hyps[2]), #' plot(estimate_density(posterior[bool[, hyps[3]], ])) + ggplot2::ggtitle(hyps[3]), #' guides = "collect" #' ) #' #' @examplesIf require("rstanarm") #' \donttest{ #' # rstanarm models #' # --------------- #' data("mtcars") #' #' fit_stan <- rstanarm::stan_glm(mpg ~ wt + cyl + am, #' data = mtcars, refresh = 0 #' ) #' hyps <- c( #' "am > 0 & cyl < 0", #' "cyl < 0", #' "wt - cyl > 0" #' ) #' #' bayesfactor_restricted(fit_stan, hypothesis = hyps) #' } #' #' @examplesIf require("rstanarm") && require("emmeans") #' \donttest{ #' # emmGrid objects #' # --------------- #' # replicating http://bayesfactor.blogspot.com/2015/01/multiple-comparisons-with-bayesfactor-2.html #' data("disgust") #' contrasts(disgust$condition) <- contr.equalprior_pairs # see vignette #' fit_model <- rstanarm::stan_glm(score ~ condition, data = disgust, family = gaussian()) #' #' em_condition <- emmeans::emmeans(fit_model, ~condition, data = disgust) #' hyps <- c("lemon < control & control < sulfur") #' #' bayesfactor_restricted(em_condition, prior = fit_model, hypothesis = hyps) #' # > # Bayes Factor (Order-Restriction) #' # > #' # > Hypothesis P(Prior) P(Posterior) BF #' # > lemon < control & control < sulfur 0.17 0.75 4.49 #' # > --- #' # > Bayes factors for the restricted model vs. the un-restricted model. #' } #' #' @references #' - Morey, R. D., & Wagenmakers, E. J. (2014). Simple relation between Bayesian order-restricted and #' point-null hypothesis tests. Statistics & Probability Letters, 92, 121-124. #' - Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. 
#' Psychological methods, 16(4), 406. #' - Morey, R. D. (Jan, 2015). Multiple Comparisons with BayesFactor, Part 2 – order restrictions. #' Retrieved from https://richarddmorey.org/category/order-restrictions/. #' #' @export bayesfactor_restricted <- function(posterior, ...) { UseMethod("bayesfactor_restricted") } #' @rdname bayesfactor_restricted #' @export bf_restricted <- bayesfactor_restricted #' @rdname bayesfactor_restricted #' @export bayesfactor_restricted.stanreg <- function(posterior, hypothesis, prior = NULL, verbose = TRUE, effects = "fixed", component = "conditional", ...) { samps <- .clean_priors_and_posteriors(posterior, prior, effects = effects, component = component, verbose = verbose ) # Get savage-dickey BFs bayesfactor_restricted.data.frame( posterior = samps$posterior, prior = samps$prior, hypothesis = hypothesis ) } #' @rdname bayesfactor_restricted #' @export bayesfactor_restricted.brmsfit <- bayesfactor_restricted.stanreg #' @rdname bayesfactor_restricted #' @export bayesfactor_restricted.blavaan <- function(posterior, hypothesis, prior = NULL, verbose = TRUE, ...) { samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose ) # Get savage-dickey BFs bayesfactor_restricted.data.frame( posterior = samps$posterior, prior = samps$prior, hypothesis = hypothesis ) } #' @rdname bayesfactor_restricted #' @export bayesfactor_restricted.emmGrid <- function(posterior, hypothesis, prior = NULL, verbose = TRUE, ...) { samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose ) bayesfactor_restricted.data.frame( posterior = samps$posterior, prior = samps$prior, hypothesis = hypothesis ) } #' @export bayesfactor_restricted.emm_list <- bayesfactor_restricted.emmGrid #' @export bayesfactor_restricted.slopes <- bayesfactor_restricted.emmGrid #' @export bayesfactor_restricted.predictions <- bayesfactor_restricted.emmGrid #' @export bayesfactor_restricted.comparisons <- bayesfactor_restricted.emmGrid #' @export #' @rdname bayesfactor_restricted #' @inheritParams p_direction bayesfactor_restricted.data.frame <- function(posterior, hypothesis, prior = NULL, rvar_col = NULL, ...) { x_rvar <- .possibly_extract_rvar_col(posterior, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::bayesfactor_restricted cl$posterior <- x_rvar cl$rvar_col <- NULL prior_rvar <- .possibly_extract_rvar_col(posterior, prior) if (length(prior_rvar) > 0L) { cl$prior <- prior_rvar } return(eval.parent(cl)) } p_hypothesis <- parse(text = hypothesis) if (is.null(prior)) { prior <- posterior insight::format_warning( "Prior not specified! ", "Please specify priors (with column names matching 'posterior')", " to get meaningful results." 
) } .test_hypothesis <- function(x, data) { x_logical <- try(eval(x, envir = data), silent = TRUE) if (inherits(x_logical, "try-error")) { cnames <- colnames(data) is_name <- make.names(cnames) == cnames cnames[!is_name] <- paste0("`", cnames[!is_name], "`") insight::format_error( x_logical, paste("Available parameters are:", toString(cnames)) ) } else if (!all(is.logical(x_logical))) { insight::format_error("Hypotheses must be logical.") } x_logical } posterior_l <- as.data.frame(lapply(p_hypothesis, .test_hypothesis, data = posterior)) prior_l <- as.data.frame(lapply(p_hypothesis, .test_hypothesis, data = prior)) colnames(posterior_l) <- colnames(prior_l) <- if (is.null(names(hypothesis))) hypothesis else names(hypothesis) posterior_p <- sapply(posterior_l, mean) prior_p <- sapply(prior_l, mean) log_BF <- log(posterior_p) - log(prior_p) res <- data.frame( Hypothesis = hypothesis, p_prior = prior_p, p_posterior = posterior_p, log_BF = log_BF ) attr(res, "bool_results") <- list(posterior = posterior_l, prior = prior_l) class(res) <- unique(c( "bayesfactor_restricted", class(res) )) res } #' @export bayesfactor_restricted.draws <- function(posterior, hypothesis, prior = NULL, ...) { bayesfactor_restricted(.posterior_draws_to_df(posterior), hypothesis = hypothesis, prior = if (!is.null(prior)) .posterior_draws_to_df(prior), ... ) } #' @export bayesfactor_restricted.rvar <- bayesfactor_restricted.draws # Methods ----------------------------------------------------------------- #' @export #' @rdname bayesfactor_restricted #' @param x An object of class `bayesfactor_restricted` #' @param which Should the logical matrix be of the posterior or prior distribution(s)? as.logical.bayesfactor_restricted <- function(x, which = c("posterior", "prior"), ...) { which <- match.arg(which) as.matrix(attr(x, "bool_results")[[which]]) } bayestestR/R/mediation.R0000644000176200001440000003236414765755711014726 0ustar liggesusers#' @title Summary of Bayesian multivariate-response mediation-models #' @name mediation #' #' @description `mediation()` is a short summary for multivariate-response #' mediation-models, i.e. this function computes average direct and average #' causal mediation effects of multivariate response models. #' #' @param model A `brmsfit` or `stanmvreg` object. #' @param treatment Character, name of the treatment variable (or direct effect) #' in a (multivariate response) mediator-model. If missing, `mediation()` #' tries to find the treatment variable automatically, however, this may fail. #' @param mediator Character, name of the mediator variable in a (multivariate #' response) mediator-model. If missing, `mediation()` tries to find the #' treatment variable automatically, however, this may fail. #' @param response A named character vector, indicating the names of the response #' variables to be used for the mediation analysis. Usually can be `NULL`, #' in which case these variables are retrieved automatically. If not `NULL`, #' names should match the names of the model formulas, #' `names(insight::find_response(model, combine = TRUE))`. This can be #' useful if, for instance, the mediator variable used as predictor has a different #' name from the mediator variable used as response. This might occur when the #' mediator is transformed in one model, but used "as is" as response variable #' in the other model. Example: The mediator `m` is used as response variable, #' but the centered version `m_center` is used as mediator variable. 
The #' second response variable (for the treatment model, with the mediator as #' additional predictor), `y`, is not transformed. Then we could use #' `response` like this: `mediation(model, response = c(m = "m_center", y = "y"))`. #' @param ... Not used. #' @inheritParams ci #' @inheritParams describe_posterior #' #' @return A data frame with direct, indirect, mediator and #' total effect of a multivariate-response mediation-model, as well as the #' proportion mediated. The effect sizes are median values of the posterior #' samples (use `centrality` for other centrality indices). #' #' @details `mediation()` returns a data frame with information on the #' *direct effect* (mean value of posterior samples from `treatment` #' of the outcome model), *mediator effect* (mean value of posterior #' samples from `mediator` of the outcome model), *indirect effect* #' (mean value of the multiplication of the posterior samples from #' `mediator` of the outcome model and the posterior samples from #' `treatment` of the mediation model) and the total effect (mean #' value of sums of posterior samples used for the direct and indirect #' effect). The *proportion mediated* is the indirect effect divided #' by the total effect. #' #' For all values, the `89%` credible intervals are calculated by default. #' Use `ci` to calculate a different interval. #' #' The arguments `treatment` and `mediator` do not necessarily #' need to be specified. If missing, `mediation()` tries to find the #' treatment and mediator variable automatically. If this does not work, #' specify these variables. #' #' The direct effect is also called *average direct effect* (ADE), #' the indirect effect is also called *average causal mediation effects* #' (ACME). See also _Tingley et al. 2014_ and _Imai et al. 2010_. #' #' @note There is an `as.data.frame()` method that returns the posterior #' samples of the effects, which can be used for further processing in the #' **bayestestR** package. #' #' @references #' #' - Imai, K., Keele, L. and Tingley, D. (2010) A General Approach to Causal #' Mediation Analysis, Psychological Methods, Vol. 15, No. 4 (December), pp. #' 309-334. #' #' - Tingley, D., Yamamoto, T., Hirose, K., Imai, K. and Keele, L. (2014). #' mediation: R package for Causal Mediation Analysis, Journal of Statistical #' Software, Vol. 59, No. 5, pp. 1-38. #' #' @seealso The \pkg{mediation} package for a causal mediation analysis in #' the frequentist framework. 
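#'
#' @section Computation sketch:
#'
#' To make the description in 'Details' concrete, the extraction of the
#' effects roughly amounts to the following (a simplified sketch; the
#' coefficient names are placeholders and depend on the fitted model, its
#' response names, and the treatment/mediator variables):
#'
#' ```r
#' draws    <- insight::get_parameters(model)
#' a_path   <- draws[["b_mediator_treatment"]] # treatment -> mediator
#' b_path   <- draws[["b_outcome_mediator"]]   # mediator  -> outcome
#' direct   <- draws[["b_outcome_treatment"]]  # ADE
#' indirect <- a_path * b_path                 # ACME
#' total    <- direct + indirect
#' median(indirect) / median(total)            # proportion mediated
#' ```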
#' #' @examplesIf require("mediation") && require("brms") && require("rstanarm") #' \donttest{ #' library(mediation) #' library(brms) #' library(rstanarm) #' #' # load sample data #' data(jobs) #' set.seed(123) #' #' # linear models, for mediation analysis #' b1 <- lm(job_seek ~ treat + econ_hard + sex + age, data = jobs) #' b2 <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data = jobs) #' # mediation analysis, for comparison with Stan models #' m1 <- mediate(b1, b2, sims = 1000, treat = "treat", mediator = "job_seek") #' #' # Fit Bayesian mediation model in brms #' f1 <- bf(job_seek ~ treat + econ_hard + sex + age) #' f2 <- bf(depress2 ~ treat + job_seek + econ_hard + sex + age) #' m2 <- brm(f1 + f2 + set_rescor(FALSE), data = jobs, refresh = 0) #' #' # Fit Bayesian mediation model in rstanarm #' m3 <- suppressWarnings(stan_mvmer( #' list( #' job_seek ~ treat + econ_hard + sex + age + (1 | occp), #' depress2 ~ treat + job_seek + econ_hard + sex + age + (1 | occp) #' ), #' data = jobs, #' refresh = 0 #' )) #' #' summary(m1) #' mediation(m2, centrality = "mean", ci = 0.95) #' mediation(m3, centrality = "mean", ci = 0.95) #' } #' @export mediation <- function(model, ...) { UseMethod("mediation") } #' @rdname mediation #' @export mediation.brmsfit <- function(model, treatment, mediator, response = NULL, centrality = "median", ci = 0.95, method = "ETI", ...) { .mediation( model = model, treatment = treatment, mediator = mediator, response = response, centrality = centrality, ci = ci, method = method, pattern = "b_%s_%s", ... ) } #' @export mediation.stanmvreg <- function(model, treatment, mediator, response = NULL, centrality = "median", ci = 0.95, method = "ETI", ...) { .mediation( model = model, treatment = treatment, mediator = mediator, response = response, centrality = centrality, ci = ci, method = method, pattern = "%s|%s", ... ) } # workhorse --------------------------------- .mediation <- function(model, treatment, mediator, response = NULL, centrality = "median", ci = 0.95, method = "ETI", pattern = "b_%s_%s", ...) { # only one HDI interval if (length(ci) > 1) ci <- ci[1] # check for binary response. In this case, user should rescale variables modelinfo <- insight::model_info(model, verbose = FALSE) if (any(sapply(modelinfo, function(i) i$is_binomial, simplify = TRUE))) { insight::format_alert("One of moderator or outcome is binary, so direct and indirect effects may be on different scales. Consider rescaling model predictors, e.g. 
with `effectsize::standardize()`.") } # model responses if (is.null(response)) { response <- insight::find_response(model, combine = TRUE) } fix_mediator <- FALSE # find mediator, if not specified if (missing(mediator)) { predictors <- insight::find_predictors(model, flatten = TRUE) mediator <- predictors[predictors %in% response] fix_mediator <- TRUE } # find treatment, if not specified if (missing(treatment)) { predictors <- lapply( insight::find_predictors(model), function(.f) .f$conditional ) treatment <- predictors[[1]][predictors[[1]] %in% predictors[[2]]][1] treatment <- .fix_factor_name(model, treatment) } mediator.model <- which(response == mediator) treatment.model <- which(response != mediator) if (fix_mediator) mediator <- .fix_factor_name(model, mediator) if (inherits(model, "brmsfit")) { response_name <- names(response) } else { response_name <- unname(response) } # brms removes underscores from variable names when naming estimates # so we need to fix variable names here response <- names(response) # Direct effect: coef(treatment) from model_y_treatment coef_treatment <- sprintf(pattern, response[treatment.model], treatment) effect_direct <- insight::get_parameters(model)[[coef_treatment]] # Mediator effect: coef(mediator) from model_y_treatment coef_mediator <- sprintf(pattern, response[treatment.model], mediator) effect_mediator <- insight::get_parameters(model)[[coef_mediator]] # Indirect effect: coef(treament) from model_m_mediator * coef(mediator) from model_y_treatment coef_indirect <- sprintf(pattern, response[mediator.model], treatment) tmp.indirect <- insight::get_parameters(model)[c(coef_indirect, coef_mediator)] effect_indirect <- tmp.indirect[[coef_indirect]] * tmp.indirect[[coef_mediator]] # Total effect effect_total <- effect_indirect + effect_direct # proportion mediated: indirect effect / total effect proportion_mediated <- as.numeric(point_estimate(effect_indirect, centrality = centrality)) / as.numeric(point_estimate(effect_total, centrality = centrality)) hdi_eff <- ci(effect_indirect / effect_total, ci = ci, method = method) prop_mediated_se <- (hdi_eff$CI_high - hdi_eff$CI_low) / 2 prop_mediated_ci <- proportion_mediated + c(-1, 1) * prop_mediated_se res <- cbind( data.frame( Effect = c("Direct Effect (ADE)", "Indirect Effect (ACME)", "Mediator Effect", "Total Effect", "Proportion Mediated"), Estimate = c( as.numeric(point_estimate(effect_direct, centrality = centrality)), as.numeric(point_estimate(effect_indirect, centrality = centrality)), as.numeric(point_estimate(effect_mediator, centrality = centrality)), as.numeric(point_estimate(effect_total, centrality = centrality)), proportion_mediated ), stringsAsFactors = FALSE ), as.data.frame(rbind( ci(effect_direct, ci = ci, method = method)[, -1], ci(effect_indirect, ci = ci, method = method)[, -1], ci(effect_mediator, ci = ci, method = method)[, -1], ci(effect_total, ci = ci, method = method)[, -1], prop_mediated_ci )) ) colnames(res) <- c("Effect", "Estimate", "CI_low", "CI_high") samples <- data.frame( effect_direct, effect_indirect, effect_mediator, effect_total, proportion_mediated = effect_indirect / effect_total ) attr(res, "ci") <- ci attr(res, "ci_method") <- method attr(res, "treatment") <- treatment attr(res, "mediator") <- mediator attr(res, "response") <- response_name[treatment.model] attr(res, "data") <- samples class(res) <- c("bayestestR_mediation", "see_bayestestR_mediation", class(res)) res } # methods --------------------- #' @export as.data.frame.bayestestR_mediation <- function(x, 
...) { attributes(x)$data } # helper --------------------------------- .fix_factor_name <- function(model, variable) { # check for categorical. if user has not specified a treatment variable # and this variable is categorical, the posterior samples contain the # samples from each category of the treatment variable - so we need to # fix the variable name mf <- insight::get_data(model) if (variable %in% colnames(mf)) { check_fac <- mf[[variable]] if (is.factor(check_fac)) { variable <- sprintf("%s%s", variable, levels(check_fac)[nlevels(check_fac)]) } else if (is.logical(check_fac)) { variable <- sprintf("%sTRUE", variable) } } variable } # S3 --------------------------------- #' @export print.bayestestR_mediation <- function(x, digits = 3, ...) { attr(x, "data") <- NULL insight::print_color("# Causal Mediation Analysis for Stan Model\n\n", "blue") cat(sprintf( " Treatment: %s\n Mediator : %s\n Response : %s\n\n", attr(x, "treatment", exact = TRUE), attr(x, "mediator", exact = TRUE), attr(x, "response", exact = TRUE) )) prop_mediated <- prop_mediated_ori <- x[nrow(x), ] x <- x[-nrow(x), ] x$CI <- insight::format_ci(x$CI_low, x$CI_high, ci = NULL, digits = digits, width = "auto", missing = "NA") x <- datawizard::data_remove(x, c("CI_low", "CI_high"), verbose = FALSE) colnames(x)[ncol(x)] <- sprintf("%.5g%% %s", 100 * attributes(x)$ci, attributes(x)$ci_method) # remove class, to avoid conflicts with "as.data.frame.bayestestR_mediation()" class(x) <- "data.frame" cat(insight::export_table(x, digits = digits)) cat("\n") prop_mediated[] <- lapply(prop_mediated, insight::format_value, as_percent = TRUE) insight::print_color( sprintf( "Proportion mediated: %s [%s, %s]\n", prop_mediated$Estimate, prop_mediated$CI_low, prop_mediated$CI_high ), "red" ) if (any(prop_mediated_ori$Estimate < 0)) { insight::format_alert("\nDirect and indirect effects have opposite directions. The proportion mediated is not meaningful.") } } #' @export plot.bayestestR_mediation <- function(x, ...) { insight::check_if_installed("see", "to plot results from mediation analysis") NextMethod() } bayestestR/R/rope_range.R0000644000176200001440000001466214746110651015062 0ustar liggesusers#' @title Find Default Equivalence (ROPE) Region Bounds #' #' @description This function attempts at automatically finding suitable "default" #' values for the Region Of Practical Equivalence (ROPE). #' #' @details _Kruschke (2018)_ suggests that the region of practical equivalence #' could be set, by default, to a range from `-0.1` to `0.1` of a standardized #' parameter (negligible effect size according to _Cohen, 1988_). #' #' - For **linear models (lm)**, this can be generalised to #' \ifelse{html}{\out{-0.1 * SDy, 0.1 * SDy}}{\eqn{[-0.1*SD_{y}, 0.1*SD_{y}]}}. #' #' - For **logistic models**, the parameters expressed in log odds ratio can be #' converted to standardized difference through the formula #' \ifelse{html}{\out{π/√(3)}}{\eqn{\pi/\sqrt{3}}}, resulting in a #' range of `-0.18` to `0.18`. #' #' - For other models with **binary outcome**, it is strongly recommended to #' manually specify the rope argument. Currently, the same default is applied #' that for logistic models. #' #' - For models from **count data**, the residual variance is used. This is a #' rather experimental threshold and is probably often similar to `-0.1, 0.1`, #' but should be used with care! #' #' - For **t-tests**, the standard deviation of the response is used, similarly #' to linear models (see above). 
#' #' - For **correlations**, `-0.05, 0.05` is used, i.e., half the value of a #' negligible correlation as suggested by Cohen's (1988) rules of thumb. #' #' - For all other models, `-0.1, 0.1` is used to determine the ROPE limits, #' but it is strongly advised to specify it manually. #' #' @param x A `stanreg`, `brmsfit` or `BFBayesFactor` object, or a frequentist #' regression model. #' @param verbose Toggle warnings. #' @inheritParams rope #' #' @examplesIf require("rstanarm") && require("brms") && require("BayesFactor") #' \donttest{ #' model <- suppressWarnings(rstanarm::stan_glm( #' mpg ~ wt + gear, #' data = mtcars, #' chains = 2, #' iter = 200, #' refresh = 0 #' )) #' rope_range(model) #' #' model <- suppressWarnings( #' rstanarm::stan_glm(vs ~ mpg, data = mtcars, family = "binomial", refresh = 0) #' ) #' rope_range(model) #' #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' rope_range(model) #' #' model <- BayesFactor::ttestBF(mtcars[mtcars$vs == 1, "mpg"], mtcars[mtcars$vs == 0, "mpg"]) #' rope_range(model) #' #' model <- lmBF(mpg ~ vs, data = mtcars) #' rope_range(model) #' } #' #' @references Kruschke, J. K. (2018). Rejecting or accepting parameter values #' in Bayesian estimation. Advances in Methods and Practices in Psychological #' Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. #' #' @export rope_range <- function(x, ...) { UseMethod("rope_range") } #' @rdname rope_range #' @export rope_range.default <- function(x, verbose = TRUE, ...) { # sanity check - if no model found, return default if (is.null(x)) { return(c(-0.1, 0.1)) } response <- insight::get_response(x, source = "mf") response_transform <- insight::find_transformation(x) information <- insight::model_info(x, verbose = FALSE) if (insight::is_multivariate(x)) { ret <- Map( function(i, j, ...) .rope_range(x, i, j), information, response, response_transform, verbose ) } else { ret <- .rope_range(x, information, response, response_transform, verbose) } ret } #' @export rope_range.parameters_model <- function(x, verbose = TRUE, ...) { model <- .retrieve_model(x) rope_range.default(x = model, verbose = verbose, ...) } #' @export rope_range.data.frame <- function(x, verbose = TRUE, ...) { # to avoid errors with "get_response()" in the default method c(-0.1, 0.1) } # Exceptions -------------------------------------------------------------- #' @export rope_range.mlm <- function(x, verbose = TRUE, ...) 
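# for multivariate (mlm) models, a separate ROPE range is computed for each
# response variable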
{ response <- insight::get_response(x, source = "mf") information <- insight::model_info(x, verbose = FALSE) lapply(response, function(i) .rope_range(x, information, i, response_transform = NULL, verbose)) } # helper ------------------ .rope_range <- function(x, information = NULL, response = NULL, response_transform = NULL, verbose = TRUE) { negligible_value <- tryCatch( if (!is.null(response_transform) && all(grepl("log", response_transform, fixed = TRUE))) { # for log-transform, we assume that a 1% change represents the ROPE adequately # see https://github.com/easystats/bayestestR/issues/487 0.01 } else if (information$is_linear && information$link_function == "log") { # for log-transform, we assume that a 1% change represents the ROPE adequately # see https://github.com/easystats/bayestestR/issues/487 0.01 } else if (information$family == "lognormal") { # for log-transform, we assume that a 1% change represents the ROPE adequately # see https://github.com/easystats/bayestestR/issues/487 0.01 } else if (!is.null(response) && information$link_function == "identity") { # Linear Models 0.1 * stats::sd(response, na.rm = TRUE) # 0.1 * stats::sigma(x) # https://github.com/easystats/bayestestR/issues/364 } else if (information$is_logit) { # Logistic Models (any) # Sigma==pi / sqrt(3) 0.1 * pi / sqrt(3) } else if (information$is_probit) { # Probit models # Sigma==1 0.1 * 1 } else if (information$is_exponential) { # Gamma models sig <- insight::get_sigma(x, no_recursion = TRUE) if (is.null(sig) || length(sig) == 0 || is.na(sig)) stop(call. = FALSE) switch(information$link_function, inverse = , identity = stats::family(x)$variance(sig), log = 0.1 * log1p(1 / sig^-2) ) } else if (information$is_correlation) { # Correlations # https://github.com/easystats/bayestestR/issues/121 0.05 } else if (information$is_count) { # Not sure about this sig <- insight::get_sigma(x, no_recursion = TRUE) if (is.null(sig) || length(sig) == 0 || is.na(sig)) stop(call. = FALSE) 0.1 * sig } else { # Default stop(call. = FALSE) }, error = function(e) { if (isTRUE(verbose)) { insight::format_warning("Could not estimate a good default ROPE range. Using 'c(-0.1, 0.1)'.") } 0.1 } ) c(-1, 1) * negligible_value } bayestestR/R/equivalence_test.R0000644000176200001440000003634315052646230016277 0ustar liggesusers#' Test for Practical Equivalence #' #' Perform a **Test for Practical Equivalence** for Bayesian and frequentist models. #' #' Documentation is accessible for: #' #' - [Bayesian models](https://easystats.github.io/bayestestR/reference/equivalence_test.html) #' - [Frequentist models](https://easystats.github.io/parameters/reference/equivalence_test.lm.html) #' #' For Bayesian models, the **Test for Practical Equivalence** is based on the #' *"HDI+ROPE decision rule"* (\cite{Kruschke, 2014, 2018}) to check whether #' parameter values should be accepted or rejected against an explicitly #' formulated "null hypothesis" (i.e., a ROPE). In other words, it checks the #' percentage of the `89%` [HDI][hdi] that is the null region (the ROPE). If #' this percentage is sufficiently low, the null hypothesis is rejected. If this #' percentage is sufficiently high, the null hypothesis is accepted. #' #' @inheritParams rope #' #' @inheritSection hdi Model components #' #' @details Using the [ROPE][rope] and the [HDI][hdi], \cite{Kruschke (2018)} #' suggests using the percentage of the `95%` (or `89%`, considered more stable) #' HDI that falls within the ROPE as a decision rule. 
If the HDI #' is completely outside the ROPE, the "null hypothesis" for this parameter is #' "rejected". If the ROPE completely covers the HDI, i.e., all most credible #' values of a parameter are inside the region of practical equivalence, the #' null hypothesis is accepted. Else, it is undecided whether to accept or #' reject the null hypothesis. If the full ROPE is used (i.e., `100%` of the #' HDI), then the null hypothesis is rejected or accepted if the percentage #' of the posterior within the ROPE is smaller than to `2.5%` or greater than #' `97.5%`. Desirable results are low proportions inside the ROPE (the closer #' to zero the better). #' #' Some attention is required for finding suitable values for the ROPE limits #' (argument `range`). See 'Details' in [`rope_range()`] for further #' information. #' #' **Multicollinearity: Non-independent covariates** #' #' When parameters show strong correlations, i.e. when covariates are not #' independent, the joint parameter distributions may shift towards or #' away from the ROPE. In such cases, the test for practical equivalence may #' have inappropriate results. Collinearity invalidates ROPE and hypothesis #' testing based on univariate marginals, as the probabilities are conditional #' on independence. Most problematic are the results of the "undecided" #' parameters, which may either move further towards "rejection" or away #' from it (\cite{Kruschke 2014, 340f}). #' #' `equivalence_test()` performs a simple check for pairwise correlations #' between parameters, but as there can be collinearity between more than two variables, #' a first step to check the assumptions of this hypothesis testing is to look #' at different pair plots. An even more sophisticated check is the projection #' predictive variable selection (\cite{Piironen and Vehtari 2017}). #' #' #' @references #' - Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304} #' - Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press #' - Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735. \doi{10.1007/s11222-016-9649-y} #' #' @return A data frame with following columns: #' #' - `Parameter` The model parameter(s), if `x` is a model-object. If `x` is a vector, this column is missing. #' - `CI` The probability of the HDI. #' - `ROPE_low`, `ROPE_high` The limits of the ROPE. These values are identical for all parameters. #' - `ROPE_Percentage` The proportion of the HDI that lies inside the ROPE. #' - `ROPE_Equivalence` The "test result", as character. Either "rejected", "accepted" or "undecided". #' - `HDI_low` , `HDI_high` The lower and upper HDI limits for the parameters. #' #' @note There is a `print()`-method with a `digits`-argument to control #' the amount of digits in the output, and there is a #' [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) #' to visualize the results from the equivalence-test (for models only). 
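#'
#' @section Decision rule (sketch):
#'
#' For a single vector of posterior draws (called `posterior` below) and a
#' credible interval below 100%, the rule described above roughly reduces to
#' the following (a simplified sketch; the actual implementation additionally
#' handles model objects, multiple ROPE ranges, and the thresholds used for
#' the full ROPE):
#'
#' ```r
#' coverage <- rope(posterior, range = c(-0.1, 0.1), ci = 0.95)$ROPE_Percentage
#' if (coverage == 0) {
#'   "Rejected"
#' } else if (coverage == 1) {
#'   "Accepted"
#' } else {
#'   "Undecided"
#' }
#' ```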
#' #' @examplesIf all(insight::check_if_installed(c("rstanarm", "brms", "emmeans", "BayesFactor", "see"), quietly = TRUE)) #' library(bayestestR) #' #' equivalence_test(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) #' equivalence_test(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1)) #' equivalence_test(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1)) #' equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99)) #' #' # print more digits #' test <- equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99)) #' print(test, digits = 4) #' \donttest{ #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' equivalence_test(model) #' # multiple ROPE ranges - asymmetric, symmetric, default #' equivalence_test(model, range = list(c(10, 40), c(-5, -4), "default")) #' # named ROPE ranges #' equivalence_test(model, range = list(wt = c(-5, -4), `(Intercept)` = c(10, 40))) #' #' # plot result #' test <- equivalence_test(model) #' plot(test) #' #' equivalence_test(emmeans::emtrends(model, ~1, "wt", data = mtcars)) #' #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' equivalence_test(model) #' #' bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) #' # equivalence_test(bf) #' } #' @export equivalence_test <- function(x, ...) { UseMethod("equivalence_test") } #' @rdname equivalence_test #' @export equivalence_test.default <- function(x, ...) { NULL } #' @export equivalence_test.numeric <- function(x, range = "default", ci = 0.95, verbose = TRUE, ...) { rope_data <- rope(x, range = range, ci = ci, verbose = verbose) out <- as.data.frame(rope_data) if (all(ci < 1)) { out$ROPE_Equivalence <- datawizard::recode_into( out$ROPE_Percentage == 0 ~ "Rejected", out$ROPE_Percentage == 1 ~ "Accepted", default = "Undecided" ) } else { # Related to guidelines for full rope (https://easystats.github.io/bayestestR/articles/4_Guidelines.html) out$ROPE_Equivalence <- datawizard::recode_into( out$ROPE_Percentage < 0.025 ~ "Rejected", out$ROPE_Percentage > 0.975 ~ "Accepted", default = "Undecided" ) } out$HDI_low <- attr(rope_data, "HDI_area", exact = TRUE)$CI_low out$HDI_high <- attr(rope_data, "HDI_area", exact = TRUE)$CI_high # remove attribute attr(out, "HDI_area") <- NULL attr(out, "data") <- x class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out))) out } #' @rdname equivalence_test #' @inheritParams p_direction #' @export equivalence_test.data.frame <- function(x, range = "default", ci = 0.95, rvar_col = NULL, verbose = TRUE, ...) 
{ obj_name <- insight::safe_deparse_symbol(substitute(x)) x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::equivalence_test cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x)) } # multiple ranges for the parameters - iterate over parameters and range if (is.list(range)) { # check if list of values contains only valid values range <- .check_list_range(range, x) # apply thresholds to each column l <- insight::compact_list(mapply( function(p, r) { equivalence_test( p, range = r, ci = ci, verbose = verbose ) }, x, range, SIMPLIFY = FALSE )) } else { l <- insight::compact_list(lapply( x, equivalence_test, range = range, ci = ci, verbose = verbose )) } dat <- do.call(rbind, l) out <- data.frame( Parameter = rep(names(l), each = nrow(dat) / length(l)), dat, stringsAsFactors = FALSE ) row.names(out) <- NULL attr(out, "object_name") <- obj_name class(out) <- unique(c("equivalence_test", "see_equivalence_test_df", class(out))) out } #' @export equivalence_test.draws <- function(x, range = "default", ci = 0.95, verbose = TRUE, ...) { equivalence_test(.posterior_draws_to_df(x), range = range, ci = ci, verbose = verbose, ...) } #' @export equivalence_test.rvar <- equivalence_test.draws #' @export equivalence_test.emmGrid <- function(x, range = "default", ci = 0.95, verbose = TRUE, ...) { xdf <- insight::get_parameters(x) out <- equivalence_test(xdf, range = range, ci = ci, verbose = verbose, ...) out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export equivalence_test.emm_list <- equivalence_test.emmGrid #' @export equivalence_test.slopes <- function(x, range = "default", ci = 0.95, verbose = TRUE, ...) { xrvar <- .get_marginaleffects_draws(x) out <- equivalence_test(xrvar, range = range, ci = ci, verbose = verbose, ...) out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export equivalence_test.comparisons <- equivalence_test.slopes #' @export equivalence_test.predictions <- equivalence_test.slopes #' @export equivalence_test.BFBayesFactor <- function(x, range = "default", ci = 0.95, verbose = TRUE, ...) { out <- equivalence_test(insight::get_parameters(x), range = range, ci = ci, verbose = verbose, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @keywords internal .equivalence_test_models <- function(x, range = "default", ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE) { if (all(range == "default")) { range <- rope_range(x, verbose = verbose) } else if (!is.list(range) && (!all(is.numeric(range)) || length(range) != 2L)) { insight::format_error("`range` should be 'default' or a vector of 2 numeric values (e.g., c(-0.1, 0.1)).") } if (verbose && !inherits(x, "blavaan")) .check_multicollinearity(x) params <- insight::get_parameters( x, component = component, effects = effects, parameters = parameters, verbose = verbose ) equivalence_test(params, range = range, ci = ci, verbose = verbose) } #' @export equivalence_test.stanreg <- function(x, range = "default", ci = 0.95, effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ...) 
{ out <- .equivalence_test_models( x, range, ci, effects, component, parameters, verbose ) out <- .prepare_output( out, .get_cleaned_parameters(x, ...), inherits(x, "stanmvreg") ) class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out))) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export equivalence_test.stanfit <- equivalence_test.stanreg #' @export equivalence_test.blavaan <- equivalence_test.stanreg #' @rdname equivalence_test #' @export equivalence_test.brmsfit <- function(x, range = "default", ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ...) { out <- .equivalence_test_models( x, range, ci, effects, component, parameters, verbose ) out <- .prepare_output( out, .get_cleaned_parameters(x, ...), inherits(x, "stanmvreg") ) class(out) <- unique(c("equivalence_test", "see_equivalence_test", class(out))) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export equivalence_test.sim.merMod <- function(x, range = "default", ci = 0.95, parameters = NULL, verbose = TRUE, ...) { out <- .equivalence_test_models( x, range, ci, effects = "fixed", component = "conditional", parameters, verbose = verbose ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export equivalence_test.sim <- equivalence_test.sim.merMod #' @export equivalence_test.mcmc <- function(x, range = "default", ci = 0.95, parameters = NULL, verbose = TRUE, ...) { out <- .equivalence_test_models( as.data.frame(x), range, ci, effects = "fixed", component = "conditional", parameters, verbose = verbose ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export equivalence_test.bcplm <- function(x, range = "default", ci = 0.95, parameters = NULL, verbose = TRUE, ...) { out <- .equivalence_test_models( insight::get_parameters(x), range, ci, effects = "fixed", component = "conditional", parameters, verbose = verbose ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export equivalence_test.blrm <- equivalence_test.bcplm #' @export equivalence_test.mcmc.list <- equivalence_test.bcplm #' @export equivalence_test.bayesQR <- equivalence_test.bcplm #' @export equivalence_test.bamlss <- function(x, range = "default", ci = 0.95, component = "all", parameters = NULL, verbose = TRUE, ...) { out <- .equivalence_test_models( insight::get_parameters(x, component = component), range, ci, effects = "fixed", component = "conditional", parameters, verbose = verbose ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } bayestestR/R/print.bayesfactor_models.R0000644000176200001440000000214614542333405017731 0ustar liggesusers#' @export print.bayesfactor_models_matrix <- function(x, digits = 2, log = FALSE, exact = TRUE, ...) { orig_x <- x # Format values x <- unclass(x) if (!log) x <- exp(x) sgn <- sign(x) < 0 x <- insight::format_bf(abs(x), name = NULL, exact = exact, ...) 
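  # each model compared to itself gives BF = 1 (log-BF = 0), hence the fixed
  # diagonal below; the minus sign is re-attached to values that were negative
  # before their absolute values were formatted above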
diag(x) <- if (log) "0" else "1" if (any(sgn)) x[sgn] <- paste0("-", x[sgn]) df <- as.data.frame(x) # Model names models <- colnames(df) models[models == "1"] <- "(Intercept only)" models <- paste0("[", seq_along(models), "] ", models) k <- max(vapply(c(models, "Denominator"), nchar, numeric(1))) + 2 rownames(df) <- colnames(df) <- NULL df <- cbind(Model = models, df) colnames(df) <- c("placeholder", paste0(" [", seq_along(models), "] ")) out <- insight::export_table( df, caption = c("# Bayes Factors for Model Comparison", "blue"), subtitle = c(sprintf("\n\n%sNumerator\nDenominator", strrep(" ", k)), "cyan"), footer = if (log) c("\nBayes Factors are on the log-scale.\n", "red") ) out <- sub("placeholder", "\b\b", out, fixed = TRUE) cat(out) invisible(orig_x) } bayestestR/R/sensitivity_to_prior.R0000644000176200001440000000707515052646230017246 0ustar liggesusers#' Sensitivity to Prior #' #' Computes the sensitivity to priors specification. This represents the #' proportion of change in some indices when the model is fitted with an #' antagonistic prior (a prior of same shape located on the opposite of the #' effect). #' #' @param model A Bayesian model (`stanreg` or `brmsfit`). #' @param index The indices from which to compute the sensitivity. Can be one or #' multiple names of the columns returned by `describe_posterior`. The case is #' important here (e.g., write 'Median' instead of 'median'). #' @param magnitude This represent the magnitude by which to shift the #' antagonistic prior (to test the sensitivity). For instance, a magnitude of #' 10 (default) means that the mode will be updated with a prior located at 10 #' standard deviations from its original location. #' @param ... Arguments passed to or from other methods. #' #' @examplesIf require("rstanarm") #' \donttest{ #' library(bayestestR) #' #' # rstanarm models #' # ----------------------------------------------- #' model <- rstanarm::stan_glm(mpg ~ wt, data = mtcars) #' sensitivity_to_prior(model) #' #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' sensitivity_to_prior(model, index = c("Median", "MAP")) #' } #' @seealso DescTools #' @export sensitivity_to_prior <- function(model, ...) { UseMethod("sensitivity_to_prior") } #' @rdname sensitivity_to_prior #' @export sensitivity_to_prior.stanreg <- function(model, index = "Median", magnitude = 10, ...) { # Original params <- .extract_parameters(model, index = index, ...) # Priors priors <- .extract_priors_rstanarm(model) new_priors <- .prior_new_location(prior = priors$prior, sign = sign(params$Median), magnitude = magnitude) model_updated <- stats::update(model, data = insight::get_data(model), prior = new_priors, refresh = 0) # New model params_updated <- .extract_parameters(model_updated, index = index, ...) # Compute index sensitivity <- abs(as.matrix(params_updated[-1]) - as.matrix(params[-1])) / abs(as.matrix(params[-1])) # Clean up sensitivity <- as.data.frame(sensitivity) names(sensitivity) <- paste0("Sensitivity_", names(params_updated)[-1]) sensitivity <- cbind(params_updated[1], sensitivity) row.names(sensitivity) <- NULL sensitivity } #' @export sensitivity_to_prior.default <- function(model, ...) { insight::format_error(sprintf("Models of class '%s' are not yet supported.", class(model)[1])) } #' @keywords internal .extract_parameters <- function(model, index = "Median", ...) 
{ # Handle BF test <- c("pd", "rope", "p_map") if (any(c("bf", "bayesfactor", "bayes_factor") %in% index)) { test <- c(test, "bf") } params <- suppressMessages(describe_posterior( model, centrality = "all", dispersion = TRUE, test = test, ... )) params <- params[params$Parameter != "(Intercept)", ] params[unique(c("Parameter", "Median", index))] } #' Set a new location for a prior #' @keywords internal .prior_new_location <- function(prior, sign, magnitude = 10) { prior$location <- -1 * sign * magnitude * prior$scale prior } #' Extract and Returns the priors formatted for rstanarm #' @keywords internal .extract_priors_rstanarm <- function(model, ...) { priors <- rstanarm::prior_summary(model) # Deal with adjusted scale if (!is.null(priors$prior$adjusted_scale)) { priors$prior$scale <- priors$prior$adjusted_scale priors$prior$adjusted_scale <- NULL } priors$prior$autoscale <- FALSE priors } bayestestR/R/bayesfactor_models.R0000644000176200001440000004724414765755711016625 0ustar liggesusers#' Bayes Factors (BF) for model comparison #' #' @description This function computes or extracts Bayes factors from fitted #' models. The `bf_*` function is an alias of the main function. #' #' @author Mattan S. Ben-Shachar #' #' @param ... Fitted models (see details), all fit on the same data, or a single #' `BFBayesFactor` object (see 'Details'). Ignored in `as.matrix()`, #' `update()`. If the following named arguments are present, they are passed #' to [`insight::get_loglikelihood()`] (see details): #' - `estimator` (defaults to `"ML"`) #' - `check_response` (defaults to `FALSE`) #' @param denominator Either an integer indicating which of the models to use as #' the denominator, or a model to be used as a denominator. Ignored for #' `BFBayesFactor`. #' @param object,x A [`bayesfactor_models()`] object. #' @param subset Vector of model indices to keep or remove. #' @param reference Index of model to reference to, or `"top"` to #' reference to the best model, or `"bottom"` to reference to the worst #' model. #' @inheritParams hdi #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) #' implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @details #' If the passed models are supported by **insight** the DV of all models will #' be tested for equality (else this is assumed to be true), and the models' #' terms will be extracted (allowing for follow-up analysis with `bayesfactor_inclusion`). #' #' - For `brmsfit` or `stanreg` models, Bayes factors are computed using the \CRANpkg{bridgesampling} package. #' - `brmsfit` models must have been fitted with `save_pars = save_pars(all = TRUE)`. #' - `stanreg` models must have been fitted with a defined `diagnostic_file`. #' - For `BFBayesFactor`, `bayesfactor_models()` is mostly a wraparound `BayesFactor::extractBF()`. #' - For all other model types, Bayes factors are computed using the BIC approximation. #' Note that BICs are extracted from using [insight::get_loglikelihood], see documentation #' there for options for dealing with transformed responses and REML estimation. #' #' In order to correctly and precisely estimate Bayes factors, a rule of thumb #' are the 4 P's: **P**roper **P**riors and **P**lentiful #' **P**osteriors. How many? The number of posterior samples needed for #' testing is substantially larger than for estimation (the default of 4000 #' samples may not be enough in many cases). 
A conservative rule of thumb is to #' obtain 10 times more samples than would be required for estimation #' (_Gronau, Singmann, & Wagenmakers, 2017_). If less than 40,000 samples #' are detected, `bayesfactor_models()` gives a warning. #' #' See also [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html). #' #' @inheritSection bayesfactor_parameters Interpreting Bayes Factors #' #' @return A data frame containing the models' formulas (reconstructed fixed and #' random effects) and their `log(BF)`s (Use `as.numeric()` to extract the #' non-log Bayes factors; see examples), that prints nicely. #' #' @examplesIf require("lme4") && require("BayesFactor") && require("rstanarm") && require("brms") #' # With lm objects: #' # ---------------- #' lm1 <- lm(mpg ~ 1, data = mtcars) #' lm2 <- lm(mpg ~ hp, data = mtcars) #' lm3 <- lm(mpg ~ hp + drat, data = mtcars) #' lm4 <- lm(mpg ~ hp * drat, data = mtcars) #' (BFM <- bayesfactor_models(lm1, lm2, lm3, lm4, denominator = 1)) #' # bayesfactor_models(lm2, lm3, lm4, denominator = lm1) # same result #' # bayesfactor_models(lm1, lm2, lm3, lm4, denominator = lm1) # same result #' #' update(BFM, reference = "bottom") #' as.matrix(BFM) #' as.numeric(BFM) #' #' lm2b <- lm(sqrt(mpg) ~ hp, data = mtcars) #' # Set check_response = TRUE for transformed responses #' bayesfactor_models(lm2b, denominator = lm2, check_response = TRUE) #' #' \donttest{ #' # With lmerMod objects: #' # --------------------- #' lmer1 <- lme4::lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) #' lmer2 <- lme4::lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) #' lmer3 <- lme4::lmer( #' Sepal.Length ~ Petal.Length + (Petal.Length | Species) + (1 | Petal.Width), #' data = iris #' ) #' bayesfactor_models(lmer1, lmer2, lmer3, #' denominator = 1, #' estimator = "REML" #' ) #' #' # rstanarm models #' # --------------------- #' # (note that a unique diagnostic_file MUST be specified in order to work) #' stan_m0 <- suppressWarnings(rstanarm::stan_glm(Sepal.Length ~ 1, #' data = iris, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df0.csv") #' )) #' stan_m1 <- suppressWarnings(rstanarm::stan_glm(Sepal.Length ~ Species, #' data = iris, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df1.csv") #' )) #' stan_m2 <- suppressWarnings(rstanarm::stan_glm(Sepal.Length ~ Species + Petal.Length, #' data = iris, #' family = gaussian(), #' diagnostic_file = file.path(tempdir(), "df2.csv") #' )) #' bayesfactor_models(stan_m1, stan_m2, denominator = stan_m0, verbose = FALSE) #' #' #' # brms models #' # -------------------- #' # (note the save_pars MUST be set to save_pars(all = TRUE) in order to work) #' brm1 <- brms::brm(Sepal.Length ~ 1, data = iris, save_pars = save_pars(all = TRUE)) #' brm2 <- brms::brm(Sepal.Length ~ Species, data = iris, save_pars = save_pars(all = TRUE)) #' brm3 <- brms::brm( #' Sepal.Length ~ Species + Petal.Length, #' data = iris, #' save_pars = save_pars(all = TRUE) #' ) #' #' bayesfactor_models(brm1, brm2, brm3, denominator = 1, verbose = FALSE) #' #' #' # BayesFactor #' # --------------------------- #' data(puzzles) #' BF <- BayesFactor::anovaBF(RT ~ shape * color + ID, #' data = puzzles, #' whichRandom = "ID", progress = FALSE #' ) #' BF #' bayesfactor_models(BF) # basically the same #' } #' #' @references #' - Gronau, Q. F., Singmann, H., & Wagenmakers, E. J. (2017). Bridgesampling: An R package for estimating #' normalizing constants. 
arXiv preprint arXiv:1710.08162. #' #' - Kass, R. E., and Raftery, A. E. (1995). Bayes Factors. Journal of the American Statistical Association, #' 90(430), 773-795. #' #' - Robert, C. P. (2016). The expected demise of the Bayes factor. Journal of Mathematical Psychology, #' 72, 33–37. #' #' - Wagenmakers, E. J. (2007). A practical solution to the pervasive problems of p values. #' Psychonomic bulletin & review, 14(5), 779-804. #' #' - Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and Wagenmakers, E.-J. (2011). #' Statistical Evidence in Experimental Psychology: An Empirical Comparison Using 855 t Tests. #' Perspectives on Psychological Science, 6(3), 291–298. \doi{10.1177/1745691611406923} #' #' @export bayesfactor_models <- function(..., denominator = 1, verbose = TRUE) { UseMethod("bayesfactor_models") } #' @rdname bayesfactor_models #' @export bf_models <- bayesfactor_models #' @export #' @rdname bayesfactor_models bayesfactor_models.default <- function(..., denominator = 1, verbose = TRUE) { # Organize the models and their names mods <- list(...) denominator <- list(denominator) cl <- match.call(expand.dots = FALSE) estimator <- mods[["estimator"]] check_response <- mods[["check_response"]] if (is.null(estimator)) estimator <- "ML" if (is.null(check_response)) check_response <- FALSE mods[["check_response"]] <- mods[["estimator"]] <- NULL cl$...$estimator <- cl$...$check_response <- NULL names(mods) <- sapply(cl[["..."]], insight::safe_deparse) names(denominator) <- insight::safe_deparse(cl$denominator) mods <- .cleanup_BF_models(mods, denominator, cl) mforms <- names(mods) denominator <- attr(mods, "denominator", exact = TRUE) # Get formula / model names # supported models supported_models <- vapply(mods, insight::is_model_supported, TRUE) if (all(supported_models)) { temp_forms <- sapply(mods, .find_full_formula) has_terms <- sapply(temp_forms, nchar) > 0 mforms[has_terms] <- temp_forms[has_terms] supported_models[!has_terms] <- FALSE } model_objects <- .safe(do.call(insight::ellipsis_info, c(mods, verbose = FALSE))) if (!is.null(model_objects)) { were_checked <- inherits(model_objects, "ListModels") # Validate response if (were_checked && verbose && !isTRUE(attr(model_objects, "same_response"))) { insight::format_warning( "When comparing models, please note that probably not all models were fit from same data." ) } # Get BIC if (were_checked && estimator == "REML" && any(vapply(mods, insight::is_mixed_model, TRUE)) && !isTRUE(attr(model_objects, "same_fixef")) && verbose) { insight::format_warning(paste( "Information criteria (like BIC) based on REML fits (i.e. `estimator=\"REML\"`)", "are not recommended for comparison between models with different fixed effects.", "Concider setting `estimator=\"ML\"`." )) } } else if (verbose) { insight::format_alert("Unable to validate that all models were fit with the same data.") } mBIC <- tryCatch(sapply(mods, function(m) { LL <- insight::get_loglikelihood( m, estimator = estimator, check_response = check_response ) stats::BIC(LL) }), error = function(...) 
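    # if log-likelihood-based BICs cannot be extracted, return NULL so that
    # plain stats::BIC() is used as a fallback right below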
NULL) if (is.null(mBIC)) mBIC <- sapply(mods, stats::BIC) # Get BF mBFs <- bic_to_bf(mBIC, denominator = mBIC[denominator], log = TRUE) res <- data.frame( Model = mforms, log_BF = mBFs, stringsAsFactors = FALSE ) .bf_models_output(res, denominator = denominator, bf_method = "BIC approximation", unsupported_models = !all(supported_models), model_names = names(mods) ) } .bayesfactor_models_stan <- function(mods, denominator = 1, verbose = TRUE) { # Warn n_samps <- sapply(mods, function(x) { alg <- insight::find_algorithm(x) if (is.null(alg$iterations)) alg$iterations <- alg$sample (alg$iterations - alg$warmup) * alg$chains }) if (any(n_samps < 4e4) && verbose) { insight::format_warning( "Bayes factors might not be precise.", "For precise Bayes factors, sampling at least 40,000 posterior samples is recommended." ) } if (inherits(mods[[1]], "blavaan")) { res <- .bayesfactor_models_stan_SEM(mods, denominator, verbose) bf_method <- "marginal likelihoods (Laplace approximation)" unsupported_models <- TRUE } else { res <- .bayesfactor_models_stan_REG(mods, denominator, verbose) bf_method <- "marginal likelihoods (bridgesampling)" unsupported_models <- FALSE } .bf_models_output(res, denominator = denominator, bf_method = bf_method, unsupported_models = unsupported_models ) } #' @keywords internal .bayesfactor_models_stan_REG <- function(mods, denominator, verbose = TRUE) { insight::check_if_installed("bridgesampling") # Test that all is good: resps <- lapply(mods, insight::get_response) from_same_data_as_den <- sapply(resps[-denominator], identical, y = resps[[denominator]] ) if (!all(from_same_data_as_den)) { insight::format_error("Models were not computed from the same data.") } mML <- lapply(mods, .get_marglik, verbose = verbose) mBFs <- sapply(mML, function(x) { bf <- bridgesampling::bf(x, mML[[denominator]], log = TRUE) bf[["bf"]] }) # Get formula mforms <- sapply(mods, .find_full_formula) res <- data.frame( Model = mforms, log_BF = mBFs, stringsAsFactors = FALSE ) } .bayesfactor_models_stan_SEM <- function(mods, denominator, verbose = TRUE) { utils::capture.output( suppressWarnings({ mBFs <- sapply(mods, function(m) { blavaan::blavCompare(m, mods[[denominator]])[["bf"]][1] }) }) ) res <- data.frame( Model = names(mods), log_BF = unname(mBFs), stringsAsFactors = FALSE ) } #' @export bayesfactor_models.stanreg <- function(..., denominator = 1, verbose = TRUE) { mods <- list(...) if (inherits(mods[[1]], "stanreg")) { insight::check_if_installed("rstanarm") } else if (inherits(mods[[1]], "brmsfit")) { insight::check_if_installed("brms") } else if (inherits(mods[[1]], "blavaan")) { insight::check_if_installed("blavaan") } # Organize the models and their names denominator <- list(denominator) cl <- match.call(expand.dots = FALSE) names(mods) <- sapply(cl[["..."]], insight::safe_deparse) names(denominator) <- insight::safe_deparse(cl$denominator) mods <- .cleanup_BF_models(mods, denominator, cl) denominator <- attr(mods, "denominator", exact = TRUE) .bayesfactor_models_stan(mods, denominator = denominator, verbose = verbose) } #' @export bayesfactor_models.brmsfit <- bayesfactor_models.stanreg #' @export bayesfactor_models.blavaan <- bayesfactor_models.stanreg #' @export bayesfactor_models.BFBayesFactor <- function(..., verbose = TRUE) { models <- c(...) 
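  # the models combined into a single BFBayesFactor object share a common
  # denominator; extractBF() below returns their log-BFs against it, with a 0
  # prepended for the denominator itself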
insight::check_if_installed("BayesFactor") mBFs <- c(0, BayesFactor::extractBF(models, TRUE, TRUE)) mforms <- sapply(c(models@denominator, models@numerator), function(x) x@shortName) if (inherits(models@denominator, "BFlinearModel")) { mforms[mforms == "Intercept only"] <- "1" } else { mforms <- .clean_non_linBF_mods(mforms) } res <- data.frame( Model = unname(mforms), log_BF = mBFs, stringsAsFactors = FALSE ) .bf_models_output(res, denominator = 1, bf_method = "JZS (BayesFactor)", unsupported_models = !inherits(models@denominator, "BFlinearModel") ) } # Methods ----------------------------------------------------------------- #' @rdname bayesfactor_models #' @export update.bayesfactor_models <- function(object, subset = NULL, reference = NULL, ...) { if (!is.null(reference)) { if (reference == "top") { reference <- which.max(object$log_BF) } else if (reference == "bottom") { reference <- which.min(object$log_BF) } object$log_BF <- object$log_BF - object$log_BF[reference] attr(object, "denominator") <- reference } denominator <- attr(object, "denominator") if (!is.null(subset)) { if (all(subset < 0)) { subset <- seq_len(nrow(object))[subset] } object_subset <- object[subset, ] if (denominator %in% subset) { attr(object_subset, "denominator") <- which(denominator == subset) } else { object_subset <- rbind(object[denominator, ], object_subset) attr(object_subset, "denominator") <- 1 } object <- object_subset } object } #' @rdname bayesfactor_models #' @export as.matrix.bayesfactor_models <- function(x, ...) { out <- -outer(x$log_BF, x$log_BF, FUN = "-") rownames(out) <- colnames(out) <- x$Model # out <- exp(out) class(out) <- c("bayesfactor_models_matrix", class(out)) out } # Helpers ----------------------------------------------------------------- #' @keywords internal .cleanup_BF_models <- function(mods, denominator, cl) { if (length(mods) == 1 && inherits(mods[[1]], "list")) { mods <- mods[[1]] mod_names <- .safe(sapply(cl[["..."]][[1]][-1], insight::safe_deparse)) if (!is.null(mod_names) && length(mod_names) == length(mods)) { names(mods) <- mod_names } } if (is.numeric(denominator[[1]])) { denominator <- denominator[[1]] } else { denominator_model <- which(names(mods) == names(denominator)) if (length(denominator_model) == 0) { mods <- c(mods, denominator) denominator <- length(mods) } else { denominator <- denominator_model } } attr(mods, "denominator") <- denominator mods } #' @keywords internal .bf_models_output <- function(res, denominator = 1, bf_method = "method", unsupported_models = FALSE, model_names = NULL) { # sanity check - are all BF NA? if (!is.null(res$log_BF) && all(is.na(res$log_BF))) { insight::format_error("Could not calculate Bayes Factor for these models. 
You may report this problem at {https://github.com/easystats/bayestestR/issues/}.") # nolint } attr(res, "denominator") <- denominator attr(res, "BF_method") <- bf_method attr(res, "unsupported_models") <- unsupported_models attr(res, "model_names") <- model_names class(res) <- c("bayesfactor_models", "see_bayesfactor_models", class(res)) res } #' @keywords internal .find_full_formula <- function(mod) { formulas <- insight::find_formula(mod) conditional <- random <- NULL if (!is.null(formulas$conditional)) { conditional <- as.character(formulas$conditional)[3] } if (!is.null(formulas$random)) { if (!is.list(formulas$random)) { formulas$random <- list(formulas$random) } random <- sapply(formulas$random, function(x) { paste0("(", as.character(x)[2], ")") }) } paste(c(conditional, random), collapse = " + ") } #' @keywords internal .clean_non_linBF_mods <- function(m_names) { tryCatch( { m_txt <- character(length = length(m_names)) ## Detect types ## is_null <- startsWith(m_names, "Null") is_rho <- grepl("rho", m_names, fixed = TRUE) is_mu <- grepl("mu", m_names, fixed = TRUE) is_d <- grepl("d", m_names, fixed = TRUE) is_p <- grepl("p", m_names, fixed = TRUE) is_range <- grepl("<", m_names, fixed = TRUE) ## Range Alts ## m_txt[!is_null & is_range] <- sub("^[^\\s]*\\s[^\\s]*\\s", "", m_names[!is_null & is_range]) ## Null models + Not nulls ## if (any(is_d & is_p)) { is_null <- !startsWith(m_names, "Non") temp <- m_names[is_null][1] mi <- gregexpr("\\(.*\\)", temp) aa <- unlist(regmatches(temp, m = mi), use.names = FALSE) m_txt[is_null] <- sub("a=", "a = ", aa, fixed = TRUE) m_txt[!is_null & !is_range] <- sub("a=", "a != ", aa, fixed = TRUE) } else if (any(is_rho)) { m_txt[is_null] <- "rho = 0" m_txt[!is_null & !is_range] <- "rho != 0" m_txt <- sub(" 1) { caption <- "Proportion of samples inside the ROPE" } else { caption <- sprintf( "Proportion of samples inside the ROPE [%.*f, %.*f]", digits, x$ROPE_low[1], digits, x$ROPE_high[1] ) x$ROPE_low <- x$ROPE_high <- NULL } .print_md_default(x = x, digits = digits, caption = caption, ci_string = "ROPE", ...) } #' @export print_md.p_significance <- function(x, digits = 2, ...) { threshold <- attributes(x)$threshold if (is.list(threshold)) { caption <- "Practical Significance" out <- as.data.frame(do.call(rbind, threshold)) colnames(out) <- c("ROPE_low", "ROPE_high") x$ROPE_low <- out$ROPE_low x$ROPE_high <- out$ROPE_high ci_string <- "ROPE" } else { caption <- sprintf( "Practical Significance (threshold: %s)", insight::format_value(attributes(x)$threshold, digits = digits) ) ci_string <- NULL } .print_md_default(x = x, digits = digits, caption = caption, ci_string = ci_string, ...) } #' @export print_md.bayestestR_hdi <- function(x, digits = 2, caption = "Highest Density Interval", ...) { .print_md_default(x = x, digits = digits, caption = caption, ci_string = "HDI", ...) } #' @export print_md.bayestestR_eti <- function(x, digits = 2, caption = "Equal-Tailed Interval", ...) { .print_md_default(x = x, digits = digits, caption = caption, ci_string = "ETI", ...) } #' @export print_md.bayestestR_si <- function(x, digits = 2, caption = "Support Interval", ...) { .print_md_default(x = x, digits = digits, caption = caption, ci_string = "SI", ...) } # special handling for bayes factors ------------------ #' @export print_md.bayesfactor_models <- function(x, digits = 3, log = FALSE, show_names = TRUE, caption = "Bayes Factors for Model Comparison", ...) 
{ .print_bf_md_default( x = x, digits = digits, log = log, show_names = show_names, caption = caption, align = "llr", ... ) } #' @export print_md.bayesfactor_inclusion <- function(x, digits = 3, log = FALSE, caption = "Inclusion Bayes Factors (Model Averaged)", ...) { .print_bf_md_default( x = x, digits = digits, log = log, caption = caption, align = "lrrr", ... ) } #' @export print_md.bayesfactor_restricted <- function(x, digits = 3, log = FALSE, caption = "Bayes Factor (Order-Restriction)", ...) { .print_bf_md_default(x = x, digits = digits, log = log, caption = caption, ...) } #' @export print_md.bayesfactor_parameters <- function(x, digits = 3, log = FALSE, ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, log = log, format = "markdown", ... ) insight::export_table(formatted_table, format = "markdown") } # util --------------- .print_md_default <- function(x, digits = 2, caption = NULL, subtitles = NULL, ci_string = "CI", ...) { # retrieve information with cleaned parameter names cp <- attr(x, "clean_parameters") # format data frame and columns formatted_table <- format( x, cp = cp, digits = digits, format = "markdown", ci_string = ci_string, caption = caption, subtitles = subtitles, ... ) # print for data frame - I don't think we need a special handling for # numeric values to have a markdown-table output insight::export_table( formatted_table, caption = caption, format = "markdown" ) } .print_bf_md_default <- function(x, digits = 3, log = FALSE, show_names = NULL, caption = NULL, align = NULL, ...) { formatted_table <- format( x, digits = digits, log = log, show_names = show_names, caption = caption, format = "markdown", ... ) insight::export_table( formatted_table, align = align, caption = caption, format = "markdown" ) } bayestestR/R/zzz.R0000644000176200001440000000023414542333405013562 0ustar liggesusers.onAttach <- function(libname, pkgname) { if (format(Sys.time(), "%m%d") == "0504") { packageStartupMessage("May the fourth be with you!") } } bayestestR/R/convert_bayesian_to_frequentist.R0000644000176200001440000001531614652220356021424 0ustar liggesusers#' Convert (refit) a Bayesian model to frequentist #' #' Refit Bayesian model as frequentist. Can be useful for comparisons. #' #' @param model A Bayesian model. #' @param data Data used by the model. If `NULL`, will try to extract it #' from the model. #' @param REML For mixed effects, should models be estimated using #' restricted maximum likelihood (REML) (`TRUE`, default) or maximum #' likelihood (`FALSE`)? 
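#' @return A frequentist model object, refit on the same formula and data as
#' the Bayesian input (e.g., an `lm`, `glm`, `lme4`, `glmmTMB`, or `gamm4`
#' model, depending on the input model).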
#' @examplesIf require("rstanarm")
#' \donttest{
#' # Rstanarm ----------------------
#' # Simple regressions
#' model <- rstanarm::stan_glm(Sepal.Length ~ Species,
#'   data = iris, chains = 2, refresh = 0
#' )
#' bayesian_as_frequentist(model)
#'
#' model <- rstanarm::stan_glm(vs ~ mpg,
#'   family = "binomial",
#'   data = mtcars, chains = 2, refresh = 0
#' )
#' bayesian_as_frequentist(model)
#'
#' # Mixed models
#' model <- rstanarm::stan_glmer(
#'   Sepal.Length ~ Petal.Length + (1 | Species),
#'   data = iris, chains = 2, refresh = 0
#' )
#' bayesian_as_frequentist(model)
#'
#' model <- rstanarm::stan_glmer(vs ~ mpg + (1 | cyl),
#'   family = "binomial",
#'   data = mtcars, chains = 2, refresh = 0
#' )
#' bayesian_as_frequentist(model)
#' }
#'
#' @export
convert_bayesian_as_frequentist <- function(model, data = NULL, REML = TRUE) {
  if (is.null(data)) {
    data <- insight::get_data(model)
  }

  info <- insight::model_info(model, verbose = FALSE)
  model_formula <- insight::find_formula(model)
  model_family <- insight::get_family(model)

  # fix exception: The 0 + Intercept syntax in brms can be used to facilitate
  # prior specification for the intercept, but it leads to issues where
  # Intercept can wrongly be taken for a regular variable rather than a
  # special term.
  f_string <- insight::safe_deparse(model_formula$conditional)
  if (grepl("0 + Intercept", f_string, fixed = TRUE)) {
    model_formula$conditional <- stats::as.formula(gsub("0 + Intercept", "1", f_string, fixed = TRUE))
  }

  if (inherits(model_family, "brmsfamily")) {
    insight::check_if_installed("glmmTMB")
    # exception: ordbetareg()
    if ("custom" %in% model_family$family && all(model_family$name == "ord_beta_reg")) {
      model_family <- glmmTMB::ordbeta()
    } else {
      # not all families return proper objects from "get", so we capture
      # some families via switch here...
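      # (families not listed in the switch below fall back to its default
      # branch, i.e. the generic constructor get(<family>)(link = <link>))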
model_family <- .safe(switch(model_family$family, beta = glmmTMB::beta_family(link = model_family$link), beta_binomial = glmmTMB::betabinomial(link = model_family$link), negbinomial = glmmTMB::nbinom1(link = model_family$link), lognormal = glmmTMB::lognormal(link = model_family$link), student = glmmTMB::t_family(link = model_family$link), get(model_family$family)(link = model_family$link) )) } } # if family could not be identified, stop here if (is.null(model_family)) { insight::format_error("Model could not be automatically converted to frequentist model.") } # first attempt freq <- tryCatch(.convert_bayesian_as_frequentist( info = info, formula = model_formula, data = data, family = model_family, REML = REML ), error = function(e) e) if (inherits(freq, "error")) { # try again to extract family, using generic approach model_family <- get(model_family$family)(link = model_family$link) freq <- .convert_bayesian_as_frequentist( info = info, formula = model_formula, data = data, family = model_family, REML = REML ) } if (inherits(freq, "error")) { insight::format_error("Model could not be automatically converted to frequentist model.") } freq } # internal .convert_bayesian_as_frequentist <- function(info, formula, data, family, REML = TRUE) { # TODO: Check for # nonlinear formulas, # correlation structures, # weights, # offset, # subset, # knots, # meta-analysis if (info$is_dispersion || info$is_orderedbeta || info$is_beta || info$is_betabinomial || info$is_zero_inflated || info$is_zeroinf || info$is_hurdle || info$is_negbin) { # nolint insight::check_if_installed("glmmTMB") cond_formula <- .rebuild_cond_formula(formula) dispformula <- formula$dispersion if (is.null(dispformula)) dispformula <- formula$sigma if (is.null(dispformula)) dispformula <- ~1 ziformula <- formula$zero_inflated if (is.null(ziformula)) ziformula <- formula$zi if (is.null(ziformula)) ziformula <- ~0 freq <- tryCatch( glmmTMB::glmmTMB( formula = cond_formula, ziformula = ziformula, dispformula = dispformula, family = family, data = data, REML = REML ), error = function(e) e ) } else if (info$is_gam) { insight::check_if_installed("gamm4") freq <- tryCatch( gamm4::gamm4( formula = formula$conditional, random = formula$random, family = family, data = data ), error = function(e) e ) } else if (info$is_mixed) { insight::check_if_installed("lme4") insight::check_if_installed("glmmTMB") cond_formula <- .rebuild_cond_formula(formula) if (info$is_linear) { freq <- tryCatch( lme4::lmer( formula = cond_formula, data = data ), error = function(e) e ) } else { ## TODO: check if beta/Gamma are correctly captured freq <- tryCatch( lme4::glmer( formula = cond_formula, family = family, data = data ), error = function(e) e ) if (inherits(freq, "error")) { freq <- tryCatch( glmmTMB::glmmTMB( formula = cond_formula, family = family, data = data ), error = function(e) e ) } } } else if (info$is_linear) { freq <- stats::lm(formula$conditional, data = data) } else { freq <- stats::glm(formula$conditional, data = data, family = family) } freq } .rebuild_cond_formula <- function(formula) { if (is.null(formula$random)) { return(formula$conditional) } if (is.list(formula$random)) { random_formula <- paste( lapply( formula$random, function(x) { paste0("(", as.character(x)[-1], ")") } ), collapse = " + " ) } else { random_formula <- paste0("(", as.character(formula$random)[-1], ")") } fixed_formula <- paste(as.character(formula$conditional)[c(2, 1, 3)], collapse = " ") stats::as.formula(paste(fixed_formula, random_formula, sep = " + ")) } #' @rdname 
convert_bayesian_as_frequentist #' @export bayesian_as_frequentist <- convert_bayesian_as_frequentist bayestestR/R/utils_print_data_frame.R0000644000176200001440000000556315001512506017446 0ustar liggesusers.print_data_frame <- function(x, digits) { out <- list(x) names(out) <- "fixed" if (all(c("Effects", "Component") %in% colnames(x))) { x$split <- sprintf("%s_%s", x$Effects, x$Component) } else if ("Effects" %in% colnames(x)) { colnames(x)[which(colnames(x) == "Effects")] <- "split" } else if ("Component" %in% colnames(x)) { colnames(x)[which(colnames(x) == "Component")] <- "split" } if ("split" %in% colnames(x)) { if (anyNA(x$split)) { x$split[is.na(x$split)] <- "{other}" } out <- lapply( split(x, f = x$split), datawizard::data_remove, select = c("split", "Component", "Effects"), verbose = FALSE ) } for (i in names(out)) { header <- switch(i, conditional = , fixed_conditional = , fixed = "# Fixed Effects (Conditional Model)", fixed_sigma = "# Sigma (fixed effects)", sigma = "# Sigma (fixed effects)", zi = , zero_inflated = , fixed_zero_inflated = , fixed_zi = "# Fixed Effects (Zero-Inflated Model)", random = , random_conditional = "# Random Effects (Conditional Model)", random_zero_inflated = , random_zi = "# Random Effects (Zero-Inflated Model)", smooth_sd = , fixed_smooth_sd = "# Smooth Terms", # blavaan latent = "# Latent Loading", residual = "# Residual Variance", intercept = "# Intercept", regression = "# Regression", # Default paste0("# ", i) ) if ("Parameter" %in% colnames(out[[i]])) { # clean parameters names out[[i]]$Parameter <- gsub("(b_|bs_|bsp_|bcs_)(?!zi_)(.*)", "\\2", out[[i]]$Parameter, perl = TRUE) out[[i]]$Parameter <- gsub("(b_zi_|bs_zi_|bsp_zi_|bcs_zi_)(.*)", "\\2", out[[i]]$Parameter, perl = TRUE) # clean random effect parameters names out[[i]]$Parameter <- gsub("r_(.*)\\.(.*)\\.", "\\1", out[[i]]$Parameter) out[[i]]$Parameter <- gsub("b\\[\\(Intercept\\) (.*)\\]", "\\1", out[[i]]$Parameter) out[[i]]$Parameter <- gsub("b\\[(.*) (.*)\\]", "\\2", out[[i]]$Parameter) # clean smooth terms out[[i]]$Parameter <- gsub("^smooth_sd\\[(.*)\\]", "\\1", out[[i]]$Parameter) out[[i]]$Parameter <- gsub("^sds_", "\\1", out[[i]]$Parameter) # SD out[[i]]$Parameter <- gsub( "(.*)(__Intercept|__zi_Intercept)(.*)", "\\1 (Intercept)\\3", gsub("^sd_(.*)", "SD \\1", out[[i]]$Parameter) ) # remove ".1" etc. suffix out[[i]]$Parameter <- gsub("(.*)(\\.)(\\d)$", "\\1 \\3", out[[i]]$Parameter) # remove "__zi" out[[i]]$Parameter <- gsub("__zi", "", out[[i]]$Parameter, fixed = TRUE) } if (length(out) > 1) { insight::print_color(header, "blue") cat("\n\n") } cat(insight::export_table(out[[i]], digits = digits)) cat("\n") } } bayestestR/R/as.list.R0000644000176200001440000000140214542333405014300 0ustar liggesusers# as.list ----------------------------------------------------------------- #' @export as.list.bayestestR_hdi <- function(x, ...) 
{
  if (nrow(x) == 1) {
    out <- list(CI = x$CI, CI_low = x$CI_low, CI_high = x$CI_high)
    out$Parameter <- x$Parameter
  } else {
    out <- list()
    for (param in x$Parameter) {
      out[[param]] <- list()
      out[[param]][["CI"]] <- x[x$Parameter == param, "CI"]
      out[[param]][["CI_low"]] <- x[x$Parameter == param, "CI_low"]
      out[[param]][["CI_high"]] <- x[x$Parameter == param, "CI_high"]
    }
  }
  out
}

#' @export
as.list.bayestestR_eti <- as.list.bayestestR_hdi

#' @export
as.list.bayestestR_si <- as.list.bayestestR_hdi

#' @export
as.list.bayestestR_ci <- as.list.bayestestR_hdi
bayestestR/R/bayesfactor_inclusion.R0000644000176200001440000001616414765755711017320 0ustar liggesusers#' Inclusion Bayes Factors for testing predictors across Bayesian models
#'
#' The `bf_*` function is an alias of the main function. For more info, see
#' [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html).
#'
#' @author Mattan S. Ben-Shachar
#' @param models An object of class [bayesfactor_models()] or `BFBayesFactor`.
#' @param match_models See details.
#' @param prior_odds Optional vector of prior odds for the models. See
#' `BayesFactor::priorOdds<-`.
#' @param ... Arguments passed to or from other methods.
#'
#' @return a data frame containing the prior and posterior probabilities, and
#' log(BF) for each effect (Use `as.numeric()` to extract the non-log Bayes
#' factors; see examples).
#'
#' @details Inclusion Bayes factors answer the question: Are the observed data
#' more probable under models with a particular effect, than they are under
#' models without that particular effect? In other words, on average - are
#' models with effect \eqn{X} more likely to have produced the observed data
#' than models without effect \eqn{X}?
#'
#' \subsection{Match Models}{
#' If `match_models=FALSE` (default), Inclusion BFs are computed by comparing
#' all models with a term against all models without that term. If `TRUE`,
#' comparison is restricted to models that (1) do not include any interactions
#' with the term of interest; (2) for interaction terms, averaging is done only
#' across models that contain the main effect terms from which the interaction
#' term is comprised.
#' }
#'
#' @inheritSection bayesfactor_parameters Interpreting Bayes Factors
#'
#' @note Random effects in the `lmer` style are converted to interaction terms:
#' i.e., `(X|G)` will become the terms `1:G` and `X:G`.
#'
#' @seealso [weighted_posteriors()] for Bayesian parameter averaging.
#'
#' @examplesIf require("BayesFactor")
#' library(bayestestR)
#'
#' # Using bayesfactor_models:
#' # ------------------------------
#' mo0 <- lm(Sepal.Length ~ 1, data = iris)
#' mo1 <- lm(Sepal.Length ~ Species, data = iris)
#' mo2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris)
#' mo3 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris)
#'
#' BFmodels <- bayesfactor_models(mo1, mo2, mo3, denominator = mo0)
#' (bf_inc <- bayesfactor_inclusion(BFmodels))
#'
#' as.numeric(bf_inc)
#'
#' \donttest{
#' # BayesFactor
#' # -------------------------------
#' BF <- BayesFactor::generalTestBF(len ~ supp * dose, ToothGrowth, progress = FALSE)
#' bayesfactor_inclusion(BF)
#'
#' # compare only matched models:
#' bayesfactor_inclusion(BF, match_models = TRUE)
#' }
#'
#' @references
#' - Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. (2019, March 25).
#' A conceptual introduction to Bayesian Model Averaging. \doi{10.31234/osf.io/wgb64}
#'
#' - Clyde, M. A., Ghosh, J., & Littman, M. L. (2011).
Bayesian adaptive sampling #' for variable selection and model averaging. Journal of Computational and Graphical Statistics, #' 20(1), 80-101. #' #' - Mathot, S. (2017). Bayes like a Baws: Interpreting Bayesian Repeated Measures in JASP. #' [Blog post](https://www.cogsci.nl/blog/interpreting-bayesian-repeated-measures-in-jasp). #' #' @export bayesfactor_inclusion <- function(models, match_models = FALSE, prior_odds = NULL, ...) { UseMethod("bayesfactor_inclusion") } #' @rdname bayesfactor_inclusion #' @export bf_inclusion <- bayesfactor_inclusion #' @export bayesfactor_inclusion.bayesfactor_models <- function(models, match_models = FALSE, prior_odds = NULL, ...) { if (isTRUE(attr(models, "unsupported_models"))) { insight::format_error( "Can not compute inclusion Bayes factors - passed models are not (yet) supported." ) } # Build Models Table # df.model <- .get_model_table(models, priorOdds = prior_odds) effnames <- colnames(df.model)[-(1:3)] # Build Interaction Matrix # if (isTRUE(match_models)) { effects.matrix <- as.matrix(df.model[, -(1:3)]) df.interaction <- data.frame(effnames, stringsAsFactors = FALSE) for (eff in effnames) { df.interaction[, eff] <- sapply(effnames, .includes_interaction, effnames = eff) } rownames(df.interaction) <- effnames df.interaction <- as.matrix(df.interaction[, -1]) } # Build Effect Table # df.effect <- data.frame( effnames, Pinc = rep(NA, length(effnames)), PincD = rep(NA, length(effnames)), log_BF = rep(NA, length(effnames)), stringsAsFactors = FALSE ) for (eff in effnames) { if (isTRUE(match_models)) { idx1 <- df.interaction[eff, ] idx2 <- df.interaction[, eff] has_not_high_order_interactions <- !apply(effects.matrix[, idx1, drop = FALSE], 1, any) ind_include <- has_not_high_order_interactions & effects.matrix[, eff] ind_exclude <- apply(effects.matrix[, idx2, drop = FALSE], 1, all) & has_not_high_order_interactions & !effects.matrix[, eff] df.model_temp <- df.model[ind_include | ind_exclude, , drop = FALSE] } else { df.model_temp <- df.model } # models with effect mwith <- which(df.model_temp[[eff]]) mwithprior <- sum(df.model_temp[mwith, "priorProbs"]) mwithpost <- sum(df.model_temp[mwith, "postProbs"]) # models without effect mwithoutprior <- sum(df.model_temp[-mwith, "priorProbs"]) mwithoutpost <- sum(df.model_temp[-mwith, "postProbs"]) # Save results df.effect$Pinc[effnames == eff] <- mwithprior df.effect$PincD[effnames == eff] <- mwithpost df.effect$log_BF[effnames == eff] <- (log(mwithpost) - log(mwithoutpost)) - (log(mwithprior) - log(mwithoutprior)) } df.effect <- df.effect[, -1, drop = FALSE] colnames(df.effect) <- c("p_prior", "p_posterior", "log_BF") rownames(df.effect) <- effnames class(df.effect) <- c("bayesfactor_inclusion", class(df.effect)) attr(df.effect, "matched") <- match_models attr(df.effect, "priorOdds") <- prior_odds df.effect } #' @export bayesfactor_inclusion.BFBayesFactor <- function(models, match_models = FALSE, prior_odds = NULL, ...) 
{ models <- bayesfactor_models.BFBayesFactor(models) bayesfactor_inclusion.bayesfactor_models(models, match_models = match_models, prior_odds = prior_odds ) } #' @keywords internal .includes_interaction <- function(eff, effnames) { eff_b <- strsplit(eff, ":", fixed = TRUE) effnames_b <- strsplit(effnames, ":", fixed = TRUE) is_int <- lengths(effnames_b) > 1 temp <- logical(length(effnames)) for (rr in seq_along(effnames)) { if (is_int[rr]) { temp[rr] <- all(eff_b[[1]] %in% effnames_b[[rr]]) & !all(effnames_b[[rr]] %in% eff_b[[1]]) } } temp } bayestestR/R/point_estimate.R0000644000176200001440000003357415052646230015766 0ustar liggesusers#' Point-estimates of posterior distributions #' #' Compute various point-estimates, such as the mean, the median or the MAP, to describe posterior distributions. #' #' @param centrality The point-estimates (centrality indices) to compute. Character #' (vector) or list with one or more of these options: `"median"`, `"mean"`, `"MAP"` #' (see [`map_estimate()`]), `"trimmed"` (which is just `mean(x, trim = threshold)`), #' `"mode"` or `"all"`. #' @param dispersion Logical, if `TRUE`, computes indices of dispersion related #' to the estimate(s) (`SD` and `MAD` for `mean` and `median`, respectively). #' Dispersion is not available for `"MAP"` or `"mode"` centrality indices. #' @param threshold For `centrality = "trimmed"` (i.e. trimmed mean), indicates #' the fraction (0 to 0.5) of observations to be trimmed from each end of the #' vector before the mean is computed. #' @param ... Additional arguments to be passed to or from methods. #' @inheritParams hdi #' #' @inheritSection hdi Model components #' #' @references Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. #' (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. #' Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. 
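#' @return A data frame (of class `point_estimate`) with the requested point
#' estimate(s), and the matching dispersion measure(s) when `dispersion = TRUE`.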
#' #' @examplesIf require("rstanarm") && require("emmeans") && require("brms") && require("BayesFactor") #' library(bayestestR) #' #' point_estimate(rnorm(1000)) #' point_estimate(rnorm(1000), centrality = "all", dispersion = TRUE) #' point_estimate(rnorm(1000), centrality = c("median", "MAP")) #' #' df <- data.frame(replicate(4, rnorm(100))) #' point_estimate(df, centrality = "all", dispersion = TRUE) #' point_estimate(df, centrality = c("median", "MAP")) #' \donttest{ #' # rstanarm models #' # ----------------------------------------------- #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' point_estimate(model, centrality = "all", dispersion = TRUE) #' point_estimate(model, centrality = c("median", "MAP")) #' #' #' # emmeans estimates #' # ----------------------------------------------- #' point_estimate( #' emmeans::emtrends(model, ~1, "wt", data = mtcars), #' centrality = c("median", "MAP") #' ) #' #' # brms models #' # ----------------------------------------------- #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' point_estimate(model, centrality = "all", dispersion = TRUE) #' point_estimate(model, centrality = c("median", "MAP")) #' #' # BayesFactor objects #' # ----------------------------------------------- #' bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) #' point_estimate(bf, centrality = "all", dispersion = TRUE) #' point_estimate(bf, centrality = c("median", "MAP")) #' } #' #' @export point_estimate <- function(x, ...) { UseMethod("point_estimate") } #' @export point_estimate.default <- function(x, ...) { insight::format_error( paste0("'point_estimate()' is not yet implemented for objects of class '", class(x)[1], "'.") ) } #' @rdname point_estimate #' @export point_estimate.numeric <- function(x, centrality = "all", dispersion = FALSE, threshold = 0.1, ...) { centrality <- match.arg(tolower(centrality), c("median", "mean", "map", "trimmed", "mode", "all"), several.ok = TRUE) if ("all" %in% centrality) { estimate_list <- c("median", "mean", "map") } else { estimate_list <- centrality } out <- data.frame(.temp = 0) # Median if ("median" %in% estimate_list) { out$Median <- stats::median(x) if (dispersion) { out$MAD <- stats::mad(x) } } # Mean if ("mean" %in% estimate_list) { out$Mean <- mean(x) if (dispersion) { out$SD <- stats::sd(x) } } # trimmed mean if ("trimmed" %in% estimate_list) { out$Trimmed_Mean <- mean(x, trim = threshold) if (dispersion) { out$SD <- stats::sd(x) } } # MAP if ("map" %in% estimate_list) { out$MAP <- as.numeric(map_estimate(x, ...)) } # MODE if ("mode" %in% estimate_list) { out$Mode <- .mode_estimate(x) } out <- out[names(out) != ".temp"] attr(out, "data") <- x attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export #' @rdname point_estimate #' @inheritParams p_direction point_estimate.data.frame <- function(x, centrality = "all", dispersion = FALSE, threshold = 0.1, rvar_col = NULL, ...) { x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::point_estimate cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) obj_name <- insight::safe_deparse_symbol(substitute(x)) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x)) } x <- .select_nums(x) if (ncol(x) == 1) { estimates <- point_estimate(x[, 1], centrality = centrality, dispersion = dispersion, threshold = threshold, ...) 
} else { estimates <- sapply(x, point_estimate, centrality = centrality, dispersion = dispersion, simplify = FALSE, ...) estimates <- do.call(rbind, estimates) } out <- cbind(data.frame(Parameter = names(x), stringsAsFactors = FALSE), estimates) rownames(out) <- NULL attr(out, "data") <- x attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.draws <- function(x, centrality = "all", dispersion = FALSE, threshold = 0.1, ...) { point_estimate( .posterior_draws_to_df(x), centrality = centrality, dispersion = dispersion, threshold = threshold, ... ) } #' @export point_estimate.rvar <- point_estimate.draws #' @export point_estimate.mcmc <- function(x, centrality = "all", dispersion = FALSE, ...) { point_estimate(as.data.frame(x), centrality = centrality, dispersion = dispersion, ...) } #' @export point_estimate.bcplm <- function(x, centrality = "all", dispersion = FALSE, ...) { point_estimate(insight::get_parameters(x), centrality = centrality, dispersion = dispersion, ...) } #' @export point_estimate.bayesQR <- point_estimate.bcplm #' @export point_estimate.blrm <- point_estimate.bcplm #' @export point_estimate.mcmc.list <- point_estimate.bcplm #' @export point_estimate.BGGM <- point_estimate.bcplm #' @export point_estimate.bamlss <- function(x, centrality = "all", dispersion = FALSE, component = "conditional", ...) { out <- point_estimate( insight::get_parameters(x, component = component), centrality = centrality, dispersion = dispersion, ... ) .add_clean_parameters_attribute(out, x) } #' @export point_estimate.MCMCglmm <- function(x, centrality = "all", dispersion = FALSE, ...) { nF <- x$Fixed$nfl point_estimate( as.data.frame(x$Sol[, 1:nF, drop = FALSE]), centrality = centrality, dispersion = dispersion, ... ) } #' @export point_estimate.emmGrid <- function(x, centrality = "all", dispersion = FALSE, ...) { xdf <- insight::get_parameters(x) out <- point_estimate(xdf, centrality = centrality, dispersion = dispersion, ...) out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export point_estimate.emm_list <- point_estimate.emmGrid #' @export point_estimate.slopes <- function(x, centrality = "all", dispersion = FALSE, ...) { xrvar <- .get_marginaleffects_draws(x) out <- point_estimate(xrvar, centrality = centrality, dispersion = dispersion, ...) out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export point_estimate.comparisons <- point_estimate.slopes #' @export point_estimate.predictions <- point_estimate.slopes #' @export point_estimate.stanreg <- function(x, centrality = "all", dispersion = FALSE, effects = "fixed", component = "location", parameters = NULL, ...) { cleaned_parameters <- .get_cleaned_parameters(x, ...) out <- .prepare_output( point_estimate( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), centrality = centrality, dispersion = dispersion, ... 
), cleaned_parameters, inherits(x, "stanmvreg") ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) attr(out, "centrality") <- centrality attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.stanfit <- point_estimate.stanreg #' @export point_estimate.blavaan <- point_estimate.stanreg #' @rdname point_estimate #' @export point_estimate.brmsfit <- function(x, centrality = "all", dispersion = FALSE, effects = "fixed", component = "conditional", parameters = NULL, ...) { cleaned_parameters <- .get_cleaned_parameters(x, ...) out <- .prepare_output( point_estimate( insight::get_parameters( x, effects = effects, component = component, parameters = parameters, ... ), centrality = centrality, dispersion = dispersion, ... ), cleaned_parameters ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) attr(out, "centrality") <- centrality attr(out, "clean_parameters") <- cleaned_parameters class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.sim.merMod <- function(x, centrality = "all", dispersion = FALSE, effects = "fixed", parameters = NULL, ...) { out <- .point_estimate_models( x = x, effects = effects, component = "conditional", parameters = parameters, centrality = centrality, dispersion = dispersion, ... ) attr(out, "data") <- insight::get_parameters( x, effects = effects, parameters = parameters ) attr(out, "centrality") <- centrality out <- .add_clean_parameters_attribute(out, x) class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.sim <- function(x, centrality = "all", dispersion = FALSE, parameters = NULL, ...) { out <- .point_estimate_models( x = x, effects = "fixed", component = "conditional", parameters = parameters, centrality = centrality, dispersion = dispersion, ... ) attr(out, "data") <- insight::get_parameters(x, parameters = parameters) attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.BFBayesFactor <- function(x, centrality = "all", dispersion = FALSE, ...) { out <- point_estimate(insight::get_parameters(x), centrality = centrality, dispersion = dispersion, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) attr(out, "centrality") <- centrality class(out) <- unique(c("point_estimate", "see_point_estimate", class(out))) out } #' @export point_estimate.matrix <- function(x, ...) { point_estimate(as.data.frame(x), ...) } #' @rdname point_estimate #' @export point_estimate.get_predicted <- function(x, centrality = "all", dispersion = FALSE, use_iterations = FALSE, verbose = TRUE, ...) { if (isTRUE(use_iterations)) { if ("iterations" %in% names(attributes(x))) { out <- point_estimate( as.data.frame(t(attributes(x)$iterations)), centrality = centrality, dispersion = dispersion, verbose = verbose, ... ) } else { insight::format_error("No iterations present in the output.") } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) } else { out <- point_estimate(as.numeric(x), centrality = centrality, dispersion = dispersion, verbose = verbose, ... ) } out } # Helper ------------------------------------------------------------------ #' @keywords internal .point_estimate_models <- function(x, effects, component, parameters, centrality = "all", dispersion = FALSE, ...) 
{ point_estimate( insight::get_parameters(x, effects = effects, component = component, parameters = parameters), centrality = centrality, dispersion = dispersion, ... ) } #' @keywords internal .mode_estimate <- function(x) { ux <- unique(x) ux[which.max(tabulate(match(x, ux)))] } bayestestR/R/simulate_priors.R0000644000176200001440000000664314765755711016177 0ustar liggesusers#' Returns Priors of a Model as Empirical Distributions #' #' Transforms priors information to actual distributions. #' #' @inheritParams effective_sample #' @param n Size of the simulated prior distributions. #' @inheritParams hdi #' #' @seealso [`unupdate()`] for directly sampling from the prior #' distribution (useful for complex priors and designs). #' #' @examples #' \donttest{ #' library(bayestestR) #' if (require("rstanarm")) { #' model <- suppressWarnings( #' stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) #' ) #' simulate_prior(model) #' } #' } #' @export simulate_prior <- function(model, n = 1000, ...) { UseMethod("simulate_prior") } #' @export simulate_prior.stanreg <- function(model, n = 1000, effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ...) { priors <- insight::get_priors( model, effects = effects, component = component, parameters = parameters, verbose = verbose ) .simulate_prior(priors, n = n, verbose = verbose) } #' @export simulate_prior.blavaan <- simulate_prior.stanreg #' @rdname simulate_prior #' @export simulate_prior.brmsfit <- function(model, n = 1000, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ...) { priors <- insight::get_priors( model, effects = effects, component = component, parameters = parameters, verbose = verbose ) .simulate_prior(priors, n = n, verbose = verbose) } #' @export simulate_prior.bcplm <- function(model, n = 1000, verbose = TRUE, ...) { .simulate_prior(insight::get_priors(model, verbose = verbose), n = n, verbose = verbose) } #' @keywords internal .simulate_prior <- function(priors, n = 1000, verbose = TRUE) { simulated <- data.frame(.bamboozled = 1:n) sim_error_msg <- FALSE # iterate over parameters for (param in priors$Parameter) { prior <- priors[priors$Parameter == param, ] # edge cases if (nrow(prior) > 1) { prior <- prior[1, ] } # Get actual scale if ("Adjusted_Scale" %in% names(prior)) { scale <- prior$Adjusted_Scale # is autoscale = FALSE, scale contains NA values - replace # with non-adjusted then. if (anyNA(scale)) scale[is.na(scale)] <- prior$Scale[is.na(scale)] } else { scale <- prior$Scale } # Simulate prior prior <- tryCatch( { if (prior$Distribution %in% c("t", "student_t", "Student's t")) { distribution(prior$Distribution, n, prior$df, prior$Location) } else { distribution(prior$Distribution, n, prior$Location, scale) } }, error = function(e) { sim_error_msg <- TRUE NA } ) simulated[param] <- prior } if (sim_error_msg && verbose) { insight::format_warning(paste0("Can't simulate priors from a ", prior$Distribution, " distribution.")) } simulated$.bamboozled <- NULL simulated } bayestestR/R/describe_prior.R0000644000176200001440000000645214765755711015747 0ustar liggesusers#' Describe Priors #' #' Returns a summary of the priors used in the model. #' #' @param model A Bayesian model. #' @param ... Currently not used. 
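#' @return A data frame with a description of the priors: one row per
#' parameter, with columns for the prior distribution, location and scale
#' (depending on what the model provides).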
#' @inheritParams describe_posterior #' #' @examples #' \donttest{ #' library(bayestestR) #' #' # rstanarm models #' # ----------------------------------------------- #' if (require("rstanarm")) { #' model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) #' describe_prior(model) #' } #' #' # brms models #' # ----------------------------------------------- #' if (require("brms")) { #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' describe_prior(model) #' } #' #' # BayesFactor objects #' # ----------------------------------------------- #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' describe_prior(bf) #' } #' } #' @export describe_prior <- function(model, ...) { UseMethod("describe_prior") } #' @rdname describe_prior #' @export describe_prior.brmsfit <- function(model, parameters = NULL, ...) { .describe_prior(model, parameters = parameters, ...) } # Internal ---------------------------------------------------------------- #' @keywords internal .describe_prior <- function(model, parameters = NULL, ...) { priors <- insight::get_priors(model, ...) # Format names names(priors)[-1] <- paste0("Prior_", names(priors)[-1]) # If the prior scale has been adjusted, it is the actual scale that was used. if ("Prior_Adjusted_Scale" %in% names(priors)) { priors$Prior_Scale[!is.na(priors$Prior_Adjusted_Scale)] <- priors$Prior_Adjusted_Scale[!is.na(priors$Prior_Adjusted_Scale)] # nolint priors$Prior_Adjusted_Scale <- NULL } if ("Prior_Response" %in% names(priors)) { names(priors)[names(priors) == "Prior_Response"] <- "Response" } # make sure parameter names match between prior output and model cp <- insight::clean_parameters(model) ## TODO for now, only fixed effects if ("Effects" %in% names(cp)) { cp <- cp[cp$Effects == "fixed", ] } if (!is.null(parameters) && !all(priors$Parameter %in% parameters)) { cp$Cleaned_Parameter <- gsub("(.*)(\\.|\\[)\\d+(\\.|\\])", "\\1", cp$Cleaned_Parameter) cp$Cleaned_Parameter[cp$Cleaned_Parameter == "Intercept"] <- "(Intercept)" colnames(priors)[1] <- "Cleaned_Parameter" out <- merge(cp, priors, by = "Cleaned_Parameter", all = TRUE) out <- out[!duplicated(out$Parameter), ] priors <- out[intersect(colnames(out), c("Parameter", "Prior_Distribution", "Prior_df", "Prior_Location", "Prior_Scale", "Response"))] # nolint } priors } #' @export describe_prior.stanreg <- .describe_prior #' @export describe_prior.bcplm <- .describe_prior #' @export describe_prior.blavaan <- .describe_prior #' @export describe_prior.BFBayesFactor <- function(model, ...) { priors <- insight::get_priors(model) # Format names names(priors)[-1] <- paste0("Prior_", names(priors)[-1]) priors } # unsupported ---------------- #' @export describe_prior.BGGM <- function(model, ...) { NULL } #' @export describe_prior.BGGM <- describe_prior.BGGM #' @export describe_prior.bamlss <- describe_prior.BGGM #' @export describe_prior.draws <- describe_prior.BGGM #' @export describe_prior.rvar <- describe_prior.BGGM bayestestR/R/mcse.R0000644000176200001440000000535214765755711013701 0ustar liggesusers#' Monte-Carlo Standard Error (MCSE) #' #' This function returns the Monte Carlo Standard Error (MCSE). #' #' @inheritParams effective_sample #' #' @inheritSection hdi Model components #' #' @details **Monte Carlo Standard Error (MCSE)** is another measure of #' accuracy of the chains. It is defined as standard deviation of the chains #' divided by their effective sample size (the formula for `mcse()` is #' from Kruschke 2015, p. 187). 
The MCSE \dQuote{provides a quantitative #' suggestion of how big the estimation noise is}. #' #' @references Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. #' #' @examplesIf require("rstanarm") #' \donttest{ #' library(bayestestR) #' #' model <- suppressWarnings( #' rstanarm::stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) #' ) #' mcse(model) #' } #' @export mcse <- function(model, ...) { UseMethod("mcse") } #' @export mcse.brmsfit <- function(model, effects = "fixed", component = "conditional", parameters = NULL, ...) { # check arguments params <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) ess <- effective_sample( model, effects = effects, component = component, parameters = parameters ) .mcse(params, stats::setNames(ess$ESS, ess$Parameter)) } #' @rdname mcse #' @export mcse.stanreg <- function(model, effects = "fixed", component = "location", parameters = NULL, ...) { params <- insight::get_parameters( model, effects = effects, component = component, parameters = parameters ) ess <- effective_sample( model, effects = effects, component = component, parameters = parameters ) .mcse(params, stats::setNames(ess$ESS, ess$Parameter)) } #' @export mcse.stanfit <- mcse.stanreg #' @export mcse.blavaan <- mcse.stanreg #' @keywords internal .mcse <- function(params, ess) { # get standard deviations from posterior samples stddev <- sapply(params, stats::sd) # check proper length, and for unequal length, shorten all # objects to common parameters if (length(stddev) != length(ess)) { common <- stats::na.omit(match(names(stddev), names(ess))) stddev <- stddev[common] ess <- ess[common] params <- params[common] } # compute mcse data.frame( Parameter = colnames(params), MCSE = stddev / sqrt(ess), stringsAsFactors = FALSE, row.names = NULL ) } bayestestR/R/contr.equalprior.R0000644000176200001440000001573614746106624016260 0ustar liggesusers#' Contrast Matrices for Equal Marginal Priors in Bayesian Estimation #' #' Build contrasts for factors with equal marginal priors on all levels. The 3 #' functions give the same orthogonal contrasts, but are scaled differently to #' allow different prior specifications (see 'Details'). Implementation from #' Singmann & Gronau's [`bfrms`](https://github.com/bayesstuff/bfrms/), #' following the description in Rouder, Morey, Speckman, & Province (2012, p. #' 363). #' #' @inheritParams stats::contr.treatment #' #' @details #' When using [`stats::contr.treatment`], each dummy variable is the difference #' between each level and the reference level. While this is useful if setting #' different priors for each coefficient, it should not be used if one is trying #' to set a general prior for differences between means, as it (as well as #' [`stats::contr.sum`] and others) results in unequal marginal priors on the #' means the the difference between them. 
#'
#' ```
#' library(brms)
#'
#' data <- data.frame(
#'   group = factor(rep(LETTERS[1:4], each = 3)),
#'   y = rnorm(12)
#' )
#'
#' contrasts(data$group) # R's default contr.treatment
#' #>   B C D
#' #> A 0 0 0
#' #> B 1 0 0
#' #> C 0 1 0
#' #> D 0 0 1
#'
#' model_prior <- brm(
#'   y ~ group, data = data,
#'   sample_prior = "only",
#'   # Set the same priors on the 3 dummy variables
#'   # (Using an arbitrary scale)
#'   prior = set_prior("normal(0, 10)", coef = c("groupB", "groupC", "groupD"))
#' )
#'
#' est <- emmeans::emmeans(model_prior, pairwise ~ group)
#'
#' point_estimate(est, centr = "mean", disp = TRUE)
#' #> Point Estimate
#' #>
#' #> Parameter | Mean  | SD
#' #> -------------------------
#' #> A         | -0.01 | 6.35
#' #> B         | -0.10 | 9.59
#' #> C         | 0.11  | 9.55
#' #> D         | -0.16 | 9.52
#' #> A - B     | 0.10  | 9.94
#' #> A - C     | -0.12 | 9.96
#' #> A - D     | 0.15  | 9.87
#' #> B - C     | -0.22 | 14.38
#' #> B - D     | 0.05  | 14.14
#' #> C - D     | 0.27  | 14.00
#' ```
#'
#' We can see that the priors for the means aren't all the same (`A` having a
#' narrower prior), and likewise for the pairwise differences (priors for
#' differences from `A` are narrower).
#'
#' The solution is to use one of the methods provided here, which *do* result in
#' marginally equal priors on the means and on the differences between them.
#' Though this will obscure the interpretation of the individual parameters,
#' such equal marginal priors are needed for specifying the same prior on all
#' means of a factor and on their differences, and for the correct estimation
#' of Bayes factors for contrasts and order restrictions of multi-level factors
#' (where `k>2`). See info on specifying correct priors for factors with more than 2
#' levels in [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html).
#'
#' ***NOTE:*** When setting priors on these dummy variables, always:
#' 1. Use priors that are **centered on 0**! Other location/centered priors are meaningless!
#' 2. Use **identically-scaled priors** on all the dummy variables of a single factor!
#'
#' `contr.equalprior` returns the original orthogonal-normal contrasts as
#' described in Rouder, Morey, Speckman, & Province (2012, p. 363). Setting
#' `contrasts = FALSE` returns the \eqn{I_{n} - \frac{1}{n}} matrix.
#'
#' ## `contr.equalprior_pairs`
#'
#' Useful for setting priors in terms of pairwise differences between means -
#' the scale of the priors defines the prior distribution of the pairwise
#' differences between all the means (e.g., `A - B`, `B - C`, etc.).
#' #' ``` #' contrasts(data$group) <- contr.equalprior_pairs #' contrasts(data$group) #' #> [,1] [,2] [,3] #' #> A 0.0000000 0.6123724 0.0000000 #' #> B -0.1893048 -0.2041241 0.5454329 #' #> C -0.3777063 -0.2041241 -0.4366592 #' #> D 0.5670111 -0.2041241 -0.1087736 #' #' model_prior <- brm( #' y ~ group, data = data, #' sample_prior = "only", #' # Set the same priors on the 3 dummy variable #' # (Using an arbitrary scale) #' prior = set_prior("normal(0, 10)", coef = c("group1", "group2", "group3")) #' ) #' #' est <- emmeans(model_prior, pairwise ~ group) #' #' point_estimate(est, centr = "mean", disp = TRUE) #' #> Point Estimate #' #> #' #> Parameter | Mean | SD #' #> ------------------------- #' #> A | -0.31 | 7.46 #' #> B | -0.24 | 7.47 #' #> C | -0.34 | 7.50 #' #> D | -0.30 | 7.25 #' #> A - B | -0.08 | 10.00 #' #> A - C | 0.03 | 10.03 #' #> A - D | -0.01 | 9.85 #' #> B - C | 0.10 | 10.28 #' #> B - D | 0.06 | 9.94 #' #> C - D | -0.04 | 10.18 #' ``` #' #' All means have the same prior distribution, and the distribution of the #' differences matches the prior we set of `"normal(0, 10)"`. Success! #' #' ## `contr.equalprior_deviations` #' #' Useful for setting priors in terms of the deviations of each mean from the #' grand mean - the scales of the priors defines the prior distribution of the #' distance (above, below) the mean of one of the levels might have from the #' overall mean. (See examples.) #' #' #' @references #' Rouder, J. N., Morey, R. D., Speckman, P. L., & Province, J. M. (2012). #' Default Bayes factors for ANOVA designs. *Journal of Mathematical #' Psychology*, 56(5), 356-374. https://doi.org/10.1016/j.jmp.2012.08.001 #' #' @return A `matrix` with n rows and k columns, with k=n-1 if contrasts is #' `TRUE` and k=n if contrasts is `FALSE`. #' #' @aliases contr.bayes contr.orthonorm #' #' @examples #' contr.equalprior(2) # Q_2 in Rouder et al. (2012, p. 363) #' #' contr.equalprior(5) # equivalent to Q_5 in Rouder et al. (2012, p. 363) #' #' ## check decomposition #' Q3 <- contr.equalprior(3) #' Q3 %*% t(Q3) ## 2/3 on diagonal and -1/3 on off-diagonal elements #' @export contr.equalprior <- function(n, contrasts = TRUE, sparse = FALSE) { contr <- stats::contr.treatment(n, contrasts = FALSE, base = 1, sparse = sparse & !contrasts ) k <- nrow(contr) contr <- contr - 1 / k if (contrasts) { contr <- eigen(contr)$vectors[, seq_len(k - 1), drop = FALSE] } contr } #' @export #' @rdname contr.equalprior contr.equalprior_pairs <- function(n, contrasts = TRUE, sparse = FALSE) { contr <- contr.equalprior(n, contrasts, sparse) / sqrt(2) contr } #' @export #' @rdname contr.equalprior contr.equalprior_deviations <- function(n, contrasts = TRUE, sparse = FALSE) { contr <- contr.equalprior(n, contrasts, sparse) n <- nrow(contr) contr / sqrt(1 - 1 / n) } # OLD ------------------------------ #' @export contr.orthonorm <- function(n, contrasts = TRUE) { .Deprecated(new = "contr.equalprior", old = "contr.orthonorm") contr.equalprior(n, contrasts = contrasts) } #' @export contr.bayes <- function(n, contrasts = TRUE) { .Deprecated(new = "contr.equalprior", old = "contr.bayes") contr.equalprior(n, contrasts = contrasts) } bayestestR/R/reshape_iterations.R0000644000176200001440000000510714542333405016621 0ustar liggesusers#' Reshape estimations with multiple iterations (draws) to long format #' #' Reshape a wide data.frame of iterations (such as posterior draws or #' bootsrapped samples) as columns to long format. 
Instead of having all #' iterations as columns (e.g., `iter_1, iter_2, ...`), will return 3 columns #' with the `\*_index` (the previous index of the row), the `\*_group` (the #' iteration number) and the `\*_value` (the value of said iteration). #' #' @param x A data.frame containing posterior draws obtained from #' `estimate_response` or `estimate_link`. #' @param prefix The prefix of the draws (for instance, `"iter_"` for columns #' named as `iter_1, iter_2, iter_3`). If more than one are provided, will #' search for the first one that matches. #' @examples #' \donttest{ #' if (require("rstanarm")) { #' model <- stan_glm(mpg ~ am, data = mtcars, refresh = 0) #' draws <- insight::get_predicted(model) #' long_format <- reshape_iterations(draws) #' head(long_format) #' } #' } #' @return Data frame of reshaped draws in long format. #' @export reshape_iterations <- function(x, prefix = c("draw", "iter", "iteration", "sim")) { # Accomodate output from get_predicted if (inherits(x, "get_predicted") && "iterations" %in% names(attributes(x))) { x <- as.data.frame(x) } # Find columns' name prefix <- prefix[min(which(sapply(tolower(prefix), function(prefix) sum(grepl(prefix, tolower(names(x)), fixed = TRUE)) > 1)))] if (is.na(prefix) || is.null(prefix)) { insight::format_error( "Couldn't find columns corresponding to iterations in your dataframe, please specify the correct prefix." ) } # Get column names iter_cols <- tolower(names(x))[grepl(prefix, tolower(names(x)), fixed = TRUE)] # Drop "_" if prefix ends with it newname <- ifelse(endsWith(prefix, "_"), substr(prefix, 1, nchar(prefix) - 1), prefix) # Create Index column index_col <- paste0(newname, "_index") if (index_col %in% names(x)) index_col <- paste0(".", newname, "_index") x[[index_col]] <- seq_len(nrow(x)) # Reshape long <- stats::reshape(x, varying = iter_cols, idvar = index_col, v.names = paste0(newname, "_value"), timevar = paste0(newname, "_group"), direction = "long" ) row.names(long) <- NULL class(long) <- class(long)[which(inherits(long, "data.frame")):length(class(long))] long } #' @rdname reshape_iterations #' @export reshape_draws <- function(x, prefix = c("draw", "iter", "iteration", "sim")) { .Deprecated("reshape_iterations") reshape_iterations(x, prefix) } bayestestR/R/bayesfactor_parameters.R0000644000176200001440000005241615052646230017463 0ustar liggesusers#' Bayes Factors (BF) for a Single Parameter #' #' This method computes Bayes factors against the null (either a point or an #' interval), based on prior and posterior samples of a single parameter. This #' Bayes factor indicates the degree by which the mass of the posterior #' distribution has shifted further away from or closer to the null value(s) #' (relative to the prior distribution), thus indicating if the null value has #' become less or more likely given the observed data. #' \cr \cr #' When the null is an interval, the Bayes factor is computed by comparing the #' prior and posterior odds of the parameter falling within or outside the null #' interval (Morey & Rouder, 2011; Liao et al., 2020); When the null is a point, #' a Savage-Dickey density ratio is computed, which is also an approximation of #' a Bayes factor comparing the marginal likelihoods of the model against a #' model in which the tested parameter has been restricted to the point null #' (Wagenmakers et al., 2010; Heck, 2019). #' \cr \cr #' Note that the `logspline` package is used for estimating densities and #' probabilities, and must be installed for the function to work. 
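#' \cr \cr
#' As a rough conceptual sketch (not the exact internal code, and using
#' arbitrary illustrative draws), the point-null Bayes factor can be thought of
#' as the ratio of the prior to the posterior density at the null value, with
#' both densities estimated via `logspline`:
#'
#' ```
#' prior <- distribution_normal(4e4, mean = 0, sd = 1)
#' posterior <- distribution_normal(4e4, mean = 0.5, sd = 0.3)
#' f_prior <- logspline::logspline(prior)
#' f_posterior <- logspline::logspline(posterior)
#' # BF against the point null at 0 - compare to bayesfactor_parameters():
#' logspline::dlogspline(0, f_prior) / logspline::dlogspline(0, f_posterior)
#' ```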
#' \cr \cr #' `bayesfactor_pointnull()` and `bayesfactor_rope()` are wrappers #' around `bayesfactor_parameters` with different defaults for the null to #' be tested against (a point and a range, respectively). Aliases of the main #' functions are prefixed with `bf_*`, like `bf_parameters()` or #' `bf_pointnull()`. #' \cr \cr #' \strong{For more info, in particular on specifying correct priors for factors #' with more than 2 levels, see #' [the #' Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html).} #' #' @param posterior A numerical vector, `stanreg` / `brmsfit` object, #' `emmGrid` or a data frame - representing a posterior distribution(s) #' from (see 'Details'). #' @param prior An object representing a prior distribution (see 'Details'). #' @param direction Test type (see 'Details'). One of `0`, #' `"two-sided"` (default, two tailed), `-1`, `"left"` (left #' tailed) or `1`, `"right"` (right tailed). #' @param null Value of the null, either a scalar (for point-null) or a range #' (for a interval-null). #' @param ... Arguments passed to and from other methods. (Can be used to pass #' arguments to internal [logspline::logspline()].) #' @inheritParams hdi #' #' @inheritSection hdi Model components #' #' @return A data frame containing the (log) Bayes factor representing evidence #' *against* the null (Use `as.numeric()` to extract the non-log Bayes #' factors; see examples). #' #' @note There is also a #' [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) #' implemented in the #' \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @details #' This method is used to compute Bayes factors based on prior and posterior #' distributions. #' #' \subsection{One-sided & Dividing Tests (setting an order restriction)}{ #' One sided tests (controlled by `direction`) are conducted by restricting #' the prior and posterior of the non-null values (the "alternative") to one #' side of the null only (\cite{Morey & Wagenmakers, 2014}). For example, if we #' have a prior hypothesis that the parameter should be positive, the #' alternative will be restricted to the region to the right of the null (point #' or interval). For example, for a Bayes factor comparing the "null" of `0-0.1` #' to the alternative `>0.1`, we would set #' `bayesfactor_parameters(null = c(0, 0.1), direction = ">")`. #' \cr\cr #' It is also possible to compute a Bayes factor for **dividing** #' hypotheses - that is, for a null and alternative that are complementary, #' opposing one-sided hypotheses (\cite{Morey & Wagenmakers, 2014}). For #' example, for a Bayes factor comparing the "null" of `<0` to the alternative #' `>0`, we would set `bayesfactor_parameters(null = c(-Inf, 0))`. #' } #' #' @section Setting the correct `prior`: #' For the computation of Bayes factors, the model priors must be proper priors #' (at the very least they should be *not flat*, and it is preferable that #' they be *informative*); As the priors for the alternative get wider, the #' likelihood of the null value(s) increases, to the extreme that for completely #' flat priors the null is infinitely more favorable than the alternative (this #' is called *the Jeffreys-Lindley-Bartlett paradox*). Thus, you should #' only ever try (or want) to compute a Bayes factor when you have an informed #' prior. #' \cr\cr #' (Note that by default, `brms::brm()` uses flat priors for fixed-effects; #' See example below.) 
#' \cr\cr #' It is important to provide the correct `prior` for meaningful results, #' to match the `posterior`-type input: #' #' - **A numeric vector** - `prior` should also be a _numeric vector_, representing the prior-estimate. #' - **A data frame** - `prior` should also be a _data frame_, representing the prior-estimates, in matching column order. #' - If `rvar_col` is specified, `prior` should be _the name of an `rvar` column_ that represents the prior-estimates. #' - **Supported Bayesian model (`stanreg`, `brmsfit`, etc.)** #' - `prior` should be _a model an equivalent model with MCMC samples from the priors **only**_. See [unupdate()]. #' - If `prior` is set to `NULL`, [unupdate()] is called internally (not supported for `brmsfit_multiple` model). #' - **Output from a `{marginaleffects}` function** - `prior` should also be _an equivalent output_ from a `{marginaleffects}` function based on a prior-model #' (See [unupdate()]). #' - **Output from an `{emmeans}` function** #' - `prior` should also be _an equivalent output_ from an `{emmeans}` function based on a prior-model (See [unupdate()]). #' - `prior` can also be _the original (posterior) model_, in which case the function #' will try to "unupdate" the estimates (not supported if the estimates have undergone #' any transformations -- `"log"`, `"response"`, etc. -- or any `regrid`ing). #' #' @section Interpreting Bayes Factors: #' A Bayes factor greater than 1 can be interpreted as evidence against the #' null, at which one convention is that a Bayes factor greater than 3 can be #' considered as "substantial" evidence against the null (and vice versa, a #' Bayes factor smaller than 1/3 indicates substantial evidence in favor of the #' null-model) (\cite{Wetzels et al. 2011}). #' #' @examplesIf require("logspline") #' library(bayestestR) #' prior <- distribution_normal(1000, mean = 0, sd = 1) #' posterior <- distribution_normal(1000, mean = .5, sd = .3) #' (BF_pars <- bayesfactor_parameters(posterior, prior, verbose = FALSE)) #' #' as.numeric(BF_pars) #' #' @examplesIf require("rstanarm") && require("emmeans") && require("logspline") #' \donttest{ #' # rstanarm models #' # --------------- #' contrasts(sleep$group) <- contr.equalprior_pairs # see vingette #' stan_model <- suppressWarnings(stan_lmer( #' extra ~ group + (1 | ID), #' data = sleep, #' refresh = 0 #' )) #' bayesfactor_parameters(stan_model, verbose = FALSE) #' bayesfactor_parameters(stan_model, null = rope_range(stan_model)) #' #' # emmGrid objects #' # --------------- #' group_diff <- pairs(emmeans(stan_model, ~group, data = sleep)) #' bayesfactor_parameters(group_diff, prior = stan_model, verbose = FALSE) #' #' # Or #' # group_diff_prior <- pairs(emmeans(unupdate(stan_model), ~group)) #' # bayesfactor_parameters(group_diff, prior = group_diff_prior, verbose = FALSE) #' } #' @examplesIf require("brms") && require("logspline") #' # brms models #' # ----------- #' \dontrun{ #' contrasts(sleep$group) <- contr.equalprior_pairs # see vingette #' my_custom_priors <- #' set_prior("student_t(3, 0, 1)", class = "b") + #' set_prior("student_t(3, 0, 1)", class = "sd", group = "ID") #' #' brms_model <- suppressWarnings(brm(extra ~ group + (1 | ID), #' data = sleep, #' prior = my_custom_priors, #' refresh = 0 #' )) #' bayesfactor_parameters(brms_model, verbose = FALSE) #' } #' #' @references #' #' - Wagenmakers, E. J., Lodewyckx, T., Kuriyal, H., and Grasman, R. (2010). #' Bayesian hypothesis testing for psychologists: A tutorial on the #' Savage-Dickey method. 
Cognitive psychology, 60(3), 158-189. #' #' - Heck, D. W. (2019). A caveat on the Savage–Dickey density ratio: The #' case of computing Bayes factors for regression parameters. British Journal of #' Mathematical and Statistical Psychology, 72(2), 316-333. #' #' - Morey, R. D., & Wagenmakers, E. J. (2014). Simple relation between #' Bayesian order-restricted and point-null hypothesis tests. Statistics & #' Probability Letters, 92, 121-124. #' #' - Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for #' testing interval null hypotheses. Psychological methods, 16(4), 406. #' #' - Liao, J. G., Midya, V., & Berg, A. (2020). Connecting and contrasting #' the Bayes factor and a modified ROPE procedure for testing interval null #' hypotheses. The American Statistician, 1-19. #' #' - Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and #' Wagenmakers, E.-J. (2011). Statistical Evidence in Experimental Psychology: #' An Empirical Comparison Using 855 t Tests. Perspectives on Psychological #' Science, 6(3), 291–298. \doi{10.1177/1745691611406923} #' #' @author Mattan S. Ben-Shachar #' #' @export bayesfactor_parameters <- function(posterior, prior = NULL, direction = "two-sided", null = 0, ..., verbose = TRUE) { UseMethod("bayesfactor_parameters") } #' @rdname bayesfactor_parameters #' @export bayesfactor_pointnull <- function(posterior, prior = NULL, direction = "two-sided", null = 0, ..., verbose = TRUE) { if (length(null) > 1L && verbose) { insight::format_alert("`null` is a range - computing a ROPE based Bayes factor.") } bayesfactor_parameters( posterior = posterior, prior = prior, direction = direction, null = null, verbose = verbose, ... ) } #' @rdname bayesfactor_parameters #' @export bayesfactor_rope <- function(posterior, prior = NULL, direction = "two-sided", null = rope_range(posterior, verbose = FALSE), ..., verbose = TRUE) { if (length(null) < 2 && verbose) { insight::format_alert("'null' is a point - computing a Savage-Dickey (point null) Bayes factor.") } bayesfactor_parameters( posterior = posterior, prior = prior, direction = direction, null = null, verbose = verbose, ... ) } #' @rdname bayesfactor_parameters #' @export bf_parameters <- bayesfactor_parameters #' @rdname bayesfactor_parameters #' @export bf_pointnull <- bayesfactor_pointnull #' @rdname bayesfactor_parameters #' @export bf_rope <- bayesfactor_rope #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.numeric <- function(posterior, prior = NULL, direction = "two-sided", null = 0, ..., verbose = TRUE) { # nm <- insight::safe_deparse(substitute(posterior) if (is.null(prior)) { prior <- posterior if (verbose) { insight::format_warning( "Prior not specified! Please specify a prior (in the form 'prior = distribution_normal(1000, 0, 1)') to get meaningful results." ) } } prior <- data.frame(X = prior) posterior <- data.frame(X = posterior) # colnames(posterior) <- colnames(prior) <- nm # Get BFs sdbf <- bayesfactor_parameters.data.frame( posterior = posterior, prior = prior, direction = direction, null = null, verbose = verbose, ... ) sdbf$Parameter <- NULL sdbf } #' @rdname bayesfactor_parameters #' @export bayesfactor_parameters.stanreg <- function(posterior, prior = NULL, direction = "two-sided", null = 0, effects = "fixed", component = "conditional", parameters = NULL, ..., verbose = TRUE) { cleaned_parameters <- .get_cleaned_parameters(posterior, ...) 
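  # retrieve matching posterior and prior draws; when `prior` is NULL, prior
  # samples are obtained by sampling from the priors only (see `unupdate()`)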
samps <- .clean_priors_and_posteriors(posterior, prior, effects = effects, component = component, parameters = parameters, verbose = verbose ) # Get BFs temp <- bayesfactor_parameters.data.frame( posterior = samps$posterior, prior = samps$prior, direction = direction, null = null, verbose = verbose, ... ) bf_val <- .prepare_output(temp, cleaned_parameters, inherits(posterior, "stanmvreg")) class(bf_val) <- class(temp) attr(bf_val, "clean_parameters") <- cleaned_parameters # don't change the name of this attribute - it is used only internally for "see" and printing attr(bf_val, "hypothesis") <- attr(temp, "hypothesis") attr(bf_val, "direction") <- attr(temp, "direction") attr(bf_val, "plot_data") <- attr(temp, "plot_data") bf_val } #' @export bayesfactor_parameters.brmsfit <- bayesfactor_parameters.stanreg #' @export bayesfactor_parameters.blavaan <- function(posterior, prior = NULL, direction = "two-sided", null = 0, ..., verbose = TRUE) { cleaned_parameters <- insight::clean_parameters(posterior) samps <- .clean_priors_and_posteriors(posterior, prior, verbose = verbose ) # Get BFs temp <- bayesfactor_parameters.data.frame( posterior = samps$posterior, prior = samps$prior, direction = direction, null = null, verbose = verbose, ... ) bf_val <- .prepare_output(temp, cleaned_parameters) class(bf_val) <- class(temp) attr(bf_val, "clean_parameters") <- cleaned_parameters # don't change the name of this attribute - it is used only internally for "see" and printing attr(bf_val, "hypothesis") <- attr(temp, "hypothesis") attr(bf_val, "direction") <- attr(temp, "direction") attr(bf_val, "plot_data") <- attr(temp, "plot_data") bf_val } #' @export bayesfactor_parameters.emmGrid <- function(posterior, prior = NULL, direction = "two-sided", null = 0, ..., verbose = TRUE) { samps <- .clean_priors_and_posteriors( posterior, prior, verbose = verbose ) # Get BFs out <- bayesfactor_parameters.data.frame( posterior = samps$posterior, prior = samps$prior, direction = direction, null = null, verbose = verbose, ... ) .append_datagrid(out, posterior) } #' @export bayesfactor_parameters.emm_list <- bayesfactor_parameters.emmGrid #' @export bayesfactor_parameters.slopes <- bayesfactor_parameters.emmGrid #' @export bayesfactor_parameters.predictions <- bayesfactor_parameters.emmGrid #' @export bayesfactor_parameters.comparisons <- bayesfactor_parameters.emmGrid #' @rdname bayesfactor_parameters #' @inheritParams p_direction #' @export bayesfactor_parameters.data.frame <- function(posterior, prior = NULL, direction = "two-sided", null = 0, rvar_col = NULL, ..., verbose = TRUE) { x_rvar <- .possibly_extract_rvar_col(posterior, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::bayesfactor_parameters cl$posterior <- x_rvar cl$rvar_col <- NULL prior_rvar <- .possibly_extract_rvar_col(posterior, prior) if (length(prior_rvar) > 0L) { cl$prior <- prior_rvar } out <- eval.parent(cl) obj_name <- insight::safe_deparse_symbol(substitute(posterior)) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, posterior)) } # find direction direction <- .get_direction(direction) if (is.null(prior)) { prior <- posterior if (verbose) { insight::format_warning( "Prior not specified! Please specify priors (with column order matching 'posterior') to get meaningful results." 
) } } if (verbose && length(null) == 1L && (nrow(posterior) < 4e4 || nrow(prior) < 4e4)) { insight::format_warning( "Bayes factors might not be precise.", "For precise Bayes factors, sampling at least 40,000 posterior samples is recommended." ) } sdlogbf <- numeric(ncol(posterior)) for (par in seq_along(posterior)) { sdlogbf[par] <- .logbayesfactor_parameters( posterior[[par]], prior[[par]], direction = direction, null = null, ... ) } bf_val <- data.frame( Parameter = colnames(posterior), log_BF = sdlogbf, stringsAsFactors = FALSE ) class(bf_val) <- unique(c( "bayesfactor_parameters", "see_bayesfactor_parameters", class(bf_val) )) attr(bf_val, "hypothesis") <- null # don't change the name of this attribute - it is used only internally for "see" and printing attr(bf_val, "direction") <- direction attr(bf_val, "plot_data") <- .make_BF_plot_data(posterior, prior, direction, null, ...) bf_val } #' @export bayesfactor_parameters.draws <- function(posterior, prior = NULL, direction = "two-sided", null = 0, ..., verbose = TRUE) { bayesfactor_parameters( .posterior_draws_to_df(posterior), prior = if (!is.null(prior)) .posterior_draws_to_df(prior), direction = direction, null = null, verbose = verbose, ... ) } #' @export bayesfactor_parameters.rvar <- bayesfactor_parameters.draws #' @keywords internal .logbayesfactor_parameters <- function(posterior, prior, direction = 0, null = 0, ...) { stopifnot(length(null) %in% c(1, 2)) if (isTRUE(all.equal(posterior, prior))) { return(0) } insight::check_if_installed("logspline") if (length(null) == 1) { relative_loglikelihood <- function(samples) { f_samples <- .logspline(samples, ...) d_samples <- logspline::dlogspline(null, f_samples, log = TRUE) if (direction < 0) { norm_samples <- logspline::plogspline(null, f_samples) } else if (direction > 0) { norm_samples <- 1 - logspline::plogspline(null, f_samples) } else { norm_samples <- 1 } d_samples - log(norm_samples) } } else if (length(null) == 2) { null <- sort(null) null[is.infinite(null)] <- 1.797693e+308 * sign(null[is.infinite(null)]) relative_loglikelihood <- function(samples) { f_samples <- .logspline(samples, ...) p_samples <- diff(logspline::plogspline(null, f_samples)) if (direction < 0) { norm_samples <- logspline::plogspline(min(null), f_samples) } else if (direction > 0) { norm_samples <- 1 - logspline::plogspline(max(null), f_samples) } else { norm_samples <- 1 - p_samples } log(p_samples) - log(norm_samples) } } relative_loglikelihood(prior) - relative_loglikelihood(posterior) } # Bad Methods ------------------------------------------------------------- #' @export bayesfactor_parameters.bayesfactor_models <- function(...) { insight::format_error( "Oh no, 'bayesfactor_parameters()' does not know how to deal with multiple models :(", "You might want to use 'bayesfactor_inclusion()' here to test specific terms across models." ) } #' @export bayesfactor_parameters.sim <- function(...) { insight::format_error( "Bayes factors are based on the shift from a prior to a posterior.", "Since simulated draws are not based on any priors, computing Bayes factors does not make sense :(", "You might want to try `rope`, `ci`, `pd` or `pmap` for posterior-based inference." ) } #' @export bayesfactor_parameters.sim.merMod <- bayesfactor_parameters.sim bayestestR/R/print.rope.R0000644000176200001440000000617715052646230015041 0ustar liggesusers#' @export print.rope <- function(x, digits = 2, ...) { orig_x <- x # If the model is multivariate, we have have different ROPES depending on # the outcome variable. 
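  # For such models the ROPE limits are therefore not printed in the header;
  # instead, a separate "ROPE width" column is added to the table further below.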
is_multivariate <- length(unique(x$Response)) > 1 if (isTRUE(is_multivariate)) { insight::print_color(sprintf( "# Proportion%s of samples inside the ROPE.\nROPE with depends on outcome variable.\n\n", ifelse(all(x$CI[1] == x$CI), "", "s") ), "blue") } else { insight::print_color(sprintf( "# Proportion%s of samples inside the ROPE [%.*f, %.*f]:\n\n", ifelse(all(x$CI[1] == x$CI), "", "s"), digits, x$ROPE_low[1], digits, x$ROPE_high[1] ), "blue") } # I think this is something nobody will understand and we'll probably forget # why we did this, so I'll comment a bit... # These are the base columns we want to print cols <- c( attr(x, "idvars"), "Parameter", "ROPE_Percentage", "Superiority_Percentage", "Inferiority_Percentage", "Effects", "Component", if (is_multivariate) c("ROPE_low", "ROPE_high") ) # In case we have ropes for different CIs, we also want this information # So we first check if values in the CI column differ, and if so, we also # keep this column for printing if (!all(x$CI[1] == x$CI)) { cols <- c("CI", cols) } # Either way, we need to know the different CI-values, so we can # split the data frame for printing later... ci <- unique(x$CI) # now we check which of the requested columns are actually in our data frame "x" # "x" may differ, depending on if "rope()" was called with a model-object, # or with a simple vector. So we can't hard-code this x <- subset(x, select = intersect(cols, colnames(x))) # This is just cosmetics, to have nicer column names and values iv <- intersect(colnames(x), c("ROPE_Percentage", "Superiority_Percentage", "Inferiority_Percentage")) x[iv] <- lapply(x[iv], function(v) sprintf("%.*f %%", digits, v * 100)) colnames(x)[colnames(x) == "ROPE_Percentage"] <- "Inside ROPE" colnames(x)[colnames(x) == "Superiority_Percentage"] <- "Above ROPE" colnames(x)[colnames(x) == "Inferiority_Percentage"] <- "Below ROPE" # Add ROPE width for multivariate models if (isTRUE(is_multivariate)) { # This is just cosmetics, to have nicer column names and values x$ROPE_low <- sprintf("[%.*f, %.*f]", digits, x$ROPE_low, digits, x$ROPE_high) colnames(x)[which(colnames(x) == "ROPE_low")] <- "ROPE width" x$ROPE_high <- NULL } # In case we have multiple CI values, we create a subset for each CI value. # Else, parameter-rows would be mixed up with both CIs, which is a bit # more difficult to read... if (length(ci) == 1) { # print complete data frame, because we have no different CI values here .print_data_frame(x, digits = digits) } else { for (i in ci) { xsub <- x[x$CI == i, -which(colnames(x) == "CI"), drop = FALSE] insight::print_color(sprintf("ROPE for the %s%% HDI:\n\n", 100 * i), "cyan") .print_data_frame(xsub, digits = digits) cat("\n") } } invisible(orig_x) } bayestestR/R/bayestestR-package.R0000644000176200001440000000166214542333405016451 0ustar liggesusers#' \code{bayestestR} #' #' @title bayestestR: Describing Effects and their Uncertainty, Existence and #' Significance within the Bayesian Framework #' #' @description #' #' Existing R packages allow users to easily fit a large variety of models #' and extract and visualize the posterior draws. However, most of these #' packages only return a limited set of indices (e.g., point-estimates and #' CIs). **bayestestR** provides a comprehensive and consistent set of #' functions to analyze and describe posterior distributions generated by a #' variety of models objects, including popular modeling packages such as #' **rstanarm**, **brms** or **BayesFactor**. #' #' References: #' #' - Makowski et al. 
(2019) \doi{10.21105/joss.01541} #' - Makowski et al. (2019) \doi{10.3389/fpsyg.2019.02767} #' #' @docType package #' @aliases bayestestR bayestestR-package #' @name bayestestR-package #' @keywords internal "_PACKAGE" bayestestR/R/utils_clean_stan_parameters.R0000644000176200001440000000174015052646230020502 0ustar liggesusers#' @keywords internal .clean_up_tmp_stanreg <- function(tmp, group, cols, parms) { tmp$Group <- group tmp$Parameter <- rep(parms, each = nrow(tmp) / length(parms)) rownames(tmp) <- NULL tmp <- tmp[, c("Parameter", cols)] # clean random effects notation from parameters # tmp$Parameter <- gsub("b\\[(.*) (.*)\\]", "\\2", tmp$Parameter) tmp } #' @keywords internal .clean_up_tmp_brms <- function(tmp, group, component, cols, parms) { tmp$Group <- group tmp$Component <- component tmp$Parameter <- rep(parms, each = nrow(tmp) / length(parms)) rownames(tmp) <- NULL tmp <- tmp[, c("Parameter", cols)] # clean random effects notation from parameters # tmp$Parameter <- gsub("r_(.*)\\.(.*)\\.", "\\1", tmp$Parameter) tmp } #' @keywords internal .get_cleaned_parameters <- function(x, ...) { dots <- list(...) if ("cleaned_parameters" %in% names(dots)) { return(dots$cleaned_parameters) } insight::clean_parameters(x) } bayestestR/R/bayesfactor.R0000644000176200001440000000640314765755711015242 0ustar liggesusers#' Bayes Factors (BF) #' #' This function computes the Bayes factors (BFs) that are appropriate to the #' input. For vectors or single models, it will compute [`BFs for single #' parameters`][bayesfactor_parameters], or, if `hypothesis` is specified, #' [`BFs for restricted models`][bayesfactor_restricted]. For multiple models, #' it will return the BF corresponding to [`comparison between #' models`][bayesfactor_models], and if a model comparison is passed, it will #' compute the [`inclusion BF`][bayesfactor_inclusion]. #' \cr\cr #' For a complete overview of these functions, read the [Bayes factor vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html). #' #' @param ... A numeric vector, model object(s), or the output from #' `bayesfactor_models`. #' @inheritParams bayesfactor_parameters #' @inheritParams bayesfactor_restricted #' @inheritParams bayesfactor_models #' @inheritParams bayesfactor_inclusion #' #' @return Some type of Bayes factor, depending on the input. See #' [`bayesfactor_parameters()`], [`bayesfactor_models()`] or [`bayesfactor_inclusion()`]. #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}.
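#'
#' @details
#' A rough sketch of the dispatch logic described above (illustrative only;
#' `posterior`, `prior`, `m0`, `m1`, `m2` and the `hypothesis` string are
#' placeholders for the kinds of inputs shown in the examples below):
#'
#' ```r
#' bayesfactor(posterior, prior = prior)                         # -> bayesfactor_parameters()
#' bayesfactor(posterior, prior = prior, hypothesis = "x > 0")   # -> bayesfactor_restricted()
#' bayesfactor(m0, m1, m2)                                       # -> bayesfactor_models()
#' bayesfactor(bayesfactor(m0, m1, m2))                          # -> bayesfactor_inclusion()
#' ```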
#' #' @examplesIf require("rstanarm") && require("logspline") #' \dontrun{ #' library(bayestestR) #' #' prior <- distribution_normal(1000, mean = 0, sd = 1) #' posterior <- distribution_normal(1000, mean = 0.5, sd = 0.3) #' #' bayesfactor(posterior, prior = prior, verbose = FALSE) #' #' # rstanarm models #' # --------------- #' model <- suppressWarnings(rstanarm::stan_lmer(extra ~ group + (1 | ID), data = sleep)) #' bayesfactor(model, verbose = FALSE) #' #' # Frequentist models #' # --------------- #' m0 <- lm(extra ~ 1, data = sleep) #' m1 <- lm(extra ~ group, data = sleep) #' m2 <- lm(extra ~ group + ID, data = sleep) #' #' comparison <- bayesfactor(m0, m1, m2) #' comparison #' #' bayesfactor(comparison) #' } #' @export bayesfactor <- function(..., prior = NULL, direction = "two-sided", null = 0, hypothesis = NULL, effects = "fixed", verbose = TRUE, denominator = 1, match_models = FALSE, prior_odds = NULL) { mods <- list(...) if (length(mods) > 1) { bayesfactor_models(..., denominator = denominator) } else if (inherits(mods[[1]], "bayesfactor_models")) { bayesfactor_inclusion(..., match_models = match_models, prior_odds = prior_odds) } else if (inherits(mods[[1]], "BFBayesFactor")) { if (inherits(mods[[1]]@numerator[[1]], "BFlinearModel")) { bayesfactor_inclusion(..., match_models = match_models, prior_odds = prior_odds) } else { bayesfactor_models(...) } } else if (is.null(hypothesis)) { bayesfactor_parameters( ..., prior = prior, direction = direction, null = null, effects = effects, verbose = verbose ) } else { bayesfactor_restricted(..., prior = prior, verbose = verbose, effects = effects ) } } bayestestR/R/utils_bayesfactor.R0000644000176200001440000003223614746106624016465 0ustar liggesusers# clean priors and posteriors --------------------------------------------- #' @keywords internal .clean_priors_and_posteriors <- function(posterior, prior, ...) { UseMethod(".clean_priors_and_posteriors") } #' @keywords internal .clean_priors_and_posteriors.stanreg <- function(posterior, prior, verbose = TRUE, ...) { # Get Priors if (is.null(prior)) { prior <- posterior } prior <- try(unupdate(prior, verbose = verbose), silent = TRUE) if (methods::is(prior, "try-error")) { if (grepl("flat priors", prior, fixed = TRUE)) { prior <- paste0( prior, "Could not therefore compute Bayes factors, as these inform about ", "the raltive likelihood of two 'hypotheses', and flat priors provide no ", "likelihood.\n", "See '?bayesfactor_parameters' for more information.\n" ) } insight::format_error(prior) } prior <- insight::get_parameters(prior, ...) posterior <- insight::get_parameters(posterior, ...) list( posterior = posterior, prior = prior ) } #' @keywords internal .clean_priors_and_posteriors.brmsfit <- .clean_priors_and_posteriors.stanreg #' @keywords internal .clean_priors_and_posteriors.blavaan <- function(posterior, prior, verbose = TRUE, ...) { # Get Priors if (is.null(prior)) { prior <- posterior } prior <- unupdate(prior, verbose = verbose) prior <- insight::get_parameters(prior) posterior <- insight::get_parameters(posterior) list( posterior = posterior, prior = prior ) } #' @keywords internal .clean_priors_and_posteriors.emmGrid <- function(posterior, prior, verbose = TRUE, ...) { insight::check_if_installed("emmeans") if (is.null(prior)) { prior <- posterior if (verbose) { insight::format_warning("Prior not specified! 
Please provide the original model to get meaningful results.") } } if (!inherits(prior, "emmGrid")) { # then is it a model on.exit( insight::format_error(paste0( "Unable to reconstruct prior estimates.\n", "Perhaps the emmGrid object has been transformed or regrid()-ed?\n", "See function details.\n\n", "Instead, you can reestimate the emmGrid with a prior model. Try:\n", "\tprior_model <- unupdate(model)\n", "\tprior_emmgrid <- emmeans(prior_model, ...) # pass this as the 'prior' argument." )) ) if (inherits(prior, "brmsfit")) { insight::format_error("Cannot rebuild prior emmGrid from a brmsfit model.") } prior <- try(unupdate(prior, verbose = verbose), silent = TRUE) if (inherits(prior, "try-error")) { on.exit() # undo general error message if (grepl("flat priors", prior, fixed = TRUE)) { prior <- paste0( prior, "Could not therefore compute Bayes factors, as these inform about ", "the relative likelihood of two 'hypotheses', and flat priors provide no ", "likelihood.\n", "See '?bayesfactor_parameters' for more information.\n" ) } insight::format_error(prior) } prior <- suppressWarnings(emmeans::ref_grid(prior)) prior <- prior@post.beta if (!isTRUE(all.equal(colnames(prior), colnames(posterior@post.beta)))) { insight::format_error("post.beta and prior.beta are non-conformable arguments.") } prior <- stats::update(posterior, post.beta = prior) on.exit() # undo general error message } prior <- insight::get_parameters(prior) posterior <- insight::get_parameters(posterior) list( posterior = posterior, prior = prior ) } .clean_priors_and_posteriors.emm_list <- function(posterior, prior, verbose = TRUE, ...) { if (is.null(prior)) { prior <- posterior if (verbose) { insight::format_warning("Prior not specified! Please provide the original model to get meaningful results.") } } if (!inherits(prior, "emm_list")) { # prior is a model if (inherits(prior, "brmsfit")) { insight::format_error("Cannot rebuild prior emm_list from a brmsfit model.") } prior <- try(unupdate(prior, verbose = verbose), silent = TRUE) if (inherits(prior, "try-error")) { if (grepl("flat priors", prior, fixed = TRUE)) { prior <- paste0( prior, "Could not therefore compute Bayes factors, as these inform about ", "the relative likelihood of two 'hypotheses', and flat priors provide no ", "likelihood.\n", "See '?bayesfactor_parameters' for more information.\n" ) } insight::format_error(prior) } } # prior is now a model, or emm_list # is it a model? pass_em <- inherits(prior, "emm_list") res <- lapply(seq_along(posterior), function(i) { .clean_priors_and_posteriors.emmGrid( posterior[[i]], prior = if (pass_em) prior[[i]] else prior, verbose = verbose ) }) posterior <- do.call("cbind", lapply(res, "[[", "posterior")) prior <- do.call("cbind", lapply(res, "[[", "prior")) list( posterior = posterior, prior = prior ) } .clean_priors_and_posteriors.slopes <- function(posterior, prior, verbose = TRUE, ...) { if (is.null(prior)) { prior <- posterior if (verbose) { insight::format_warning("Prior not specified!
Please provide the original model to get meaningful results.") } } posterior <- .get_marginaleffects_draws(posterior) prior <- .get_marginaleffects_draws(prior) list( posterior = posterior, prior = prior ) } .clean_priors_and_posteriors.predictions <- .clean_priors_and_posteriors.slopes .clean_priors_and_posteriors.comparisons <- .clean_priors_and_posteriors.slopes # BMA --------------------------------------------------------------------- #' @keywords internal .get_model_table <- function(BFGrid, priorOdds = NULL, add_effects_table = TRUE, ...) { denominator <- attr(BFGrid, "denominator") BFGrid <- rbind(BFGrid[denominator, ], BFGrid[-denominator, ]) attr(BFGrid, "denominator") <- 1 # This looks like it does nothing, but this is needed to prevent Inf in large BFs. # Small BFs are better than large BFs BFGrid <- stats::update(BFGrid, reference = "top") # Prior and post odds Modelnames <- BFGrid$Model if (is.null(priorOdds)) { priorOdds <- rep(1, length(Modelnames) - 1) } priorOdds <- c(1, priorOdds) prior_logodds <- log(priorOdds) posterior_logodds <- prior_logodds + BFGrid$log_BF # norm prior_logodds <- prior_logodds - log(sum(exp(prior_logodds))) posterior_logodds <- posterior_logodds - log(sum(exp(posterior_logodds))) df.model <- data.frame( Modelnames, priorProbs = exp(prior_logodds), postProbs = exp(posterior_logodds), stringsAsFactors = FALSE ) # add effects table if (add_effects_table) { for (m in seq_len(nrow(df.model))) { tmp_terms <- .make_terms(df.model$Modelnames[m]) if (length(tmp_terms) > 0) { missing_terms <- !tmp_terms %in% colnames(df.model) if (any(missing_terms)) df.model[, tmp_terms[missing_terms]] <- NA df.model[m, tmp_terms] <- TRUE } } } df.model[is.na(df.model)] <- FALSE df.model } #' @keywords internal .make_terms <- function(formula) { sort_interactions <- function(x) { if (grepl(":", x, fixed = TRUE)) { effs <- unlist(strsplit(x, ":", fixed = TRUE)) x <- paste0(sort(effs), collapse = ":") } x } formula.f <- stats::as.formula(paste0("~", formula)) all.terms <- attr(stats::terms(formula.f), "term.labels") # Fixed fix_trms <- all.terms[!grepl("|", all.terms, fixed = TRUE)] # no random if (length(fix_trms) > 0) { fix_trms <- sapply(fix_trms, sort_interactions) } # Random random_parts <- paste0(grep("|", all.terms, fixed = TRUE, value = TRUE)) # only random if (length(random_parts) == 0) { return(fix_trms) } random_units <- sub("^.+\\|\\s+", "", random_parts) tmp_random <- lapply( sub("\\|.+$", "", random_parts), function(x) stats::as.formula(paste0("~", x)) ) rand_trms <- vector("list", length(random_parts)) for (i in seq_along(random_parts)) { tmp_trms <- attr(stats::terms.formula(tmp_random[[i]]), "term.labels") tmp_trms <- sapply(tmp_trms, sort_interactions) if (!any(unlist(strsplit(as.character(tmp_random[[i]])[[2]], " + ", fixed = TRUE)) == "0")) { tmp_trms <- c("1", tmp_trms) } rand_trms[[i]] <- paste0(tmp_trms, ":", random_units[[i]]) } c(fix_trms, unlist(rand_trms)) } # make_BF_plot_data ------------------------------------------------------- #' @keywords internal .make_BF_plot_data <- function(posterior, prior, direction, null, extend_scale = 0.05, precision = 2^8, ...) { insight::check_if_installed("logspline") estimate_samples_density <- function(samples) { nm <- insight::safe_deparse_symbol(substitute(samples)) samples <- utils::stack(samples) samples <- split(samples, samples$ind) samples <- lapply(samples, function(data) { # 1. 
estimate density x <- data$values x_range <- range(x) x_rangex <- stats::median(x) + 7 * stats::mad(x) * c(-1, 1) x_range <- c( max(c(x_range[1], x_rangex[1])), min(c(x_range[2], x_rangex[2])) ) x_range <- range(c(x_range, null)[!is.infinite(c(x_range, null))]) extension_scale <- diff(x_range) * extend_scale x_range <- x_range + c(-1, 1) * extension_scale x_axis <- seq(x_range[1], x_range[2], length.out = precision) # x_axis <- sort(unique(c(x_axis, null))) f_x <- .logspline(x, ...) y <- logspline::dlogspline(x_axis, f_x) d_points <- data.frame(x = x_axis, y = y) # 2. estimate points d_null <- stats::approx(d_points$x, d_points$y, xout = null) d_null$y[is.na(d_null$y)] <- 0 # 3. direction? if (direction > 0) { d_points <- d_points[d_points$x >= min(null), , drop = FALSE] if (is.infinite(min(null))) { norm_factor <- 1 } else { norm_factor <- 1 - logspline::plogspline(min(null), f_x) } d_points$y <- d_points$y / norm_factor d_null$y <- d_null$y / norm_factor } else if (direction < 0) { d_points <- d_points[d_points$x <= max(null), , drop = FALSE] if (is.infinite(max(null))) { norm_factor <- 1 } else { norm_factor <- logspline::plogspline(max(null), f_x) } d_points$y <- d_points$y / norm_factor d_null$y <- d_null$y / norm_factor } d_points$ind <- d_null$ind <- data$ind[1] list(d_points, d_null) }) # 4a. organize point0 <- lapply(samples, function(.) as.data.frame(.[[2]])) point0 <- do.call("rbind", point0) samplesX <- lapply(samples, function(.) .[[1]]) samplesX <- do.call("rbind", samplesX) samplesX$Distribution <- point0$Distribution <- nm rownames(samplesX) <- rownames(point0) <- NULL list(samplesX, point0) } # 4b. orgenize posterior <- estimate_samples_density(posterior) prior <- estimate_samples_density(prior) list( plot_data = rbind(posterior[[1]], prior[[1]]), d_points = rbind(posterior[[2]], prior[[2]]) ) } # As numeric vector ------------------------------------------------------- #' @export as.numeric.bayesfactor_inclusion <- function(x, log = FALSE, ...) { out <- x[["log_BF"]] if (!log) out <- exp(out) return(out) } #' @export as.numeric.bayesfactor_models <- as.numeric.bayesfactor_inclusion #' @export as.numeric.bayesfactor_parameters <- as.numeric.bayesfactor_inclusion #' @export as.numeric.bayesfactor_restricted <- as.numeric.bayesfactor_inclusion ## Double: #' @export as.double.bayesfactor_inclusion <- as.numeric.bayesfactor_inclusion #' @export as.double.bayesfactor_models <- as.numeric.bayesfactor_inclusion #' @export as.double.bayesfactor_parameters <- as.numeric.bayesfactor_inclusion #' @export as.double.bayesfactor_restricted <- as.numeric.bayesfactor_inclusion # logspline --------------------------------------------------------------- #' @keywords internal .logspline <- function(x, ...) { insight::check_if_installed("logspline") in_args <- list(...) 
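  # Keep only the arguments that logspline::logspline() itself accepts, so
  # that unrelated arguments passed through `...` by callers are dropped.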
# arg_names <- names(formals(logspline::logspline, envir = parent.frame())) arg_names <- names(formals(logspline::logspline)) in_args <- in_args[names(in_args) %in% arg_names] in_args <- c(list(x = x), in_args) suppressWarnings(do.call(logspline::logspline, in_args)) } bayestestR/R/utils.R0000644000176200001440000001660315054263466014104 0ustar liggesusers# small wrapper around this commonly used try-catch .safe <- function(code, on_error = NULL) { tryCatch(code, error = function(e) on_error) } # select rows where values in "variable" match "value" #' @keywords internal .select_rows <- function(data, variable, value) { data[which(data[[variable]] == value), ] } #' select numerics columns #' @keywords internal .select_nums <- function(x) { x[unlist(lapply(x, is.numeric))] } #' @keywords internal .retrieve_model <- function(x) { # retrieve model obj_name <- attr(x, "object_name", exact = TRUE) model <- NULL if (!is.null(obj_name)) { # first try, parent frame model <- .safe(get(obj_name, envir = parent.frame())) if (is.null(model)) { # second try, global env model <- .safe(get(obj_name, envir = globalenv())) } if (is.null(model)) { # last try model <- .dynGet(obj_name, ifnotfound = NULL) } } model } #' @keywords internal .dynGet <- function(x, ifnotfound = stop(gettextf("%s not found", sQuote(x)), domain = NA, call. = FALSE), minframe = 1L, inherits = FALSE) { x <- insight::safe_deparse(x) n <- sys.nframe() myObj <- structure(list(.b = as.raw(7)), foo = 47L) while (n > minframe) { n <- n - 1L env <- sys.frame(n) r <- get0(x, envir = env, inherits = inherits, ifnotfound = myObj) if (!identical(r, myObj)) { return(r) } } ifnotfound } #' @keywords internal .get_direction <- function(direction) { if (length(direction) > 1) { insight::format_warning("Using first 'direction' value.") } if (is.numeric(direction[1])) { return(sign(direction[1])) } Value <- c( left = -1, right = 1, "two-sided" = 0, twosided = 0, "one-sided" = 1, onesided = 1, "<" = -1, ">" = 1, "=" = 0, "==" = 0, "-1" = -1, "0" = 0, "1" = 1, "+1" = 1 ) direction <- Value[tolower(direction[1])] if (is.na(direction)) { insight::format_error("Unrecognized 'direction' argument.") } direction } #' Prepare output data frame for printing #' #' @description #' This is an internal helper function to standardize and enrich the output of #' various `bayestestR` functions (like `ci()`, `hdi()`, `rope()`, etc.). #' Its main purpose is to merge a data frame containing analysis results #' (e.g., credible intervals) with a data frame of "cleaned" parameter #' information (from `insight::clean_parameters()`). #' #' This process adds human-readable parameter names and model component #' information (like fixed or random effects) to the output. It also includes #' special handling for complex models, such as multivariate models from #' `rstanarm` or `brms`, where response variables need to be parsed from #' parameter names. #' #' @param temp A data frame with estimation results, like CIs or point estimates. #' @param cleaned_parameters A data frame as returned by #' `insight::clean_parameters()`. #' @param is_stan_mv Logical, indicates if the model is a `stanmvreg` object. #' @param is_brms_mv Logical, indicates if the model is a `brms` multivariate #' model. 
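#'
#' @details
#' A minimal sketch of the intended internal call pattern (object names are
#' illustrative only):
#' `.prepare_output(hdi(insight::get_parameters(model)), insight::clean_parameters(model))`.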
#' #' @keywords internal #' @noRd .prepare_output <- function(temp, cleaned_parameters, is_stan_mv = FALSE, is_brms_mv = FALSE) { if (is.null(cleaned_parameters)) { return(temp) } if (isTRUE(is_stan_mv)) { # for models with multiple responses, we create a separate response column temp$Response <- gsub("(b\\[)*(.*)\\|(.*)", "\\2", temp$Parameter) # from the parameter names, we can now remove the name of the respone variables for (i in unique(temp$Response)) { temp$Parameter <- gsub(sprintf("%s|", i), "", temp$Parameter, fixed = TRUE) } merge_by <- c("Parameter", "Effects", "Component", "Response") remove_cols <- c("Group", "Cleaned_Parameter", "Function", ".roworder") } else if (isTRUE(is_brms_mv)) { # for models with multiple responses, we create a separate response column temp$Response <- gsub("(.*)_(.*)_(.*)", "\\2", temp$Parameter) merge_by <- c("Parameter", "Effects", "Component", "Response") remove_cols <- c("Group", "Cleaned_Parameter", "Function", ".roworder") } else { # By default, we only merge by these three columns merge_by <- c("Parameter", "Effects", "Component") remove_cols <- c("Group", "Cleaned_Parameter", "Response", "Function", ".roworder") } # in "temp", we have the data frame from the related functions (like # `point_estimate()`, `ci()` etc.). "cleaned_parameters" is a data frame # only with original parameter names, model components and "cleaned" # parameter names (retrieved from `insight::clean_parameters()`). merge_by <- intersect(merge_by, colnames(temp)) temp$.roworder <- seq_len(nrow(temp)) out <- merge(x = temp, y = cleaned_parameters, by = merge_by, all.x = TRUE) # hope this works for stanmvreg... if ((isTRUE(is_stan_mv) || isTRUE(is_brms_mv)) && all(is.na(out$Effects)) && all(is.na(out$Component))) { out$Effects <- cleaned_parameters$Effects[seq_len(nrow(out))] out$Component <- cleaned_parameters$Component[seq_len(nrow(out))] } # this here is required for multiple response models... if (all(is.na(out$Effects)) || all(is.na(out$Component))) { out <- out[!duplicated(out$.roworder), ] } else { out <- out[!is.na(out$Effects) & !is.na(out$Component) & !duplicated(out$.roworder), ] } attr(out, "Cleaned_Parameter") <- out$Cleaned_Parameter[order(out$.roworder)] datawizard::data_remove(out[order(out$.roworder), ], remove_cols, verbose = FALSE) } #' @keywords internal .merge_and_sort <- function(x, y, by, all) { if (is.null(ncol(y))) { return(x) } x$.rowid <- seq_len(nrow(x)) x <- merge(x, y, by = by, all = all) datawizard::data_remove(x[order(x$.rowid), ], ".rowid", verbose = FALSE) } # returns the variables that were used for grouping data frames (dplyr::group_var()) #' @keywords internal .group_vars <- function(x) { # dplyr < 0.8.0 returns attribute "indices" grps <- attr(x, "groups", exact = TRUE) # dplyr < 0.8.0? if (is.null(grps)) { ## TODO fix for dplyr < 0.8 attr(x, "vars", exact = TRUE) } else { setdiff(colnames(grps), ".rows") } } # safe add cleaned parameter names to a model object .add_clean_parameters_attribute <- function(params, model, ...) { cp <- tryCatch( { .get_cleaned_parameters(model, ...) 
}, error = function(e) { NULL } ) attr(params, "clean_parameters") <- cp params } #' @keywords internal .get_marginaleffects_draws <- function(object) { # errors and checks are handled by marginaleffects insight::check_if_installed("marginaleffects", minimum_version = "0.29.0") as.data.frame(marginaleffects::get_draws(object, shape = "DxP")) } #' @keywords internal .possibly_extract_rvar_col <- function(df, rvar_col) { if (missing(rvar_col) || is.null(rvar_col)) { return(NULL) } if (is.character(rvar_col) && length(rvar_col) == 1L && rvar_col %in% colnames(df) && inherits(df[[rvar_col]], "rvar")) { return(df[[rvar_col]]) } insight::format_error("The `rvar_col` argument must be a single, valid column name.") } bayestestR/R/utils_hdi_ci.R0000644000176200001440000000536214746106624015402 0ustar liggesusers#' @keywords internal .check_ci_fun <- function(dots) { ci_fun <- "hdi" if (identical(dots$ci_method, "spi")) { ci_fun <- "spi" } ci_fun } #' @keywords internal .check_ci_argument <- function(x, ci, verbose = TRUE) { if (ci > 1) { if (verbose) { insight::format_warning("`ci` should be less than 1, returning NAs.") } return(data.frame( "CI" = ci, "CI_low" = NA, "CI_high" = NA )) } if (ci == 1) { return(data.frame( "CI" = ci, "CI_low" = min(x, na.rm = TRUE), "CI_high" = max(x, na.rm = TRUE) )) } if (length(x) < 3) { if (verbose) { insight::format_warning("The posterior is too short, returning NAs.") } return(data.frame( "CI" = ci, "CI_low" = NA, "CI_high" = NA )) } NULL } #' @keywords internal .compute_interval_dataframe <- function(x, ci, verbose, fun) { numeric_variables <- vapply(x, is.numeric, TRUE) out <- insight::compact_list(lapply( x[, numeric_variables, drop = FALSE], get(fun, asNamespace("bayestestR")), ci = ci, verbose = verbose )) dat <- data.frame( Parameter = rep(names(out), each = length(ci)), do.call(rbind, out), stringsAsFactors = FALSE, row.names = NULL ) # rename for SPI, should be HDI if (identical(fun, "spi")) { class(dat) <- unique(c("bayestestR_hdi", "see_hdi", "bayestestR_spi", class(dat))) } else { class(dat) <- unique(c(paste0("bayestestR_", fun), paste0("see_", fun), class(dat))) } dat } #' @keywords internal .compute_interval_simMerMod <- function(x, ci, effects, parameters, verbose, fun) { fixed <- fixed.data <- NULL random <- random.data <- NULL if (effects %in% c("fixed", "all")) { fixed.data <- insight::get_parameters(x, effects = "fixed", parameters = parameters) fixed <- .compute_interval_dataframe(fixed.data, ci, verbose, fun) fixed$Group <- "fixed" } if (effects %in% c("random", "all")) { random.data <- insight::get_parameters(x, effects = "random", parameters = parameters) random <- .compute_interval_dataframe(random.data, ci, verbose, fun) random$Group <- "random" } d <- do.call(rbind, list(fixed, random)) if (length(unique(d$Group)) == 1) { d <- datawizard::data_remove(d, "Group", verbose = FALSE) } list(result = d, data = do.call(cbind, insight::compact_list(list(fixed.data, random.data)))) } #' @keywords internal .compute_interval_sim <- function(x, ci, parameters, verbose, fun) { fixed.data <- insight::get_parameters(x, parameters = parameters) d <- .compute_interval_dataframe(fixed.data, ci, verbose, fun) list(result = d, data = fixed.data) } bayestestR/R/display.R0000644000176200001440000000663215053406143014400 0ustar liggesusers#' @title Print tables in different output formats #' @name display.describe_posterior #' #' @description Prints tables (i.e. data frame) in different output formats. 
#' #' @param object,x An object returned by one of the package's function, for #' example [`describe_posterior()`], [`point_estimate()`], or [`eti()`]. #' @param format String, indicating the output format. Can be `"markdown"` #' `"html"`, or `"tt"`. `format = "tt"` creates a `tinytable` object, which is #' either printed as markdown or HTML table, depending on the environment. See #' [`insight::export_table()`] for details. #' @param digits Integer, number of digits to round the table output. Defaults #' to 2. #' @param caption Character, caption for the table. If `NULL`, no caption is #' added. By default, a caption is created based on the object type. #' @param ... Arguments passed down to `print_html()` or `print_md()` (e.g., #' `digits`), or to `insight::export_table()`. #' #' @return If `format = "markdown"`, the return value will be a character #' vector in markdown-table format. If `format = "html"`, an object of #' class `gt_tbl`. If `format = "tt"`, an object of class `tinytable`. #' #' @details `display()` is useful when the table-output from functions, which is #' usually printed as formatted text-table to console, should be formatted for #' pretty table-rendering in markdown documents, or if knitted from rmarkdown #' to PDF or Word files. See #' [vignette](https://easystats.github.io/parameters/articles/model_parameters_formatting.html) #' for examples. #' #' @examplesIf all(insight::check_if_installed(c("tinytable", "gt"), quietly = TRUE)) #' \donttest{ #' d <- data.frame(replicate(4, rnorm(20))) #' result <- describe_posterior(d) #' #' # markdown format #' display(result) #' #' # gt HTML #' display(result, format = "html") #' #' # tinytable #' display(result, format = "tt") #' } #' @export display.describe_posterior <- function(object, format = "markdown", ...) { format <- .display_default_format(format) if (format %in% c("html", "tt")) { print_html(object, backend = format, ...) } else { print_md(object, ...) } } #' @export display.point_estimate <- display.describe_posterior #' @export display.map_estimate <- display.describe_posterior #' @export display.p_direction <- display.describe_posterior #' @export display.p_map <- display.describe_posterior #' @export display.p_rope <- display.describe_posterior #' @export display.p_significance <- display.describe_posterior #' @export display.bayestestR_hdi <- display.describe_posterior #' @export display.bayestestR_eti <- display.describe_posterior #' @export display.bayestestR_si <- display.describe_posterior #' @export display.bayesfactor_models <- display.describe_posterior #' @export display.bayesfactor_restricted <- display.describe_posterior #' @export display.bayesfactor_parameters <- display.describe_posterior #' @export display.bayesfactor_inclusion <- display.describe_posterior # we allow exporting HTML format based on "gt" or "tinytable" .check_format_backend <- function(...) { dots <- list(...) 
if (identical(dots$backend, "tt")) { "tt" } else { "html" } } .display_default_format <- function(format) { format <- getOption("easystats_display_format", format) insight::validate_argument(format, c("markdown", "html", "md", "tt")) } bayestestR/R/append_datagrid.R0000644000176200001440000000721315054263445016044 0ustar liggesusers#' @keywords internal .append_datagrid <- function(results, object, long = FALSE) { UseMethod(".append_datagrid", object = object) } #' @keywords internal .append_datagrid.emmGrid <- function(results, object, long = FALSE) { # results is assumed to be a data frame with "Parameter" column # object is an emmeans / marginaleffects that results is based on all_attrs <- attributes(results) # save attributes for later all_class <- class(results) # extract model info. if we have categorical, add "group" variable if (inherits(object, c("emmGrid", "emm_list"))) { model <- attributes(object)$model } else { insight::check_if_installed("marginaleffects", minimum_version = "0.29.0") model <- marginaleffects::components(object, "model") } if (!long && !is.null(model)) { m_info <- insight::model_info(model, response = 1, verbose = FALSE) # check if we have ordinal and alike if (!is.null(m_info)) { has_response_levels <- isTRUE( m_info$is_categorical | m_info$is_mixture | m_info$is_ordinal | m_info$is_multinomial | m_info$is_cumulative ) } else { has_response_levels <- FALSE } if ((has_response_levels || isTRUE(insight::is_multivariate(model))) && "group" %in% colnames(object)) { results <- .safe( cbind(data.frame(group = object$group), results), results ) } } datagrid <- insight::get_datagrid(object) grid_names <- colnames(datagrid) if (long || nrow(datagrid) < nrow(results)) { datagrid$Parameter <- unique(results$Parameter) results <- datawizard::data_merge(datagrid, results, by = "Parameter") results$Parameter <- NULL class(results) <- all_class } else { results[colnames(datagrid)] <- datagrid results$Parameter <- NULL results <- results[, c(grid_names, setdiff(colnames(results), grid_names)), drop = FALSE] # add back attributes most_attrs <- all_attrs[setdiff(names(all_attrs), names(attributes(datagrid)))] attributes(results)[names(most_attrs)] <- most_attrs } attr(results, "idvars") <- grid_names results } #' @keywords internal .append_datagrid.emm_list <- .append_datagrid.emmGrid #' @keywords internal .append_datagrid.slopes <- .append_datagrid.emmGrid #' @keywords internal .append_datagrid.predictions <- .append_datagrid.emmGrid #' @keywords internal .append_datagrid.comparisons <- .append_datagrid.emmGrid #' @keywords internal .append_datagrid.data.frame <- function(results, object, long = FALSE) { # results is assumed to be a data frame with "Parameter" column # object is a data frame with an rvar column that results is based on all_attrs <- attributes(results) # save attributes for later all_class <- class(results) is_rvar <- vapply(object, inherits, FUN.VALUE = logical(1), "rvar") grid_names <- colnames(object)[!is_rvar] datagrid <- data.frame(object[, grid_names, drop = FALSE]) if (long || nrow(datagrid) < nrow(results)) { datagrid$Parameter <- unique(results$Parameter) results <- datawizard::data_merge(datagrid, results, by = "Parameter") results$Parameter <- NULL class(results) <- all_class } else { results[grid_names] <- object[grid_names] results$Parameter <- NULL results <- results[, c(grid_names, setdiff(colnames(results), grid_names)), drop = FALSE] # add back attributes most_attrs <- all_attrs[setdiff(names(all_attrs), names(attributes(object)))] 
attributes(results)[names(most_attrs)] <- most_attrs } attr(results, "idvars") <- grid_names results } bayestestR/R/ci.R0000644000176200001440000002363015005147105013320 0ustar liggesusers#' Confidence/Credible/Compatibility Interval (CI) #' #' Compute Confidence/Credible/Compatibility Intervals (CI) or Support Intervals #' (SI) for Bayesian and frequentist models. The Documentation is accessible #' for: #' #' - [Bayesian models](https://easystats.github.io/bayestestR/articles/credible_interval.html) #' - [Frequentist models](https://easystats.github.io/parameters/reference/ci.default.html) #' #' @param x A `stanreg` or `brmsfit` model, or a vector representing a posterior #' distribution. #' @param method Can be ["ETI"][eti] (default), ["HDI"][hdi], ["BCI"][bci], #' ["SPI"][spi] or ["SI"][si]. #' @param ci Value or vector of probability of the CI (between 0 and 1) #' to be estimated. Default to `0.95` (`95%`). #' @inheritParams hdi #' @inheritParams si #' @inherit hdi seealso #' @family ci #' #' @inheritSection hdi Model components #' #' @return A data frame with following columns: #' #' - `Parameter` The model parameter(s), if `x` is a model-object. If `x` is a #' vector, this column is missing. #' - `CI` The probability of the credible interval. #' - `CI_low`, `CI_high` The lower and upper credible interval limits for the parameters. #' #' @note When it comes to interpretation, we recommend thinking of the CI in terms of #' an "uncertainty" or "compatibility" interval, the latter being defined as #' "Given any value in the interval and the background assumptions, #' the data should not seem very surprising" (_Gelman & Greenland 2019_). #' #' There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @references #' Gelman A, Greenland S. Are confidence intervals better termed "uncertainty #' intervals"? BMJ 2019;l5381. 10.1136/bmj.l5381 #' #' @examplesIf require("rstanarm", quietly = TRUE) #' library(bayestestR) #' #' posterior <- rnorm(1000) #' ci(posterior, method = "ETI") #' ci(posterior, method = "HDI") #' #' df <- data.frame(replicate(4, rnorm(100))) #' ci(df, method = "ETI", ci = c(0.80, 0.89, 0.95)) #' ci(df, method = "HDI", ci = c(0.80, 0.89, 0.95)) #' #' model <- suppressWarnings(rstanarm::stan_glm( #' mpg ~ wt, #' data = mtcars, chains = 2, iter = 200, refresh = 0 #' )) #' ci(model, method = "ETI", ci = c(0.80, 0.89)) #' ci(model, method = "HDI", ci = c(0.80, 0.89)) #' #' @examplesIf require("BayesFactor", quietly = TRUE) #' bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) #' ci(bf, method = "ETI") #' ci(bf, method = "HDI") #' #' @examplesIf require("emmeans", quietly = TRUE) && require("rstanarm", quietly = TRUE) #' model <- emmeans::emtrends(model, ~1, "wt", data = mtcars) #' ci(model, method = "ETI") #' ci(model, method = "HDI") #' @export ci <- function(x, ...) { UseMethod("ci") } #' @keywords internal .ci_bayesian <- function(x, ci = 0.95, method = "ETI", effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, BF = 1, ...) { if (tolower(method) %in% c("eti", "equal", "ci", "quantile")) { return( eti( x, ci = ci, effects = effects, component = component, parameters = parameters, verbose = verbose, ... ) ) } else if (tolower(method) %in% c("bci", "bca", "bcai")) { return( bci( x, ci = ci, effects = effects, component = component, parameters = parameters, verbose = verbose, ... 
) ) } else if (tolower(method) == "hdi") { return( hdi( x, ci = ci, effects = effects, component = component, parameters = parameters, verbose = verbose, ... ) ) } else if (tolower(method) == "spi") { return( spi( x, ci = ci, effects = effects, component = component, parameters = parameters, verbose = verbose, ... ) ) } else if (tolower(method) == "si") { return( si( x, BF = BF, effects = effects, component = component, parameters = parameters, verbose = verbose, ... ) ) } else { insight::format_error(paste0( "`method` should be 'ETI' (for equal-tailed interval), ", "'HDI' (for highest density interval), 'BCI' (for bias corrected and ", "accelerated bootstrap intervals), 'SPI' (for shortest probability ", "interval) or 'SI' (for support interval)." )) } } #' @rdname ci #' @export ci.numeric <- function(x, ci = 0.95, method = "ETI", verbose = TRUE, BF = 1, ...) { .ci_bayesian(x, ci = ci, method = method, verbose = verbose, BF = BF, ...) } #' @rdname ci #' @inheritParams p_direction #' @export ci.data.frame <- function(x, ci = 0.95, method = "ETI", BF = 1, rvar_col = NULL, verbose = TRUE, ...) { x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::ci cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) obj_name <- insight::safe_deparse_symbol(substitute(x)) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x, long = length(ci) > 1L)) } .ci_bayesian(x, ci = ci, method = method, verbose = verbose, BF = BF, ...) } #' @export ci.draws <- function(x, ci = 0.95, method = "ETI", verbose = TRUE, BF = 1, ...) { .ci_bayesian( .posterior_draws_to_df(x), ci = ci, method = method, verbose = verbose, BF = BF, ... ) } #' @export ci.rvar <- ci.draws #' @export ci.emmGrid <- function(x, ci = NULL, ...) { if (!.is_baysian_grid(x)) { insight::check_if_installed("parameters") if (is.null(ci)) ci <- 0.95 return(parameters::ci(model = x, ci = ci, ...)) } if (is.null(ci)) ci <- 0.95 xdf <- insight::get_parameters(x) out <- ci(xdf, ci = ci, ...) out <- .append_datagrid(out, x, long = length(ci) > 1L) out } #' @export ci.emm_list <- ci.emmGrid #' @export ci.slopes <- function(x, ci = NULL, ...) { if (!.is_baysian_grid(x)) { insight::check_if_installed("parameters") if (is.null(ci)) ci <- 0.95 return(parameters::ci(model = x, ci = ci, ...)) } if (is.null(ci)) ci <- 0.95 xrvar <- .get_marginaleffects_draws(x) out <- ci(xrvar, ci = ci, ...) out <- .append_datagrid(out, x, long = length(ci) > 1L) out } #' @export ci.comparisons <- ci.slopes #' @export ci.predictions <- ci.slopes #' @export ci.sim.merMod <- function(x, ci = 0.95, method = "ETI", effects = "fixed", parameters = NULL, verbose = TRUE, ...) { .ci_bayesian( x, ci = ci, method = method, effects = effects, parameters = parameters, verbose = verbose, ... ) } #' @export ci.sim <- function(x, ci = 0.95, method = "ETI", parameters = NULL, verbose = TRUE, ...) { .ci_bayesian( x, ci = ci, method = method, parameters = parameters, verbose = verbose, ... ) } #' @export ci.stanreg <- function(x, ci = 0.95, method = "ETI", effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, BF = 1, ...) { .ci_bayesian( x, ci = ci, method = method, effects = effects, component = component, parameters = parameters, verbose = verbose, BF = BF, ... ) } #' @rdname ci #' @export ci.brmsfit <- function(x, ci = 0.95, method = "ETI", effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, BF = 1, ...) 
{ .ci_bayesian( x, ci = ci, method = method, effects = effects, component = component, parameters = parameters, verbose = verbose, BF = BF, ... ) } #' @export ci.stanfit <- ci.stanreg #' @export ci.blavaan <- ci.stanreg #' @export ci.BFBayesFactor <- ci.numeric #' @export ci.MCMCglmm <- function(x, ci = 0.95, method = "ETI", verbose = TRUE, ...) { nF <- x$Fixed$nfl ci( as.data.frame(x$Sol[, 1:nF, drop = FALSE]), ci = ci, method = method, verbose = verbose, ... ) } #' @export ci.bamlss <- function(x, ci = 0.95, method = "ETI", component = "all", verbose = TRUE, ...) { ci( insight::get_parameters(x, component = component), ci = ci, method = method, verbose = verbose, ... ) } #' @export ci.bcplm <- function(x, ci = 0.95, method = "ETI", verbose = TRUE, ...) { ci(insight::get_parameters(x), ci = ci, method = method, verbose = verbose, ...) } #' @export ci.blrm <- ci.bcplm #' @export ci.mcmc <- ci.bcplm #' @export ci.mcmc.list <- ci.bcplm #' @export ci.BGGM <- ci.bcplm #' @export ci.get_predicted <- ci.data.frame bayestestR/R/datasets.R0000644000176200001440000000132214542333405014534 0ustar liggesusers#' Moral Disgust Judgment #' #' A sample (simulated) dataset, used in tests and some examples. #' #' @author Richard D. Morey #' #' @docType data #' #' @name disgust #' #' @keywords data #' #' @format A data frame with 500 rows and 5 variables: #' \describe{ #' \item{score}{Score on the questionnaire, which ranges from 0 to 50 with higher scores representing harsher moral judgment} #' \item{condition}{one of three conditions, differing by the odor present in the room: a pleasant scent associated with cleanliness (lemon), a disgusting scent (sulfur), and a control condition in which no unusual odor is present} #' } #' #' ```{r} #' data("disgust") #' head(disgust, n = 5) #' ```` #' NULL bayestestR/R/model_to_priors.R0000644000176200001440000000322314706241121016121 0ustar liggesusers#' Convert model's posteriors to priors (EXPERIMENTAL) #' #' Convert model's posteriors to (normal) priors. #' #' @param model A Bayesian model. #' @param scale_multiply The SD of the posterior will be multiplied by this amount before being set as a prior to avoid overly narrow priors. #' @param ... Other arguments for \code{insight::get_prior()} or \code{\link{describe_posterior}}. #' #' @examples #' \donttest{ #' # brms models #' # ----------------------------------------------- #' if (require("brms")) { #' formula <- brms::brmsformula(mpg ~ wt + cyl, center = FALSE) #' #' model <- brms::brm(formula, data = mtcars, refresh = 0) #' priors <- model_to_priors(model) #' priors <- brms::validate_prior(priors, formula, data = mtcars) #' priors #' #' model2 <- brms::brm(formula, data = mtcars, prior = priors, refresh = 0) #' } #' } #' @export model_to_priors <- function(model, scale_multiply = 3, ...) { UseMethod("model_to_priors") } #' @export model_to_priors.brmsfit <- function(model, scale_multiply = 3, ...) { params <- describe_posterior(model, centrality = "mean", dispersion = TRUE, ci = NULL, test = NULL, ...) 
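  # Summarise the posterior (mean and SD per parameter), then, for every
  # parameter that has a prior in the original model, replace that prior
  # below with normal(mean, SD * scale_multiply) -- the inflated SD guards
  # against overly narrow priors.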
priors_params <- attributes(insight::get_priors(model, ...))$priors priors <- brms::prior_summary(model) for (p in priors_params$Parameter) { if (p %in% params$Parameter) { param_subset <- params[params$Parameter == p, ] priors$prior[priors_params$Parameter == p] <- paste0( "normal(", insight::format_value(param_subset$Mean), ", ", insight::format_value(param_subset$SD * scale_multiply), ")" ) } } priors } bayestestR/R/sexit_thresholds.R0000644000176200001440000001215714746106624016336 0ustar liggesusers#' @title Find Effect Size Thresholds #' #' @description This function attempts at automatically finding suitable default #' values for a "significant" (i.e., non-negligible) and "large" effect. This is #' to be used with care, and the chosen threshold should always be explicitly #' reported and justified. See the detail section in [`sexit()`][sexit] for more #' information. #' #' @inheritParams rope #' #' @examples #' sexit_thresholds(rnorm(1000)) #' \donttest{ #' if (require("rstanarm")) { #' model <- suppressWarnings(stan_glm( #' mpg ~ wt + gear, #' data = mtcars, #' chains = 2, #' iter = 200, #' refresh = 0 #' )) #' sexit_thresholds(model) #' #' model <- suppressWarnings( #' stan_glm(vs ~ mpg, data = mtcars, family = "binomial", refresh = 0) #' ) #' sexit_thresholds(model) #' } #' #' if (require("brms")) { #' model <- brm(mpg ~ wt + cyl, data = mtcars) #' sexit_thresholds(model) #' } #' #' if (require("BayesFactor")) { #' bf <- ttestBF(x = rnorm(100, 1, 1)) #' sexit_thresholds(bf) #' } #' } #' @references Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. #' #' @export sexit_thresholds <- function(x, ...) { UseMethod("sexit_thresholds") } #' @export sexit_thresholds.brmsfit <- function(x, verbose = TRUE, ...) { response <- insight::get_response(x, source = "mf") information <- insight::model_info(x, verbose = FALSE) if (insight::is_multivariate(x)) { mapply(function(i, j) .sexit_thresholds(i, j), x, information, response, verbose) } else { .sexit_thresholds(x, information, response, verbose) } } #' @export sexit_thresholds.stanreg <- sexit_thresholds.brmsfit #' @export sexit_thresholds.BFBayesFactor <- function(x, verbose = TRUE, ...) 
{ fac <- 1 if (inherits(x@numerator[[1]], "BFlinearModel")) { response <- .safe(insight::get_response(x, source = "mf")) if (!is.null(response)) { fac <- stats::sd(response, na.rm = TRUE) } } fac * .sexit_thresholds(x, verbose = verbose) } #' @export sexit_thresholds.lm <- sexit_thresholds.brmsfit #' @export sexit_thresholds.glm <- sexit_thresholds.brmsfit #' @export sexit_thresholds.merMod <- sexit_thresholds.brmsfit #' @export sexit_thresholds.glmmTMB <- sexit_thresholds.brmsfit #' @export sexit_thresholds.mixed <- sexit_thresholds.brmsfit #' @export sexit_thresholds.MixMod <- sexit_thresholds.brmsfit #' @export sexit_thresholds.wbm <- sexit_thresholds.brmsfit #' @export sexit_thresholds.feis <- sexit_thresholds.brmsfit #' @export sexit_thresholds.gee <- sexit_thresholds.brmsfit #' @export sexit_thresholds.geeglm <- sexit_thresholds.brmsfit #' @export sexit_thresholds.lme <- sexit_thresholds.brmsfit #' @export sexit_thresholds.felm <- sexit_thresholds.brmsfit #' @export sexit_thresholds.fixest <- sexit_thresholds.brmsfit #' @export sexit_thresholds.gls <- sexit_thresholds.brmsfit #' @export sexit_thresholds.hurdle <- sexit_thresholds.brmsfit #' @export sexit_thresholds.zeroinfl <- sexit_thresholds.brmsfit #' @export sexit_thresholds.bayesQR <- sexit_thresholds.brmsfit #' @export sexit_thresholds.default <- function(x, verbose = TRUE, ...) { .sexit_thresholds(x, verbose = verbose) } #' @export sexit_thresholds.mlm <- function(x, verbose = TRUE, ...) { response <- insight::get_response(x, type = "mf") information <- insight::model_info(x, verbose = FALSE) lapply(response, function(i) .sexit_thresholds(x, information, i, verbose = verbose)) } # helper ------------------ .sexit_thresholds <- function(x, information = NULL, response = NULL, verbose = TRUE) { if (is.null(information) && is.null(response)) { norm <- 1 } else { norm <- tryCatch( { # Linear Models if (information$is_linear) { stats::sd(response, na.rm = TRUE) # Logistic Regression Models } else if (information$is_binomial) { pi / sqrt(3) # Count Models } else if (information$is_count) { sig <- stats::sigma(x) if (!is.null(sig) && length(sig) > 0 && !is.na(sig)) { sig } else { 1 } # T-tests } else if (information$is_ttest) { if (inherits(x, "BFBayesFactor")) { stats::sd(x@data[, 1]) } else { if (verbose) { insight::format_warning("Could not estimate good thresholds, using default values.") } 1 } # Correlations } else if (information$is_correlation) { # https://github.com/easystats/bayestestR/issues/121 1 # Default } else { 1 } }, error = function(e) { if (verbose) { insight::format_warning("Could not estimate good thresholds, using default values.") } 1 } ) } c(0.05, 0.3) * norm } bayestestR/R/p_rope.R0000644000176200001440000001430115052646230014211 0ustar liggesusers#' Probability of being in the ROPE #' #' Compute the proportion of the whole posterior distribution that doesn't lie within a region of practical equivalence (ROPE). It is equivalent to running `rope(..., ci = 1)`. #' #' @inheritParams rope #' @param ... Other arguments passed to [rope()]. #' #' @inheritSection hdi Model components #' #' @examples #' library(bayestestR) #' #' p_rope(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) #' p_rope(x = mtcars, range = c(-0.1, 0.1)) #' @export p_rope <- function(x, ...) { UseMethod("p_rope") } #' @method as.double p_rope #' @export as.double.p_rope <- function(x, ...) { x } #' @export p_rope.default <- function(x, ...) { NULL } #' @rdname p_rope #' @export p_rope.numeric <- function(x, range = "default", verbose = TRUE, ...) 
{ out <- .p_rope(rope(x, range = range, ci = 1, verbose = verbose, ...)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export #' @rdname p_rope #' @inheritParams p_direction p_rope.data.frame <- function(x, range = "default", rvar_col = NULL, verbose = TRUE, ...) { x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::p_rope cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) obj_name <- insight::safe_deparse_symbol(substitute(x)) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x)) } out <- .p_rope(rope(x, range = range, ci = 1, verbose = verbose, ...)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_rope.draws <- function(x, range = "default", verbose = TRUE, ...) { p_rope(.posterior_draws_to_df(x), range = range, verbose = verbose, ...) } #' @export p_rope.rvar <- p_rope.draws #' @export p_rope.emmGrid <- function(x, range = "default", verbose = TRUE, ...) { xdf <- insight::get_parameters(x) out <- p_rope(xdf, range = range, verbose = verbose) out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_rope.emm_list <- p_rope.emmGrid #' @export p_rope.slopes <- function(x, range = "default", verbose = TRUE, ...) { xrvar <- .get_marginaleffects_draws(x) out <- p_rope(xrvar, range = range, verbose = verbose) out <- .append_datagrid(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_rope.comparisons <- p_rope.slopes #' @export p_rope.predictions <- p_rope.slopes #' @export p_rope.BFBayesFactor <- p_rope.numeric #' @export p_rope.MCMCglmm <- p_rope.numeric #' @export p_rope.stanreg <- function(x, range = "default", effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ...) { out <- .p_rope(rope( x, range = range, ci = 1, effects = effects, component = component, parameters = parameters, verbose = verbose, ... )) out <- .add_clean_parameters_attribute(out, x, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_rope.stanfit <- p_rope.stanreg #' @export p_rope.blavaan <- p_rope.stanreg #' @rdname p_rope #' @export p_rope.brmsfit <- function(x, range = "default", effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ...) { out <- .p_rope(rope( x, range = range, ci = 1, effects = effects, component = component, parameters = parameters, verbose = verbose, ... )) out <- .add_clean_parameters_attribute(out, x, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_rope.sim.merMod <- p_rope.stanreg #' @export p_rope.sim <- function(x, range = "default", parameters = NULL, verbose = TRUE, ...) { out <- .p_rope(rope(x, range = range, ci = 1, parameters = parameters, verbose = verbose, ...)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_rope.bamlss <- function(x, range = "default", component = "all", parameters = NULL, verbose = TRUE, ...) { out <- .p_rope(rope( x, range = range, ci = 1, effects = "all", component = component, parameters = parameters, verbose = verbose, ... )) out <- .add_clean_parameters_attribute(out, x) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export p_rope.mcmc <- function(x, range = "default", parameters = NULL, verbose = TRUE, ...) 
{
  out <- .p_rope(rope(
    x,
    range = range,
    ci = 1,
    parameters = parameters,
    verbose = verbose,
    ...
  ))
  attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x))
  out
}

#' @export
p_rope.bcplm <- p_rope.mcmc

#' @export
p_rope.BGGM <- p_rope.mcmc

#' @export
p_rope.blrm <- p_rope.mcmc

#' @export
p_rope.mcmc.list <- p_rope.mcmc


# Internal ----------------------------------------------------------------

#' @keywords internal
.p_rope <- function(rope_rez) {
  cols <- c(
    "Parameter",
    "ROPE_low",
    "ROPE_high",
    "ROPE_Percentage",
    "Superiority_Percentage",
    "Inferiority_Percentage",
    "Effects",
    "Component"
  )
  out <- as.data.frame(rope_rez)[cols[cols %in% names(rope_rez)]]
  names(out)[names(out) == "ROPE_Percentage"] <- "p_ROPE"

  if (all(c("Superiority_Percentage", "Inferiority_Percentage") %in% names(out))) {
    names(out)[names(out) == "Superiority_Percentage"] <- "p_Superiority"
    names(out)[names(out) == "Inferiority_Percentage"] <- "p_Inferiority"
  }

  class(out) <- c("p_rope", "see_p_rope", "data.frame")
  out
}
bayestestR/R/distribution.R0000644000176200001440000001507015052646230015450 0ustar liggesusers#' Empirical Distributions
#'
#' Generate a sequence of n-quantiles, i.e., a sample of size `n` with a
#' near-perfect distribution.
#'
#' @param type Can be any of the names from base R's
#'   [Distributions][stats::Distributions], like `"cauchy"`, `"pois"` or `"beta"`.
#' @param random Generate near-perfect or random (simple wrappers for the base R
#'   `r*` functions) distributions. When `random = FALSE`, these functions return
#'   `q*(ppoints(n), ...)`.
#' @param xi For tweedie distributions, the value of `xi` such that the variance
#'   is `var(Y) = phi * mu^xi`.
#' @param power Alias for `xi`.
#' @param ... Arguments passed to or from other methods.
#' @inheritParams tweedie::rtweedie
#'
#' @examples
#' library(bayestestR)
#' x <- distribution(n = 10)
#' plot(density(x))
#'
#' x <- distribution(type = "gamma", n = 100, shape = 2)
#' plot(density(x))
#' @export
distribution <- function(type = "normal", ...) {
  base_r_distributions <- c(
    "beta", "binom", "binomial", "cauchy", "chisq", "chisquared", "exp", "f",
    "gamma", "geom", "hyper", "lnorm", "multinom", "nbinom", "normal",
    "gaussian", "pois", "poisson", "student", "t", "student_t", "unif",
    "uniform", "weibull"
  )
  switch(match.arg(arg = type, choices = base_r_distributions),
    beta = distribution_beta(...),
    binom = ,
    binomial = distribution_binomial(...),
    cauchy = distribution_cauchy(...),
    chisq = ,
    chisquared = distribution_chisquared(...),
    gamma = distribution_gamma(...),
    gaussian = ,
    normal = distribution_normal(...),
    nbinom = distribution_nbinom(...),
    poisson = distribution_poisson(...),
    t = ,
    student = ,
    student_t = distribution_student(...),
    uniform = distribution_uniform(...),
    distribution_custom(type = type, ...)
  )
}

#' @rdname distribution
#' @inheritParams distribution
#' @export
distribution_custom <- function(n, type = "norm", ..., random = FALSE) {
  if (random) {
    f <- match.fun(paste0("r", type))
    f(n, ...)
  } else {
    f <- match.fun(paste0("q", type))
    f(stats::ppoints(n), ...)
  }
}

#' @rdname distribution
#' @inheritParams stats::rbeta
#' @export
distribution_beta <- function(n, shape1, shape2, ncp = 0, random = FALSE, ...) {
  if (random) {
    stats::rbeta(n, shape1, shape2, ncp = ncp)
  } else {
    stats::qbeta(stats::ppoints(n), shape1, shape2, ncp = ncp, ...)
  }
}

#' @rdname distribution
#' @inheritParams stats::rbinom
#' @export
distribution_binomial <- function(n, size = 1, prob = 0.5, random = FALSE, ...)
{ if (random) { stats::rbinom(n, size, prob) } else { stats::qbinom(stats::ppoints(n), size, prob, ...) } } #' @rdname distribution #' @export distribution_binom <- distribution_binomial #' @rdname distribution #' @inheritParams stats::rcauchy #' @export distribution_cauchy <- function(n, location = 0, scale = 1, random = FALSE, ...) { if (random) { stats::rcauchy(n, location, scale) } else { stats::qcauchy(stats::ppoints(n), location, scale, ...) } } #' @rdname distribution #' @inheritParams stats::rchisq #' @export distribution_chisquared <- function(n, df, ncp = 0, random = FALSE, ...) { if (random) { stats::rchisq(n, df, ncp) } else { stats::qchisq(stats::ppoints(n), df, ncp, ...) } } #' @rdname distribution #' @export distribution_chisq <- distribution_chisquared #' @rdname distribution #' @inheritParams stats::rgamma #' @param shape Shape parameter. #' @export distribution_gamma <- function(n, shape, scale = 1, random = FALSE, ...) { if (random) { stats::rgamma(n = n, shape = shape, scale = scale) } else { stats::qgamma(p = stats::ppoints(n), shape = shape, scale = scale) } } #' @rdname distribution #' @inheritParams stats::rnorm #' @export distribution_mixture_normal <- function(n, mean = c(-3, 3), sd = 1, random = FALSE, ...) { n <- round(n / length(mean)) sd <- sd if (length(sd) != length(mean)) { sd <- rep_len(sd, length(mean)) } x <- NULL for (i in seq_along(mean)) { x <- c(x, distribution_normal(n = n, mean = mean[i], sd = sd[i], random = random)) } x } #' @rdname distribution #' @inheritParams stats::rnorm #' @export distribution_normal <- function(n, mean = 0, sd = 1, random = FALSE, ...) { if (random) { stats::rnorm(n, mean, sd) } else { stats::qnorm(stats::ppoints(n), mean, sd, ...) } } #' @rdname distribution #' @export distribution_gaussian <- distribution_normal #' @rdname distribution #' @inheritParams stats::rnbinom #' @param phi Corresponding to `glmmTMB`'s implementation of nbinom #' distribution, where `size=mu/phi`. #' @export distribution_nbinom <- function(n, size, prob, mu, phi, random = FALSE, ...) { if (missing(size)) { size <- mu / phi } if (random) { stats::rnbinom(n, size, prob, mu) } else { stats::qnbinom(stats::ppoints(n), size, prob, mu, ...) } } #' @rdname distribution #' @inheritParams stats::rpois #' @export distribution_poisson <- function(n, lambda = 1, random = FALSE, ...) { if (random) { stats::rpois(n, lambda) } else { stats::qpois(stats::ppoints(n), lambda, ...) } } #' @rdname distribution #' @inheritParams stats::rt #' @export distribution_student <- function(n, df, ncp, random = FALSE, ...) { if (random) { stats::rt(n, df, ncp) } else { stats::qt(stats::ppoints(n), df, ncp, ...) } } #' @rdname distribution #' @export distribution_t <- distribution_student #' @rdname distribution #' @export distribution_student_t <- distribution_student #' @rdname distribution #' @inheritParams tweedie::rtweedie #' @export distribution_tweedie <- function(n, xi = NULL, mu, phi, power = NULL, random = FALSE, ...) { insight::check_if_installed("tweedie") if (random) { tweedie::rtweedie( n = n, xi = xi, mu = mu, phi = phi, power = power ) } else { tweedie::qtweedie( p = stats::ppoints(n), xi = xi, mu = mu, phi = phi, power = power ) } } #' @rdname distribution #' @inheritParams stats::runif #' @export distribution_uniform <- function(n, min = 0, max = 1, random = FALSE, ...) { if (random) { stats::runif(n, min, max) } else { stats::qunif(stats::ppoints(n), min, max, ...) 
} } bayestestR/R/eti.R0000644000176200001440000002072415052646230013514 0ustar liggesusers#' Equal-Tailed Interval (ETI) #' #' Compute the **Equal-Tailed Interval (ETI)** of posterior distributions using #' the quantiles method. The probability of being below this interval is equal #' to the probability of being above it. The ETI can be used in the context of #' uncertainty characterisation of posterior distributions as #' **Credible Interval (CI)**. #' #' @inheritParams hdi #' @inherit ci return #' @inherit hdi details #' @inherit hdi seealso #' @family ci #' #' @inheritSection hdi Model components #' #' @examplesIf require("rstanarm") && require("emmeans") && require("brms") && require("BayesFactor") #' library(bayestestR) #' #' posterior <- rnorm(1000) #' eti(posterior) #' eti(posterior, ci = c(0.80, 0.89, 0.95)) #' #' df <- data.frame(replicate(4, rnorm(100))) #' eti(df) #' eti(df, ci = c(0.80, 0.89, 0.95)) #' \donttest{ #' model <- suppressWarnings( #' rstanarm::stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' ) #' eti(model) #' eti(model, ci = c(0.80, 0.89, 0.95)) #' #' eti(emmeans::emtrends(model, ~1, "wt", data = mtcars)) #' #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' eti(model) #' eti(model, ci = c(0.80, 0.89, 0.95)) #' #' bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) #' eti(bf) #' eti(bf, ci = c(0.80, 0.89, 0.95)) #' } #' #' @export eti <- function(x, ...) { UseMethod("eti") } #' @export eti.default <- function(x, ...) { insight::format_error(paste0("'eti()' is not yet implemented for objects of class '", class(x)[1], "'.")) } #' @rdname eti #' @export eti.numeric <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- do.call(rbind, lapply(ci, function(i) { .eti(x = x, ci = i, verbose = verbose) })) class(out) <- unique(c("bayestestR_eti", "see_eti", "bayestestR_ci", "see_ci", class(out))) attr(out, "data") <- x out } #' @export #' @rdname eti #' @inheritParams p_direction eti.data.frame <- function(x, ci = 0.95, rvar_col = NULL, verbose = TRUE, ...) { obj_name <- insight::safe_deparse_symbol(substitute(x)) x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::eti cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x, long = length(ci) > 1L)) } dat <- .compute_interval_dataframe(x = x, ci = ci, verbose = verbose, fun = "eti") attr(dat, "object_name") <- obj_name dat } #' @export eti.draws <- function(x, ci = 0.95, verbose = TRUE, ...) { dat <- .compute_interval_dataframe(x = .posterior_draws_to_df(x), ci = ci, verbose = verbose, fun = "eti") attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export eti.rvar <- eti.draws #' @export eti.MCMCglmm <- function(x, ci = 0.95, verbose = TRUE, ...) { nF <- x$Fixed$nfl d <- as.data.frame(x$Sol[, 1:nF, drop = FALSE]) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "eti") attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export eti.mcmc <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- as.data.frame(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "eti") attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export eti.bamlss <- function(x, ci = 0.95, component = "all", verbose = TRUE, ...) 
{ d <- insight::get_parameters(x, component = component) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "eti") attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export eti.bcplm <- function(x, ci = 0.95, verbose = TRUE, ...) { d <- insight::get_parameters(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = "eti") attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export eti.bayesQR <- eti.bcplm #' @export eti.blrm <- eti.bcplm #' @export eti.mcmc.list <- eti.bcplm #' @export eti.BGGM <- eti.bcplm #' @export eti.sim.merMod <- function(x, ci = 0.95, effects = "fixed", parameters = NULL, verbose = TRUE, ...) { dat <- .compute_interval_simMerMod( x = x, ci = ci, effects = effects, parameters = parameters, verbose = verbose, fun = "eti" ) out <- dat$result attr(out, "data") <- dat$data out } #' @export eti.sim <- function(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) { dat <- .compute_interval_sim( x = x, ci = ci, parameters = parameters, verbose = verbose, fun = "eti" ) out <- dat$result attr(out, "data") <- dat$data out } #' @export eti.emmGrid <- function(x, ci = 0.95, verbose = TRUE, ...) { xdf <- insight::get_parameters(x) dat <- eti(xdf, ci = ci, verbose = verbose, ...) dat <- .append_datagrid(dat, x, long = length(ci) > 1L) attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export eti.emm_list <- eti.emmGrid #' @export eti.slopes <- function(x, ci = 0.95, verbose = TRUE, ...) { xrvar <- .get_marginaleffects_draws(x) dat <- eti(xrvar, ci = ci, verbose = verbose, ...) dat <- .append_datagrid(dat, x, long = length(ci) > 1L) attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export eti.comparisons <- eti.slopes #' @export eti.predictions <- eti.slopes #' @export eti.stanreg <- function(x, ci = 0.95, effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ...) { out <- .prepare_output( eti( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), ci = ci, verbose = verbose, ... ), .get_cleaned_parameters(x, ...), inherits(x, "stanmvreg") ) class(out) <- unique(c("bayestestR_eti", "see_eti", class(out))) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export eti.stanfit <- eti.stanreg #' @export eti.blavaan <- eti.stanreg #' @rdname eti #' @export eti.brmsfit <- function(x, ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ...) { out <- .prepare_output( eti( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), ci = ci, verbose = verbose, ... ), .get_cleaned_parameters(x, ...) ) class(out) <- unique(c("bayestestR_eti", "see_eti", class(out))) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export eti.BFBayesFactor <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- eti(insight::get_parameters(x), ci = ci, verbose = verbose, ...) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @rdname eti #' @export eti.get_predicted <- function(x, ci = 0.95, use_iterations = FALSE, verbose = TRUE, ...) { if (isTRUE(use_iterations)) { if ("iterations" %in% names(attributes(x))) { out <- eti(as.data.frame(t(attributes(x)$iterations)), ci = ci, verbose = verbose, ...) 
    } else {
      insight::format_error("No iterations present in the output.")
    }
    attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x))
  } else {
    out <- eti(as.numeric(x), ci = ci, verbose = verbose, ...)
  }
  out
}


# Helper ------------------------------------------------------------------

.eti <- function(x, ci, verbose = TRUE) {
  check_ci <- .check_ci_argument(x, ci, verbose)

  if (!is.null(check_ci)) {
    return(check_ci)
  }

  results <- as.vector(stats::quantile(
    x,
    probs = c((1 - ci) / 2, (1 + ci) / 2),
    names = FALSE,
    na.rm = TRUE
  ))

  data.frame(
    CI = ci,
    CI_low = results[1],
    CI_high = results[2]
  )
}
bayestestR/R/is_baysian_grid.R0000644000176200001440000000142415054263456016064 0ustar liggesusers#' @keywords internal
.is_baysian_grid <- function(x) {
  UseMethod(".is_baysian_grid")
}

#' @keywords internal
.is_baysian_grid.emmGrid <- function(x) {
  if (inherits(x, "emm_list")) {
    x <- x[[1]]
  }
  post.beta <- methods::slot(x, "post.beta")
  !(all(dim(post.beta) == 1) && is.na(post.beta))
}

#' @keywords internal
.is_baysian_grid.emm_list <- .is_baysian_grid.emmGrid

#' @keywords internal
.is_baysian_grid.slopes <- function(x) {
  insight::check_if_installed("marginaleffects", minimum_version = "0.29.0")
  !is.null(suppressWarnings(marginaleffects::get_draws(x, "PxD")))
}

#' @keywords internal
.is_baysian_grid.predictions <- .is_baysian_grid.slopes

#' @keywords internal
.is_baysian_grid.comparisons <- .is_baysian_grid.slopes
bayestestR/R/p_to_bf.R0000644000176200001440000001004314746106624014341 0ustar liggesusers#' Convert p-values to (pseudo) Bayes Factors
#'
#' Convert p-values to (pseudo) Bayes Factors. This transformation has been
#' suggested by Wagenmakers (2022), but relies on a large number of assumptions.
#' It might therefore not be reliable. Use at your own risk. For more accurate
#' approximate Bayes factors, use [bic_to_bf()] instead.
#'
#' @param x A (frequentist) model object, or a (numeric) vector of p-values.
#' @param n_obs Number of observations. Either length 1, or same length as `p`.
#' @param log Whether to return log Bayes Factors. **Note:** The `print()` method
#'   always shows `BF` - the `"log_BF"` column is only accessible from the returned
#'   data frame.
#' @param ... Other arguments to be passed (not used for now).
#'
#' @references
#' - Wagenmakers, E.J. (2022). Approximate objective Bayes factors from p-values
#'   and sample size: The 3p(sqrt(n)) rule. Preprint available on ArXiv:
#'   https://psyarxiv.com/egydq
#'
#' @examplesIf require("parameters")
#' data(iris)
#' model <- lm(Petal.Length ~ Sepal.Length + Species, data = iris)
#' p_to_bf(model)
#'
#' # Examples that demonstrate comparison between
#' # BIC-approximated and pseudo BF
#' # --------------------------------------------
#' m0 <- lm(mpg ~ 1, mtcars)
#' m1 <- lm(mpg ~ am, mtcars)
#' m2 <- lm(mpg ~ factor(cyl), mtcars)
#'
#' # In this first example, BIC-approximated BF and
#' # pseudo-BF based on p-values are close...
#'
#' # BIC-approximated BF, m1 against null model
#' bic_to_bf(BIC(m1), denominator = BIC(m0))
#'
#' # pseudo-BF based on p-values - dropping intercept
#' p_to_bf(m1)[-1, ]
#'
#' # The second example shows that results from pseudo-BF are less accurate
#' # and should be handled with caution!
#' bic_to_bf(BIC(m2), denominator = BIC(m0))
#' p_to_bf(anova(m2), n_obs = nrow(mtcars))
#'
#' @return A data frame with the p-values and pseudo-Bayes factors (against the null).
#'
#' @seealso [bic_to_bf()] for more accurate approximate Bayes factors.
#'
#' @export
p_to_bf <- function(x, ...)
{ UseMethod("p_to_bf") } #' @export #' @rdname p_to_bf p_to_bf.numeric <- function(x, log = FALSE, n_obs = NULL, ...) { p <- x # Validate n_obs if (is.null(n_obs)) { insight::format_error("Argument `n_obs` must be specified.") } else if (length(n_obs) == 1L) { n_obs <- rep(n_obs, times = length(p)) } else if (length(n_obs) != length(p)) { insight::format_error("`n_obs` must be of length 1 or same length as `p`.") } # Convert log_BF <- vector("numeric", length = length(p)) for (i in seq_along(p)) { if (p[i] <= 0.1) { log_BF[i] <- log(3 * p[i] * sqrt(n_obs[i])) } else if (p[i] <= 0.5) { # log_BF[i] <- log((4 / 3) * p[i] ^ (2 / 3) * sqrt(n_obs[i])) log_BF[i] <- log(p[i]) * (2 / 3) + log(sqrt(n_obs[i]) * (4 / 3)) } else { # log_BF[i] <- p[i] ^ .25 * sqrt(n_obs[i]) log_BF[i] <- log(p[i]) / 4 + log(sqrt(n_obs[i])) } } # Clean up out <- data.frame( p = p, # IMPORTANT! This is BF10! log_BF = -log_BF, stringsAsFactors = FALSE ) if (!log) { out$BF <- exp(out$log_BF) out$log_BF <- NULL } class(out) <- c("p_to_pseudo_bf", "data.frame") out } #' @export #' @rdname p_to_bf p_to_bf.default <- function(x, log = FALSE, ...) { if (insight::is_model(x)) { insight::check_if_installed("parameters") params <- parameters::p_value(x) p <- params$p n_obs <- insight::n_obs(x) # validation check if (is.null(n_obs)) { # user may also pass n_obs via dots... n_obs <- list(...)$n_obs } } else { insight::format_error("Argument `x` must be a model object, or a numeric vector of p-values.") } out <- p_to_bf(p, n_obs = n_obs, log = log) out <- cbind(params, out[, -1, drop = FALSE]) class(out) <- c("p_to_pseudo_bf", "data.frame") out } # methods --------------- #' @export print.p_to_pseudo_bf <- function(x, ...) { cat(insight::export_table(insight::format_table(x), caption = "Pseudo-BF (against NULL)")) } bayestestR/R/print.equivalence_test.R0000644000176200001440000000414314766104675017440 0ustar liggesusers#' @export print.equivalence_test <- function(x, digits = 2, ...) 
{ orig_x <- x insight::print_color("# Test for Practical Equivalence\n\n", "blue") # print ROPE limits, if we just have one set of ROPE values if (insight::has_single_value(x$ROPE_low, remove_na = TRUE)) { cat(sprintf(" ROPE: [%.*f %.*f]\n\n", digits, x$ROPE_low[1], digits, x$ROPE_high[1])) } # fix "sd" pattern model <- .retrieve_model(x) if (!is.null(model) && !is.data.frame(model)) { cp <- insight::clean_parameters(model) if (!is.null(cp$Group) && any(startsWith(cp$Group, "SD/Cor"))) { cp <- cp[startsWith(cp$Group, "SD/Cor"), ] matches <- match(cp$Parameter, x$Parameter) if (length(matches)) { new_pattern <- paste0( "SD/Cor: ", cp$Cleaned_Parameter[unique(stats::na.omit(match(x$Parameter, cp$Parameter)))] ) if (length(new_pattern) == length(matches)) { x$Parameter[matches] <- new_pattern } } } } x$ROPE_Percentage <- sprintf("%.*f %%", digits, x$ROPE_Percentage * 100) x$HDI <- insight::format_ci(x$HDI_low, x$HDI_high, ci = NULL, digits = digits) ci <- unique(x$CI) keep.columns <- c( attr(x, "idvars"), "Parameter", "Effects", "Component", "ROPE_Equivalence", "ROPE_Percentage", "CI", "HDI" ) # keep ROPE columns for multiple ROPE values if (insight::n_unique(x$ROPE_low) > 1) { keep.columns <- c(keep.columns, "ROPE") x$ROPE <- insight::format_ci(x$ROPE_low, x$ROPE_high, ci = NULL, digits = digits) } x <- x[, intersect(keep.columns, colnames(x))] colnames(x)[which(colnames(x) == "ROPE_Equivalence")] <- "H0" colnames(x)[which(colnames(x) == "ROPE_Percentage")] <- "inside ROPE" .print_equivalence_component(x, ci, digits) invisible(orig_x) } .print_equivalence_component <- function(x, ci, digits) { for (i in ci) { xsub <- x[x$CI == i, -which(colnames(x) == "CI"), drop = FALSE] colnames(xsub)[colnames(xsub) == "HDI"] <- sprintf("%i%% HDI", 100 * i) .print_data_frame(xsub, digits = digits) cat("\n") } } bayestestR/R/area_under_curve.R0000644000176200001440000000401614640231526016240 0ustar liggesusers#' Area under the Curve (AUC) #' #' Based on the DescTools `AUC` function. It can calculate the area under the #' curve with a naive algorithm or a more elaborated spline approach. The curve #' must be given by vectors of xy-coordinates. This function can handle unsorted #' x values (by sorting x) and ties for the x values (by ignoring duplicates). #' #' @param x Vector of x values. #' @param y Vector of y values. #' @param method Method to compute the Area Under the Curve (AUC). Can be #' `"trapezoid"` (default), `"step"` or `"spline"`. If "trapezoid", the curve #' is formed by connecting all points by a direct line (composite trapezoid #' rule). If "step" is chosen then a stepwise connection of two points is #' used. For calculating the area under a spline interpolation the splinefun #' function is used in combination with integrate. #' @param ... Arguments passed to or from other methods. #' #' #' @examples #' library(bayestestR) #' posterior <- distribution_normal(1000) #' #' dens <- estimate_density(posterior) #' dens <- dens[dens$x > 0, ] #' x <- dens$x #' y <- dens$y #' #' area_under_curve(x, y, method = "trapezoid") #' area_under_curve(x, y, method = "step") #' area_under_curve(x, y, method = "spline") #' @seealso DescTools #' @export area_under_curve <- function(x, y, method = c("trapezoid", "step", "spline"), ...) 
{
  # From DescTools [GPL-3]: https://github.com/cran/DescTools/blob/master/R/StatsAndCIs.r
  if (length(x) != length(y)) {
    insight::format_error("Length of x must be equal to length of y.")
  }

  idx <- order(x)
  x <- x[idx]
  y <- y[idx]

  switch(match.arg(arg = method, choices = c("trapezoid", "step", "spline")),
    trapezoid = sum((rowMeans(cbind(y[-length(y)], y[-1]))) * (x[-1] - x[-length(x)])),
    step = sum(y[-length(y)] * (x[-1] - x[-length(x)])),
    spline = stats::integrate(stats::splinefun(x, y, method = "natural"), lower = min(x), upper = max(x))$value
  )
}

#' @rdname area_under_curve
#' @export
auc <- area_under_curve
bayestestR/R/utils_posterior.R0000644000176200001440000000160414542333405016175 0ustar liggesusers# helper ------------------------------

.posterior_draws_to_df <- function(x) {
  UseMethod(".posterior_draws_to_df")
}

.posterior_draws_to_df.default <- function(x) {
  insight::format_error(sprintf("Objects of class `%s` are not yet supported.", class(x)[1]))
}

.posterior_draws_to_df.data.frame <- function(x) {
  x
}

.posterior_draws_to_df.draws_df <- function(x) {
  insight::check_if_installed("posterior")
  datawizard::data_remove(as.data.frame(posterior::as_draws_df(x)), c(".chain", ".iteration", ".draw"))
}

.posterior_draws_to_df.draws_matrix <- .posterior_draws_to_df.draws_df

.posterior_draws_to_df.draws_array <- .posterior_draws_to_df.draws_df

.posterior_draws_to_df.draws_list <- .posterior_draws_to_df.draws_df

.posterior_draws_to_df.draws_rvars <- .posterior_draws_to_df.draws_df

.posterior_draws_to_df.rvar <- .posterior_draws_to_df.draws_df
bayestestR/R/estimate_density.R0000644000176200001440000005414114765755711016310 0ustar liggesusers#' Density Estimation
#'
#' This function is a wrapper over different methods of density estimation. By
#' default, it uses the base R `density()` function, but with a different default
#' smoothing bandwidth (`"SJ"`) than the legacy default of base R `density()`
#' (`"nrd0"`). However, Deng and Wickham suggest that `method = "KernSmooth"`
#' is the fastest and the most accurate.
#'
#' @inheritParams hdi
#' @inheritParams stats::density
#' @param bw See the eponymous argument in `density`. Here, the default has been
#'   changed to `"SJ"`, which is recommended.
#' @param ci The confidence interval threshold. Only used when `method = "kernel"`.
#'   This feature is experimental, use with caution.
#' @param method Density estimation method. Can be `"kernel"` (default), `"logspline"`,
#'   `"KernSmooth"` or `"mixture"`.
#' @param precision Number of points of density data. See the `n` parameter in `density`.
#' @param extend Extend the range of the x axis by a factor of `extend_scale`.
#' @param extend_scale Ratio of range by which to extend the x axis. A value of `0.1`
#'   means that the x axis will be extended by `1/10` of the range of the data.
#' @param select Character vector of column names. If `NULL` (the default), all
#'   numeric variables will be selected. Other arguments from
#'   `datawizard::extract_column_names()` (such as `exclude`) can also be used.
#' @param by Optional character vector. If not `NULL` and input is a data frame,
#'   density estimation is performed for each group (subsets) indicated by `by`.
#'   See examples.
#'
#' @inheritSection hdi Model components
#'
#' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}.
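#'
#' The height of the estimated curve at a single value can also be read off with
#' the related helper [density_at()]; a minimal illustrative sketch:
#'
#' ```
#' posterior <- rnorm(1000)
#' # approximate height of the estimated density curve at x = 0
#' density_at(posterior, 0)
#' ```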
#' #' @examplesIf require("logspline") && require("KernSmooth") && require("mclust") && require("emmeans") && require("rstanarm") && require("brms") #' library(bayestestR) #' #' set.seed(1) #' x <- rnorm(250, mean = 1) #' #' # Basic usage #' density_kernel <- estimate_density(x) # default method is "kernel" #' #' hist(x, prob = TRUE) #' lines(density_kernel$x, density_kernel$y, col = "black", lwd = 2) #' lines(density_kernel$x, density_kernel$CI_low, col = "gray", lty = 2) #' lines(density_kernel$x, density_kernel$CI_high, col = "gray", lty = 2) #' legend("topright", #' legend = c("Estimate", "95% CI"), #' col = c("black", "gray"), lwd = 2, lty = c(1, 2) #' ) #' #' # Other Methods #' density_logspline <- estimate_density(x, method = "logspline") #' density_KernSmooth <- estimate_density(x, method = "KernSmooth") #' density_mixture <- estimate_density(x, method = "mixture") #' #' hist(x, prob = TRUE) #' lines(density_kernel$x, density_kernel$y, col = "black", lwd = 2) #' lines(density_logspline$x, density_logspline$y, col = "red", lwd = 2) #' lines(density_KernSmooth$x, density_KernSmooth$y, col = "blue", lwd = 2) #' lines(density_mixture$x, density_mixture$y, col = "green", lwd = 2) #' #' # Extension #' density_extended <- estimate_density(x, extend = TRUE) #' density_default <- estimate_density(x, extend = FALSE) #' #' hist(x, prob = TRUE) #' lines(density_extended$x, density_extended$y, col = "red", lwd = 3) #' lines(density_default$x, density_default$y, col = "black", lwd = 3) #' #' # Multiple columns #' head(estimate_density(iris)) #' head(estimate_density(iris, select = "Sepal.Width")) #' #' # Grouped data #' head(estimate_density(iris, by = "Species")) #' head(estimate_density(iris$Petal.Width, by = iris$Species)) #' \donttest{ #' # rstanarm models #' # ----------------------------------------------- #' library(rstanarm) #' model <- suppressWarnings( #' stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' ) #' head(estimate_density(model)) #' #' library(emmeans) #' head(estimate_density(emtrends(model, ~1, "wt", data = mtcars))) #' #' # brms models #' # ----------------------------------------------- #' library(brms) #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' estimate_density(model) #' } #' #' @references Deng, H., & Wickham, H. (2011). Density estimation in R. Electronic publication. #' #' @export estimate_density <- function(x, ...) { UseMethod("estimate_density") } #' @export estimate_density.default <- function(x, ...) { insight::format_error( paste0("`estimate_density()` is not yet implemented for objects of class `", class(x)[1], "`.") ) } #' @keywords internal .estimate_density <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, ...) { method <- match.arg( tolower(method), c("kernel", "logspline", "kernsmooth", "smooth", "mixture", "mclust") ) # Remove NA x <- x[!is.na(x)] if (length(x) < 2) { return(stats::setNames( data.frame(matrix(ncol = 3, nrow = 0)), c("Parameter", "x", "y") )) } # Range x_range <- range(x) if (extend) { extension_scale <- diff(x_range) * extend_scale x_range[1] <- x_range[1] - extension_scale x_range[2] <- x_range[2] + extension_scale } # Replace inf values if needed x_range[is.infinite(x_range)] <- 5.565423e+156 # Kernel if (method == "kernel") { kde <- .estimate_density_kernel(x, x_range, precision, bw, ci, ...) # Logspline } else if (method == "logspline") { kde <- .estimate_density_logspline(x, x_range, precision, ...) 
# KernSmooth } else if (method %in% c("kernsmooth", "smooth")) { kde <- .estimate_density_KernSmooth(x, x_range, precision, ...) # Mixture } else if (method %in% c("mixture", "mclust")) { kde <- .estimate_density_mixture(x, x_range, precision, ...) } else { insight::format_error("method should be one of 'kernel', 'logspline', 'KernSmooth' or 'mixture'.") } kde } # Methods ----------------------------------------------------------------- #' @export estimate_density.numeric <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, by = NULL, ...) { if (!is.null(by)) { if (length(by) == 1) { insight::format_error(paste0( "`by` must be either the name of a group column if a data frame is entered as input,", " or in this case (where a single vector was passed) a vector of same length." )) } out <- estimate_density( data.frame(V1 = x, Group = by, stringsAsFactors = FALSE), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci, by = "Group", ... ) out$Parameter <- NULL return(out) } out <- .estimate_density( x, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci, ... ) class(out) <- .set_density_class(out) out } #' @rdname estimate_density #' @inheritParams p_direction #' @export estimate_density.data.frame <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, select = NULL, by = NULL, rvar_col = NULL, ...) { x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::estimate_density cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) obj_name <- insight::safe_deparse_symbol(substitute(x)) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) out <- .append_datagrid(out, x, long = TRUE) class(out) <- .set_density_class(out) return(out) } if (is.null(by)) { # No grouping ------------------- out <- .estimate_density_df( x = x, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci, select = select, ... ) } else { # Deal with by- grouping -------- groups <- insight::get_datagrid(x[, by, drop = FALSE], by = by) # Get combinations out <- data.frame() for (row in seq_len(nrow(groups))) { subdata <- datawizard::data_match(x, groups[row, , drop = FALSE]) subdata[names(groups)] <- NULL subdata <- .estimate_density_df( subdata, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci, select = select, ... ) out <- rbind(out, merge(subdata, groups[row, , drop = FALSE])) } } class(out) <- .set_density_df_class(out) out } #' @export estimate_density.draws <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, select = NULL, by = NULL, ...) { estimate_density( .posterior_draws_to_df(x), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, select = select, by = by ) } #' @export estimate_density.rvar <- estimate_density.draws #' @export estimate_density.grouped_df <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, select = NULL, ...) 
{ groups <- .group_vars(x) ungrouped_x <- as.data.frame(x) xlist <- split(ungrouped_x, ungrouped_x[groups]) out <- lapply(names(xlist), function(group) { dens <- estimate_density( xlist[[group]], method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci, select = select, ... ) dens$Group <- group dens }) do.call(rbind, out) } # to avoid class conflicts - e.g., numeric variables imported with the # haven package are of class "haven_labelled" and "double", which causes # problems with the generic or numeric method. #' @export estimate_density.double <- estimate_density.numeric #' @export estimate_density.emmGrid <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ...) { xdf <- insight::get_parameters(x) out <- estimate_density(xdf, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ... ) out <- .append_datagrid(out, x, long = TRUE) class(out) <- .set_density_class(out) out } #' @export estimate_density.emm_list <- estimate_density.emmGrid #' @export estimate_density.slopes <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ...) { xdf <- .get_marginaleffects_draws(x) out <- estimate_density(xdf, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ... ) out <- .append_datagrid(out, x, long = TRUE) class(out) <- .set_density_class(out) out } #' @export estimate_density.predictions <- estimate_density.slopes #' @export estimate_density.comparisons <- estimate_density.slopes #' @export estimate_density.stanreg <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", effects = "fixed", component = "location", parameters = NULL, ...) { out <- estimate_density( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- .set_density_class(out) out } #' @export estimate_density.stanfit <- estimate_density.stanreg #' @export estimate_density.blavaan <- estimate_density.stanreg #' @rdname estimate_density #' @export estimate_density.brmsfit <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", effects = "fixed", component = "conditional", parameters = NULL, ...) { out <- estimate_density( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- .set_density_class(out) out } #' @export estimate_density.MCMCglmm <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", parameters = NULL, ...) { nF <- x$Fixed$nfl out <- estimate_density( as.data.frame(x$Sol[, 1:nF, drop = FALSE]), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- .set_density_class(out) out } #' @export estimate_density.mcmc <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", parameters = NULL, ...) 
{ out <- estimate_density( insight::get_parameters(x, parameters = parameters), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- .set_density_class(out) out } #' @export estimate_density.bayesQR <- estimate_density.mcmc #' @export estimate_density.blrm <- estimate_density.mcmc #' @export estimate_density.bcplm <- estimate_density.mcmc #' @export estimate_density.BGGM <- estimate_density.mcmc #' @export estimate_density.mcmc.list <- estimate_density.mcmc #' @export estimate_density.bamlss <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", component = "all", parameters = NULL, ...) { out <- estimate_density( insight::get_parameters(x, component = component, parameters = parameters), method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- .set_density_class(out) out } #' Coerce to a Data Frame #' #' @inheritParams base::as.data.frame #' @method as.data.frame density #' @export as.data.frame.density <- function(x, ...) { data.frame(x = x$x, y = x$y) } # helper ------------------ .estimate_density_df <- function(x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, select = NULL, ...) { # TODO: replace by exposed select argument if (is.null(select)) { x <- .select_nums(x) } else { x <- datawizard::data_select(x, select, ...) } out <- sapply( x, estimate_density, method = method, precision = precision, extend = extend, extend_scale = extend_scale, bw = bw, ci = ci, simplify = FALSE ) for (i in names(out)) { if (nrow(out[[i]]) == 0) { insight::format_warning(paste0("`", i, "`, or one of its groups specified in `by`, is empty and has no density information.")) } else { out[[i]]$Parameter <- i } } out <- do.call(rbind, out) row.names(out) <- NULL out[, c("Parameter", "x", "y")] } #' Density Probability at a Given Value #' #' Compute the density value at a given point of a distribution (i.e., #' the value of the `y` axis of a value `x` of a distribution). #' #' @param posterior Vector representing a posterior distribution. #' @param x The value of which to get the approximate probability. #' @inheritParams estimate_density #' #' @examples #' library(bayestestR) #' posterior <- distribution_normal(n = 10) #' density_at(posterior, 0) #' density_at(posterior, c(0, 1)) #' @export density_at <- function(posterior, x, precision = 2^10, method = "kernel", ...) { posterior_density <- estimate_density(posterior, precision = precision, method = method, ...) stats::approx(posterior_density$x, posterior_density$y, xout = x)$y } # Different functions ----------------------------------------------------- .estimate_density_kernel <- function(x, x_range, precision, bw, ci = 0.95, ...) { # unsupported arguments raise warnings dots <- list(...) 
dots[c("effects", "component", "parameters")] <- NULL # Get the kernel density estimation (KDE) my_args <- c(dots, list( x = x, n = precision, bw = bw, from = x_range[1], to = x_range[2] )) fun <- get("density", asNamespace("stats")) kde <- suppressWarnings(do.call("fun", my_args)) my_df <- as.data.frame(kde) # Get CI (https://bookdown.org/egarpor/NP-UC3M/app-kde-ci.html) if (!is.null(ci)) { h <- kde$bw # Selected bandwidth # R(K) for a normal Rk <- 1 / (2 * sqrt(pi)) # Estimate the SD sd_kde <- sqrt(my_df$y * Rk / (length(x) * h)) # CI with estimated variance z_alpha <- stats::qnorm(ci) my_df$CI_low <- my_df$y - z_alpha * sd_kde my_df$CI_high <- my_df$y + z_alpha * sd_kde } my_df } .estimate_density_logspline <- function(x, x_range, precision, ...) { insight::check_if_installed("logspline") x_axis <- seq(x_range[1], x_range[2], length.out = precision) y <- logspline::dlogspline(x_axis, logspline::logspline(x, ...), ...) data.frame(x = x_axis, y = y) } .estimate_density_KernSmooth <- function(x, x_range, precision, ...) { insight::check_if_installed("KernSmooth") as.data.frame(KernSmooth::bkde(x, range.x = x_range, gridsize = precision, truncate = TRUE, ...)) } .estimate_density_mixture <- function(x, x_range, precision, ...) { insight::check_if_installed("mclust") x_axis <- seq(x_range[1], x_range[2], length.out = precision) y <- stats::predict(mclust::densityMclust(x, verbose = FALSE, ...), newdata = x_axis, ...) data.frame(x = x_axis, y = y) } .set_density_df_class <- function(out) { setdiff( unique(c("estimate_density_df", "see_estimate_density_df", class(out))), c("estimate_density", "see_estimate_density") ) } .set_density_class <- function(out) { if (is.null(out)) { return(NULL) } setdiff( unique(c("estimate_density", "see_estimate_density", class(out))), c("estimate_density_df", "see_estimate_density_df") ) } bayestestR/R/reexports.R0000644000176200001440000000044215052646230014761 0ustar liggesusers# DO NOT REMOVE # Re-exported generics for which the current package defines S3 methods #' @importFrom insight print_html #' @export insight::print_html #' @importFrom insight print_md #' @export insight::print_md #' @importFrom insight display #' @export insight::display bayestestR/R/hdi.R0000644000176200001440000004266115052646230013503 0ustar liggesusers#' Highest Density Interval (HDI) #' #' Compute the **Highest Density Interval (HDI)** of posterior distributions. #' All points within this interval have a higher probability density than points #' outside the interval. The HDI can be used in the context of uncertainty #' characterisation of posterior distributions as **Credible Interval (CI)**. #' #' @param x Vector representing a posterior distribution, or a data frame of such #' vectors. Can also be a Bayesian model. **bayestestR** supports a wide range #' of models (see, for example, `methods("hdi")`) and not all of those are #' documented in the 'Usage' section, because methods for other classes mostly #' resemble the arguments of the `.numeric` or `.data.frame`methods. #' @param ci Value or vector of probability of the (credible) interval - CI #' (between 0 and 1) to be estimated. Default to `.95` (`95%`). #' @param component Which type of parameters to return, such as parameters for #' the conditional model, the zero-inflated part of the model, the dispersion #' term, etc. See details in section _Model Components_. May be abbreviated. 
#' Note that the *conditional* component also refers to the *count* or *mean* #' component - names may differ, depending on the modeling package. There are #' three convenient shortcuts (not applicable to *all* model classes): #' - `component = "all"` returns all possible parameters. #' - If `component = "location"`, location parameters such as `conditional`, #' `zero_inflated`, `smooth_terms`, or `instruments` are returned (everything #' that are fixed or random effects - depending on the `effects` argument - #' but no auxiliary parameters). #' - For `component = "distributional"` (or `"auxiliary"`), components like #' `sigma`, `dispersion`, `beta` or `precision` (and other auxiliary #' parameters) are returned. #' @param parameters Regular expression pattern that describes the parameters #' that should be returned. Meta-parameters (like `lp__` or `prior_`) are #' filtered by default, so only parameters that typically appear in the #' `summary()` are returned. Use `parameters` to select specific parameters #' for the output. #' @param use_iterations Logical, if `TRUE` and `x` is a `get_predicted` object, #' (returned by [`insight::get_predicted()`]), the function is applied to the #' iterations instead of the predictions. This only applies to models that return #' iterations for predicted values (e.g., `brmsfit` models). #' @param verbose Toggle off warnings. #' @param ... Currently not used. #' @inheritParams insight::get_parameters.BFBayesFactor #' @inheritParams insight::get_parameters #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. #' #' @section Model components: #' #' Possible values for the `component` argument depend on the model class. #' Following are valid options: #' - `"all"`: returns all model components, applies to all models, but will only #' have an effect for models with more than just the conditional model #' component. #' - `"conditional"`: only returns the conditional component, i.e. "fixed #' effects" terms from the model. Will only have an effect for models with #' more than just the conditional model component. #' - `"smooth_terms"`: returns smooth terms, only applies to GAMs (or similar #' models that may contain smooth terms). #' - `"zero_inflated"` (or `"zi"`): returns the zero-inflation component. #' - `"location"`: returns location parameters such as `conditional`, #' `zero_inflated`, or `smooth_terms` (everything that are fixed or random #' effects - depending on the `effects` argument - but no auxiliary #' parameters). #' - `"distributional"` (or `"auxiliary"`): components like `sigma`, #' `dispersion`, `beta` or `precision` (and other auxiliary parameters) are #' returned. #' #' For models of class `brmsfit` (package **brms**), even more options are #' possible for the `component` argument, which are not all documented in detail #' here. See also [`?insight::find_parameters`](https://easystats.github.io/insight/reference/find_parameters.BGGM.html). #' #' @details Unlike equal-tailed intervals (see [`eti()`]) that typically exclude #' `2.5%` from each tail of the distribution and always include the median, the #' HDI is *not* equal-tailed and therefore always includes the mode(s) of posterior #' distributions. While this can be useful to better represent the credibility #' mass of a distribution, the HDI also has some limitations. See [`spi()`] for #' details. 
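#'
#' As a brief illustration of the difference, both intervals can be computed for
#' a skewed sample (a minimal sketch; the exact bounds will vary with the random
#' draw):
#'
#' ```
#' x <- rgamma(10000, shape = 2) # right-skewed distribution
#' eti(x) # equal-tailed interval
#' hdi(x) # highest density interval, shifted towards the mode
#' ```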
#' #' The [`95%` or `89%` Credible Intervals (CI)](https://easystats.github.io/bayestestR/articles/credible_interval.html) #' are two reasonable ranges to characterize the uncertainty related to the #' estimation (see [here](https://easystats.github.io/bayestestR/articles/credible_interval.html) #' for a discussion about the differences between these two values). #' #' The `89%` intervals (`ci = 0.89`) are deemed to be more stable than, for #' instance, `95%` intervals (_Kruschke, 2014_). An effective sample size #' of at least 10.000 is recommended if one wants to estimate `95%` intervals #' with high precision (_Kruschke, 2014, p. 183ff_). Unfortunately, the #' default number of posterior samples for most Bayes packages (e.g., `rstanarm` #' or `brms`) is only 4.000 (thus, you might want to increase it when fitting #' your model). Moreover, 89 indicates the arbitrariness of interval limits - #' its only remarkable property is being the highest prime number that does not #' exceed the already unstable `95%` threshold (_McElreath, 2015_). #' #' However, `95%` has some [advantages too](https://easystats.github.io/blog/posts/bayestestr_95/). #' For instance, it shares (in the case of a normal posterior distribution) an #' intuitive relationship with the standard deviation and it conveys a more #' accurate image of the (artificial) bounds of the distribution. Also, because #' it is wider, it makes analyses more conservative (i.e., the probability of #' covering zero is larger for the `95%` CI than for lower ranges such as `89%`), #' which is a good thing in the context of the reproducibility crisis. #' #' A `95%` equal-tailed interval (ETI) has `2.5%` of the distribution on either #' side of its limits. It indicates the 2.5th percentile and the 97.5th #' percentile. In symmetric distributions, the two methods of computing credible #' intervals, the ETI and the [HDI][hdi], return similar results. #' #' This is not the case for skewed distributions. Indeed, it is possible that #' parameter values in the ETI have lower credibility (are less probable) than #' parameter values outside the ETI. This property seems undesirable as a summary #' of the credible values in a distribution. #' #' On the other hand, the ETI range does change when transformations are applied #' to the distribution (for instance, for a log odds scale to probabilities): #' the lower and higher bounds of the transformed distribution will correspond #' to the transformed lower and higher bounds of the original distribution. #' On the contrary, applying transformations to the distribution will change #' the resulting HDI. #' #' @inherit ci return #' #' @family ci #' @seealso Other interval functions, such as [`hdi()`], [`eti()`], [`bci()`], #' [`spi()`], [`si()`]. 
#' #' @examplesIf all(insight::check_if_installed(c("rstanarm", "brms", "emmeans", "BayesFactor"), quietly = TRUE)) #' library(bayestestR) #' #' posterior <- rnorm(1000) #' hdi(posterior, ci = 0.89) #' hdi(posterior, ci = c(0.80, 0.90, 0.95)) #' #' hdi(iris[1:4]) #' hdi(iris[1:4], ci = c(0.80, 0.90, 0.95)) #' \donttest{ #' model <- suppressWarnings( #' rstanarm::stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) #' ) #' hdi(model) #' hdi(model, ci = c(0.80, 0.90, 0.95)) #' #' hdi(emmeans::emtrends(model, ~1, "wt", data = mtcars)) #' #' model <- brms::brm(mpg ~ wt + cyl, data = mtcars) #' hdi(model) #' hdi(model, ci = c(0.80, 0.90, 0.95)) #' #' bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) #' hdi(bf) #' hdi(bf, ci = c(0.80, 0.90, 0.95)) #' } #' @author Credits go to **ggdistribute** and [**HDInterval**](https://github.com/mikemeredith/HDInterval). #' #' @references #' - Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, #' and Stan. Academic Press. #' - McElreath, R. (2015). Statistical rethinking: A Bayesian course with #' examples in R and Stan. Chapman and Hall/CRC. #' #' @export hdi <- function(x, ...) { UseMethod("hdi") } #' @export hdi.default <- function(x, ...) { insight::format_error(paste0( "'hdi()' is not yet implemented for objects of class '", class(x)[1], "'." )) } #' @rdname hdi #' @export hdi.numeric <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- do.call(rbind, lapply(ci, function(i) { .hdi(x, ci = i, verbose = verbose) })) class(out) <- unique(c("bayestestR_hdi", "see_hdi", "bayestestR_ci", "see_ci", class(out))) attr(out, "data") <- x out } #' @rdname hdi #' @inheritParams p_direction #' @export hdi.data.frame <- function(x, ci = 0.95, rvar_col = NULL, verbose = TRUE, ...) { obj_name <- insight::safe_deparse_symbol(substitute(x)) x_rvar <- .possibly_extract_rvar_col(x, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::hdi cl$x <- x_rvar cl$rvar_col <- NULL out <- eval.parent(cl) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, x, long = length(ci) > 1L)) } dat <- .compute_interval_dataframe(x = x, ci = ci, verbose = verbose, fun = "hdi") attr(dat, "object_name") <- obj_name dat } #' @export hdi.draws <- function(x, ci = 0.95, verbose = TRUE, ...) { dat <- .compute_interval_dataframe( x = .posterior_draws_to_df(x), ci = ci, verbose = verbose, fun = "hdi" ) attr(dat, "object_name") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export hdi.rvar <- hdi.draws #' @export hdi.MCMCglmm <- function(x, ci = 0.95, verbose = TRUE, ...) { ci_fun <- .check_ci_fun(list(...)) nF <- x$Fixed$nfl d <- as.data.frame(x$Sol[, 1:nF, drop = FALSE]) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = ci_fun) attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export hdi.bamlss <- function(x, ci = 0.95, component = "all", verbose = TRUE, ...) { ci_fun <- .check_ci_fun(list(...)) d <- insight::get_parameters(x, component = component) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = ci_fun) dat <- .add_clean_parameters_attribute(dat, x) attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export hdi.mcmc <- function(x, ci = 0.95, verbose = TRUE, ...) 
{ ci_fun <- .check_ci_fun(list(...)) d <- as.data.frame(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = ci_fun) attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export hdi.bcplm <- function(x, ci = 0.95, verbose = TRUE, ...) { ci_fun <- .check_ci_fun(list(...)) d <- insight::get_parameters(x) dat <- .compute_interval_dataframe(x = d, ci = ci, verbose = verbose, fun = ci_fun) attr(dat, "data") <- insight::safe_deparse_symbol(substitute(x)) dat } #' @export hdi.bayesQR <- hdi.bcplm #' @export hdi.blrm <- hdi.bcplm #' @export hdi.mcmc.list <- hdi.bcplm #' @export hdi.BGGM <- hdi.bcplm #' @export hdi.sim.merMod <- function(x, ci = 0.95, effects = "fixed", parameters = NULL, verbose = TRUE, ...) { ci_fun <- .check_ci_fun(list(...)) dat <- .compute_interval_simMerMod( x = x, ci = ci, effects = effects, parameters = parameters, verbose = verbose, fun = ci_fun ) out <- dat$result attr(out, "data") <- dat$data out } #' @export hdi.sim <- function(x, ci = 0.95, parameters = NULL, verbose = TRUE, ...) { ci_fun <- .check_ci_fun(list(...)) dat <- .compute_interval_sim( x = x, ci = ci, parameters = parameters, verbose = verbose, fun = ci_fun ) out <- dat$result attr(out, "data") <- dat$data out } #' @export hdi.emmGrid <- function(x, ci = 0.95, verbose = TRUE, ...) { xdf <- insight::get_parameters(x) out <- hdi(xdf, ci = ci, verbose = verbose, ...) out <- .append_datagrid(out, x, long = length(ci) > 1L) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export hdi.emm_list <- hdi.emmGrid #' @export hdi.slopes <- function(x, ci = 0.95, verbose = TRUE, ...) { xrvar <- .get_marginaleffects_draws(x) out <- hdi(xrvar, ci = ci, verbose = verbose, ...) out <- .append_datagrid(out, x, long = length(ci) > 1L) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @export hdi.comparisons <- hdi.slopes #' @export hdi.predictions <- hdi.slopes #' @export hdi.stanreg <- function(x, ci = 0.95, effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ...) { cleaned_parameters <- .get_cleaned_parameters(x, ...) out <- .prepare_output( hdi( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), ci = ci, verbose = verbose, ... ), cleaned_parameters, inherits(x, "stanmvreg") ) attr(out, "clean_parameters") <- cleaned_parameters attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- unique(c("bayestestR_hdi", "see_hdi", class(out))) out } #' @export hdi.stanfit <- hdi.stanreg #' @export hdi.blavaan <- hdi.stanreg #' @rdname hdi #' @export hdi.brmsfit <- function(x, ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ...) { cleaned_parameters <- .get_cleaned_parameters(x, ...) out <- .prepare_output( hdi( insight::get_parameters( x, effects = effects, component = component, parameters = parameters ), ci = ci, verbose = verbose, ... ), cleaned_parameters ) attr(out, "clean_parameters") <- cleaned_parameters attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) class(out) <- unique(c("bayestestR_hdi", "see_hdi", class(out))) out } #' @export hdi.BFBayesFactor <- function(x, ci = 0.95, verbose = TRUE, ...) { out <- hdi(insight::get_parameters(x), ci = ci, verbose = verbose, ...) 
attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) out } #' @rdname hdi #' @export hdi.get_predicted <- function(x, ci = 0.95, use_iterations = FALSE, verbose = TRUE, ...) { if (isTRUE(use_iterations)) { if ("iterations" %in% names(attributes(x))) { out <- hdi(as.data.frame(t(attributes(x)$iterations)), ci = ci, verbose = verbose, ...) } else { insight::format_error("No iterations present in the output.") } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) } else { out <- hdi(as.numeric(x), ci = ci, verbose = verbose, ...) } out } # Helper ------------------------------------------------------------------ #' @keywords internal .hdi <- function(x, ci = 0.95, verbose = TRUE) { check_ci <- .check_ci_argument(x, ci, verbose) if (!is.null(check_ci)) { return(check_ci) } # removes NA/NaN, but not Inf x_sorted <- unname(sort.int(x, method = "quick")) # See https://github.com/easystats/bayestestR/issues/39 window_size <- ceiling(ci * length(x_sorted)) if (window_size < 2) { if (verbose) { insight::format_alert("`ci` is too small or x does not contain enough data points, returning NAs.") } return(data.frame( CI = ci, CI_low = NA, CI_high = NA )) } nCIs <- length(x_sorted) - window_size if (nCIs < 1) { if (verbose) { insight::format_alert("`ci` is too large or x does not contain enough data points, returning NAs.") } return(data.frame( CI = ci, CI_low = NA, CI_high = NA )) } ci.width <- sapply(1:nCIs, function(.x) x_sorted[.x + window_size] - x_sorted[.x]) # find minimum of width differences, check for multiple minima min_i <- which(ci.width == min(ci.width)) n_candies <- length(min_i) if (n_candies > 1) { if (any(diff(sort(min_i)) != 1)) { if (verbose) { insight::format_alert("Identical densities found along different segments of the distribution, choosing rightmost.") } min_i <- max(min_i) } else { min_i <- floor(mean(min_i)) } } data.frame( CI = ci, CI_low = x_sorted[min_i], CI_high = x_sorted[min_i + window_size] ) } bayestestR/R/overlap.R0000644000176200001440000000511214706241121014370 0ustar liggesusers#' Overlap Coefficient #' #' A method to calculate the overlap coefficient between two empirical #' distributions (that can be used as a measure of similarity between two #' samples). #' #' @param x Vector of x values. #' @param y Vector of x values. #' @param method_auc Area Under the Curve (AUC) estimation method. See [area_under_curve()]. #' @param method_density Density estimation method. See [estimate_density()]. #' @inheritParams estimate_density #' #' @examples #' library(bayestestR) #' #' x <- distribution_normal(1000, 2, 0.5) #' y <- distribution_normal(1000, 0, 1) #' #' overlap(x, y) #' plot(overlap(x, y)) #' @export overlap <- function(x, y, method_density = "kernel", method_auc = "trapezoid", precision = 2^10, extend = TRUE, extend_scale = 0.1, ...) { # Generate densities dx <- estimate_density( x, method = method_density, precision = precision, extend = extend, extend_scale = extend_scale, ... ) dy <- estimate_density( y, method = method_density, precision = precision, extend = extend, extend_scale = extend_scale, ... 
) # Create density estimation functions fx <- stats::approxfun(dx$x, dx$y, method = "linear", rule = 2) fy <- stats::approxfun(dy$x, dy$y, method = "linear", rule = 2) x_axis <- seq(min(c(dx$x, dy$x)), max(c(dx$x, dy$x)), length.out = precision) approx_data <- data.frame(x = x_axis, y1 = fx(x_axis), y2 = fy(x_axis)) # calculate intersection densities approx_data$intersection <- pmin(approx_data$y1, approx_data$y2) approx_data$exclusion <- pmax(approx_data$y1, approx_data$y2) # integrate areas under curves area_intersection <- area_under_curve( approx_data$x, approx_data$intersection, method = method_auc ) # area_exclusion <- area_under_curve(data$x, data$exclusion, method = method_auc) # compute overlap coefficient overlap <- area_intersection attr(overlap, "data") <- approx_data class(overlap) <- c("overlap", class(overlap)) overlap } #' @export print.overlap <- function(x, ...) { insight::print_color("# Overlap\n\n", "blue") cat(sprintf("%.1f%%\n", 100 * as.numeric(x))) } #' @export plot.overlap <- function(x, ...) { # Can be improved through see plot_data <- attributes(x)$data graphics::plot(plot_data$x, plot_data$exclusion, type = "l") graphics::polygon(plot_data$x, plot_data$intersection, col = "red") } bayestestR/R/simulate_data.R0000644000176200001440000001116214746106624015552 0ustar liggesusers#' Data Simulation #' #' Simulate data with specific characteristics. #' #' @param n The number of observations to be generated. #' @param r A value or vector corresponding to the desired correlation #' coefficients. #' @param d A value or vector corresponding to the desired difference between #' the groups. #' @param mean A value or vector corresponding to the mean of the variables. #' @param sd A value or vector corresponding to the SD of the variables. #' @param names A character vector of desired variable names. #' @param ... Arguments passed to or from other methods. #' @examplesIf requireNamespace("MASS", quietly = TRUE) #' #' # Correlation -------------------------------- #' data <- simulate_correlation(r = 0.5) #' plot(data$V1, data$V2) #' cor.test(data$V1, data$V2) #' summary(lm(V2 ~ V1, data = data)) #' #' # Specify mean and SD #' data <- simulate_correlation(r = 0.5, n = 50, mean = c(0, 1), sd = c(0.7, 1.7)) #' cor.test(data$V1, data$V2) #' round(c(mean(data$V1), sd(data$V1)), 1) #' round(c(mean(data$V2), sd(data$V2)), 1) #' summary(lm(V2 ~ V1, data = data)) #' #' # Generate multiple variables #' cor_matrix <- matrix( #' c( #' 1.0, 0.2, 0.4, #' 0.2, 1.0, 0.3, #' 0.4, 0.3, 1.0 #' ), #' nrow = 3 #' ) #' #' data <- simulate_correlation(r = cor_matrix, names = c("y", "x1", "x2")) #' cor(data) #' summary(lm(y ~ x1, data = data)) #' #' # t-test -------------------------------- #' data <- simulate_ttest(n = 30, d = 0.3) #' plot(data$V1, data$V0) #' round(c(mean(data$V1), sd(data$V1)), 1) #' diff(t.test(data$V1 ~ data$V0)$estimate) #' summary(lm(V1 ~ V0, data = data)) #' summary(glm(V0 ~ V1, data = data, family = "binomial")) #' #' # Difference -------------------------------- #' data <- simulate_difference(n = 30, d = 0.3) #' plot(data$V1, data$V0) #' round(c(mean(data$V1), sd(data$V1)), 1) #' diff(t.test(data$V1 ~ data$V0)$estimate) #' summary(lm(V1 ~ V0, data = data)) #' summary(glm(V0 ~ V1, data = data, family = "binomial")) #' @export simulate_correlation <- function(n = 100, r = 0.5, mean = 0, sd = 1, names = NULL, ...) 
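# Minimal sketch of the overlap computation above, for illustration only:
# interpolate the two estimated densities on a common grid, take the
# pointwise minimum, and integrate it with the trapezoid rule.
x <- distribution_normal(1000, 2, 0.5)
y <- distribution_normal(1000, 0, 1)
dx <- estimate_density(x)
dy <- estimate_density(y)
fx <- stats::approxfun(dx$x, dx$y, method = "linear", rule = 2)
fy <- stats::approxfun(dy$x, dy$y, method = "linear", rule = 2)
grid_x <- seq(min(c(dx$x, dy$x)), max(c(dx$x, dy$x)), length.out = 1024)
area_under_curve(grid_x, pmin(fx(grid_x), fy(grid_x)), method = "trapezoid")
overlap(x, y)  # same idea, wrapped with printing and plotting support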
{ insight::check_if_installed("MASS") # Define matrix if (is.matrix(r)) { if (isSymmetric(r)) { if (any(r > 1)) { insight::format_error("`r` should only contain values between -1 and 1.") } else { dispersion <- r } } else { insight::format_error("`r` should be a symetric matrix (relative to the diagonal).") } } else if (length(r) == 1L) { if (abs(r) > 1) { insight::format_error("`r` should only contain values between -1 and 1.") } else { dispersion <- matrix(c(1, r, r, 1), nrow = 2) } } else { insight::format_error("`r` should be a value (e.g., r = 0.5) or a square matrix.") } # Get data out <- MASS::mvrnorm( n = n, mu = rep_len(0, ncol(dispersion)), # Means of variables Sigma = dispersion, empirical = TRUE ) # Adjust scale if (any(sd != 1)) { out <- t(t(out) * rep_len(sd, ncol(dispersion))) } # Adjust mean if (any(mean != 0)) { out <- t(t(out) + rep_len(mean, ncol(dispersion))) } out <- as.data.frame(out) # Rename if (!is.null(names) && length(names) == ncol(out)) { names(out) <- names } out } #' @rdname simulate_correlation #' @export simulate_ttest <- function(n = 100, d = 0.5, names = NULL, ...) { x <- distribution_normal(n, 0, 1) # Continuous variables z <- 0 + d * x # Linear combination pr <- 1 / (1 + exp(-z)) # Pass it through an inverse logit function y <- distribution_binomial(n, 1, pr, random = 3) # Bernoulli response variable out <- data.frame(y = as.factor(y), x = x) names(out) <- paste0("V", 0:(ncol(out) - 1)) if (!is.null(names) && length(names) == ncol(out)) { names(out) <- names } out } #' @rdname simulate_correlation #' @export simulate_difference <- function(n = 100, d = 0.5, names = NULL, ...) { x <- distribution_normal(round(n / 2), -d / 2, 1) y <- distribution_normal(round(n / 2), d / 2, 1) out <- data.frame( y = as.factor(rep(c(0, 1), each = round(n / 2))), x = c(x, y) ) names(out) <- paste0("V", 0:(ncol(out) - 1)) if (!is.null(names) && length(names) == ncol(out)) { names(out) <- names } out } # Simulate regression: see https://stats.stackexchange.com/questions/363623/simulate-regression-with-specified-standardized-coefficients/508107#508107 bayestestR/R/sexit.R0000644000176200001440000003323514746106624014077 0ustar liggesusers#' Sequential Effect eXistence and sIgnificance Testing (SEXIT) #' #' @description #' #' The SEXIT is a new framework to describe Bayesian effects, guiding which #' indices to use. Accordingly, the `sexit()` function returns the minimal (and #' optimal) required information to describe models' parameters under a Bayesian #' framework. It includes the following indices: #' #' - Centrality: the median of the posterior distribution. In #' probabilistic terms, there is `50%` of probability that the effect is higher #' and lower. See [`point_estimate()`][point_estimate]. #' #' - Uncertainty: the `95%` Highest Density Interval (HDI). In #' probabilistic terms, there is `95%` of probability that the effect is #' within this confidence interval. See [`ci()`][ci]. #' #' - Existence: The probability of direction allows to quantify the #' certainty by which an effect is positive or negative. It is a critical #' index to show that an effect of some manipulation is not harmful (for #' instance in clinical studies) or to assess the direction of a link. See #' [`p_direction()`][p_direction]. #' #' - Significance: Once existence is demonstrated with high certainty, we #' can assess whether the effect is of sufficient size to be considered as #' significant (i.e., not negligible). 
This is a useful index to determine #' which effects are actually important and worthy of discussion in a given #' process. See [`p_significance()`][p_significance]. #' #' - Size: Finally, this index gives an idea about the strength of an #' effect. However, beware, as studies have shown that a big effect size can #' be also suggestive of low statistical power (see details section). #' #' @inheritParams p_direction #' @inheritParams hdi #' @param significant,large The threshold values to use for significant and #' large probabilities. If left to 'default', will be selected through #' [`sexit_thresholds()`][sexit_thresholds]. See the details section below. #' #' @details #' #' \subsection{Rationale}{ #' The assessment of "significance" (in its broadest meaning) is a pervasive #' issue in science, and its historical index, the p-value, has been strongly #' criticized and deemed to have played an important role in the replicability #' crisis. In reaction, more and more scientists have tuned to Bayesian methods, #' offering an alternative set of tools to answer their questions. However, the #' Bayesian framework offers a wide variety of possible indices related to #' "significance", and the debate has been raging about which index is the best, #' and which one to report. #' #' This situation can lead to the mindless reporting of all possible indices #' (with the hopes that with that the reader will be satisfied), but often #' without having the writer understanding and interpreting them. It is indeed #' complicated to juggle between many indices with complicated definitions and #' subtle differences. #' #' SEXIT aims at offering a practical framework for Bayesian effects reporting, #' in which the focus is put on intuitiveness, explicitness and usefulness of #' the indices' interpretation. To that end, we suggest a system of description #' of parameters that would be intuitive, easy to learn and apply, #' mathematically accurate and useful for taking decision. #' #' Once the thresholds for significance (i.e., the ROPE) and the one for a #' "large" effect are explicitly defined, the SEXIT framework does not make any #' interpretation, i.e., it does not label the effects, but just sequentially #' gives 3 probabilities (of direction, of significance and of being large, #' respectively) as-is on top of the characteristics of the posterior (using the #' median and HDI for centrality and uncertainty description). Thus, it provides #' a lot of information about the posterior distribution (through the mass of #' different 'sections' of the posterior) in a clear and meaningful way. #' } #' #' \subsection{Threshold selection}{ #' One of the most important thing about the SEXIT framework is that it relies #' on two "arbitrary" thresholds (i.e., that have no absolute meaning). They #' are the ones related to effect size (an inherently subjective notion), #' namely the thresholds for significant and large effects. They are set, by #' default, to `0.05` and `0.3` of the standard deviation of the outcome #' variable (tiny and large effect sizes for correlations according to Funder #' and Ozer, 2019). However, these defaults were chosen by lack of a better #' option, and might not be adapted to your case. Thus, they are to be handled #' with care, and the chosen thresholds should always be explicitly reported #' and justified. 
#' #' - For **linear models (lm)**, this can be generalised to \ifelse{html}{\out{0.05 * SDy}}{\eqn{[0.05*SD_{y}]}} and \ifelse{html}{\out{0.3 * SDy}}{\eqn{[0.3*SD_{y}]}} for significant and large effects, respectively. #' - For **logistic models**, the parameters expressed in log odds ratio can be converted to standardized difference through the formula \ifelse{html}{\out{π/√(3)}}{\eqn{\pi/\sqrt{3}}}, resulting a threshold of `0.09` and `0.54`. #' - For other models with **binary outcome**, it is strongly recommended to manually specify the rope argument. Currently, the same default is applied that for logistic models. #' - For models from **count data**, the residual variance is used. This is a rather experimental threshold and is probably often similar to `0.05` and `0.3`, but should be used with care! #' - For **t-tests**, the standard deviation of the response is used, similarly to linear models (see above). #' - For **correlations**,`0.05` and `0.3` are used. #' - For all other models, `0.05` and `0.3` are used, but it is strongly advised to specify it manually. #' } #' #' \subsection{Examples}{ #' The three values for existence, significance and size provide a useful #' description of the posterior distribution of the effects. Some possible #' scenarios include: #' #' - The probability of existence is low, but the probability of being large is high: it suggests that the posterior is very wide (covering large territories on both side of 0). The statistical power might be too low, which should warrant any confident conclusion. #' - The probability of existence and significance is high, but the probability of being large is very small: it suggests that the effect is, with high confidence, not large (the posterior is mostly contained between the significance and the large thresholds). #' - The 3 indices are very low: this suggests that the effect is null with high confidence (the posterior is closely centred around 0). #' } #' #' @return A dataframe and text as attribute. #' #' @references #' #' - Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: #' Describing Effects and their Uncertainty, Existence and Significance within #' the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. \doi{10.21105/joss.01541} #' #' - Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect #' Existence and Significance in the Bayesian Framework. Frontiers in Psychology #' 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} #' #' @examples #' \donttest{ #' library(bayestestR) #' #' s <- sexit(rnorm(1000, -1, 1)) #' s #' print(s, summary = TRUE) #' #' s <- sexit(iris) #' s #' print(s, summary = TRUE) #' #' if (require("rstanarm")) { #' model <- suppressWarnings(rstanarm::stan_glm(mpg ~ wt * cyl, #' data = mtcars, #' iter = 400, refresh = 0 #' )) #' s <- sexit(model) #' s #' print(s, summary = TRUE) #' } #' } #' @export sexit <- function(x, significant = "default", large = "default", ci = 0.95, ...) { thresholds <- .sexit_preprocess(x, significant, large, ...) 
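# Minimal sketch of the three SEXIT probabilities computed step by step for a
# single posterior sample, next to the wrapped sexit() call assembled below.
# The 0.05 and 0.3 cut-offs are the documented defaults and only illustrative.
post <- rnorm(1000, 0.25, 0.4)
point_estimate(post, "median")               # centrality
ci(post, ci = 0.95, method = "ETI")          # uncertainty
p_direction(post)                            # existence
p_significance(post, threshold = 0.05)       # significance (beyond +/- 0.05)
p_significance(post, threshold = 0.3)        # size (beyond +/- 0.3)
sexit(post, significant = 0.05, large = 0.3) # the same indices, assembled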
significant <- thresholds$significant large <- thresholds$large thresholds_text <- thresholds$text # Description centrality <- point_estimate(x, "median") centrality$Effects <- centrality$Component <- NULL centrality_text <- paste0("Median = ", insight::format_value(centrality$Median)) direction <- ifelse(centrality$Median < 0, "negative", "positive") uncertainty <- ci(x, ci = ci, method = "ETI", ...)[c("CI", "CI_low", "CI_high")] uncertainty_text <- insight::format_ci(uncertainty$CI_low, uncertainty$CI_high, uncertainty$CI) # Indices existence_rez <- as.numeric(p_direction(x, ...)) existence_value <- insight::format_value(existence_rez, as_percent = TRUE) existence_threshold <- ifelse(direction == "negative", "< 0", "> 0") sig_rez <- as.numeric(p_significance(x, threshold = significant, ...)) sig_value <- insight::format_value(sig_rez, as_percent = TRUE) sig_threshold <- ifelse(direction == "negative", -1 * significant, significant) sig_threshold <- paste0(ifelse(direction == "negative", "< ", "> "), insight::format_value(sig_threshold)) large_rez <- as.numeric(p_significance(x, threshold = large, ...)) large_value <- insight::format_value(large_rez, as_percent = TRUE) large_threshold <- ifelse(direction == "negative", -1 * large, large) large_threshold <- paste0(ifelse(direction == "negative", "< ", "> "), insight::format_value(large_threshold)) if ("Parameter" %in% names(centrality)) { parameters <- centrality$Parameter } else { parameters <- "The effect" } text_full <- paste0( parameters, " (", centrality_text, ", ", uncertainty_text, ") has a ", existence_value, " probability of being ", direction, " (", existence_threshold, "), ", sig_value, " of being significant (", sig_threshold, "), and ", large_value, " of being large (", large_threshold, ")" ) text_short <- paste0( parameters, " (", centrality_text, ", ", uncertainty_text, ") has ", existence_value, ", ", sig_value, " and ", large_value, " probability of being ", direction, " (", existence_threshold, "), significant (", sig_threshold, ") and large (", large_threshold, ")" ) out <- cbind( centrality, as.data.frame(uncertainty), data.frame(Direction = existence_rez), data.frame(Significance = sig_rez), data.frame(Large = large_rez) ) # Prepare output attr(out, "sexit_info") <- "Following the Sequential Effect eXistence and sIgnificance Testing (SEXIT) framework, we report the median of the posterior distribution and its 95% CI (Highest Density Interval), along the probability of direction (pd), the probability of significance and the probability of being large." attr(out, "sexit_ci_method") <- "ETI" attr(out, "sexit_significance") <- significant attr(out, "sexit_large") <- large attr(out, "sexit_textlong") <- text_full attr(out, "sexit_textshort") <- text_short attr(out, "sexit_thresholds") <- thresholds_text pretty_cols <- c( "Median", paste0(insight::format_value(ci * 100, protect_integers = TRUE), "% CI"), "Direction", paste0("Significance (> |", insight::format_value(significant), "|)"), paste0("Large (> |", insight::format_value(large), "|)") ) if ("Parameter" %in% names(out)) pretty_cols <- c("Parameter", pretty_cols) attr(out, "pretty_cols") <- pretty_cols attr(out, "data") <- x class(out) <- unique(c("sexit", "see_sexit", class(out))) out } #' @keywords internal .sexit_preprocess <- function(x, significant = "default", large = "default", ...) 
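# Minimal illustration of where the default cut-offs come from (see the
# documentation above): for a Gaussian outcome they correspond to 0.05 and
# 0.3 of the outcome's standard deviation. The values below are only a
# sketch, using mtcars$mpg as an example outcome.
sd_y <- sd(mtcars$mpg)
c(significant = 0.05 * sd_y, large = 0.3 * sd_y)
# sexit_thresholds() derives comparable values directly from a fitted model.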
{ thresholds <- sexit_thresholds(x) if (significant == "default") significant <- thresholds[1] if (large == "default") large <- thresholds[2] suppressWarnings({ resp <- .safe(insight::get_response(x, type = "mf")) }) suppressWarnings({ info <- .safe(insight::model_info(x, verbose = FALSE)) }) if (!is.null(resp) && !is.null(info) && info$is_linear) { sd1 <- significant / stats::sd(resp, na.rm = TRUE) sd2 <- large / stats::sd(resp, na.rm = TRUE) text_sd <- paste0( " (corresponding respectively to ", insight::format_value(sd1), " and ", insight::format_value(sd2), " of the outcome's SD)" ) } else { text_sd <- "" } thresholds <- paste0( "The thresholds beyond which the effect is considered ", "as significant (i.e., non-negligible) and large are |", insight::format_value(significant), "| and |", insight::format_value(large), "|", text_sd, "." ) list(significant = significant, large = large, text = thresholds) } #' @export print.sexit <- function(x, summary = FALSE, digits = 2, ...) { orig_x <- x # Long if (isFALSE(summary)) { insight::print_color(paste0("# ", attributes(x)$sexit_info, " ", attributes(x)$sexit_thresholds, "\n\n"), "blue") text <- attributes(x)$sexit_textlong if (length(text) > 1) text <- paste0(paste0("- ", text), collapse = "\n") insight::print_color(text, "yellow") cat("\n\n") df <- data.frame(Median = x$Median, CI = insight::format_ci(x$CI_low, x$CI_high, NULL)) if ("Parameter" %in% names(x)) { df <- cbind(data.frame(Parameter = x$Parameter), df, x[c("Direction", "Significance", "Large")]) } else { df <- cbind(df, x[c("Direction", "Significance", "Large")]) } names(df) <- attributes(x)$pretty_cols .print_data_frame(df, digits = digits, ...) # Short } else { insight::print_color(paste0("# ", attributes(x)$sexit_thresholds, "\n\n"), "blue") text <- attributes(x)$sexit_textshort if (length(text) > 1) text <- paste0(paste0("- ", text), collapse = "\n") cat(text) } invisible(orig_x) } bayestestR/R/describe_posterior.R0000644000176200001440000013774615052646230016636 0ustar liggesusers#' Describe Posterior Distributions #' #' Compute indices relevant to describe and characterize the posterior distributions. #' #' @param posterior A vector, data frame or model of posterior draws. #' **bayestestR** supports a wide range of models (see `methods("describe_posterior")`) #' and not all of those are documented in the 'Usage' section, because methods #' for other classes mostly resemble the arguments of the `.numeric` method. #' @param ci_method The type of index used for Credible Interval. Can be `"ETI"` #' (default, see [`eti()`]), `"HDI"` (see [`hdi()`]), `"BCI"` (see [`bci()`]), #' `"SPI"` (see [`spi()`]), or `"SI"` (see [`si()`]). #' @param test The indices of effect existence to compute. Character (vector) or #' list with one or more of these options: `"p_direction"` (or `"pd"`), #' `"rope"`, `"p_map"`, `"p_significance"` (or `"ps"`), `"p_rope"`, #' `"equivalence_test"` (or `"equitest"`), `"bayesfactor"` (or `"bf"`) or #' `"all"` to compute all tests. For each "test", the corresponding #' **bayestestR** function is called (e.g. [`rope()`] or [`p_direction()`]) #' and its results included in the summary output. #' @param rope_range ROPE's lower and higher bounds. Should be a vector of two #' values (e.g., `c(-0.1, 0.1)`), `"default"` or a list of numeric vectors of #' the same length as numbers of parameters. If `"default"`, the bounds are #' set to `x +- 0.1*SD(response)`. 
#' @param rope_ci The Credible Interval (CI) probability, corresponding to the #' proportion of HDI, to use for the percentage in ROPE. #' @param keep_iterations If `TRUE`, will keep all iterations (draws) of #' bootstrapped or Bayesian models. They will be added as additional columns #' named `iter_1, iter_2, ...`. You can reshape them to a long format by #' running [`reshape_iterations()`]. #' @param bf_prior Distribution representing a prior for the computation of #' Bayes factors / SI. Used if the input is a posterior, otherwise (in the #' case of models) ignored. #' @param priors Add the prior used for each parameter. #' #' @inheritParams point_estimate #' @inheritParams ci #' @inheritParams si #' @inheritParams hdi #' #' @inheritSection hdi Model components #' #' @details #' One or more components of point estimates (like posterior mean or median), #' intervals and tests can be omitted from the summary output by setting the #' related argument to `NULL`. For example, `test = NULL` and `centrality = #' NULL` would only return the HDI (or CI). #' #' @references #' - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019). #' *Indices of Effect Existence and Significance in the Bayesian Framework*. #' Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} #' - [Region of Practical Equivalence (ROPE)](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html) #' - [Bayes factors](https://easystats.github.io/bayestestR/articles/bayes_factors.html) #' #' @examplesIf all(insight::check_if_installed(c("logspline", "rstanarm", "emmeans", "BayesFactor"), quietly = TRUE)) #' library(bayestestR) #' #' x <- rnorm(1000) #' describe_posterior(x, verbose = FALSE) #' describe_posterior(x, #' centrality = "all", #' dispersion = TRUE, #' test = "all", #' verbose = FALSE #' ) #' describe_posterior(x, ci = c(0.80, 0.90), verbose = FALSE) #' #' df <- data.frame(replicate(4, rnorm(100))) #' describe_posterior(df, verbose = FALSE) #' describe_posterior( #' df, #' centrality = "all", #' dispersion = TRUE, #' test = "all", #' verbose = FALSE #' ) #' describe_posterior(df, ci = c(0.80, 0.90), verbose = FALSE) #' #' df <- data.frame(replicate(4, rnorm(20))) #' head(reshape_iterations( #' describe_posterior(df, keep_iterations = TRUE, verbose = FALSE) #' )) #' #' \donttest{ #' # rstanarm models #' # ----------------------------------------------- #' model <- suppressWarnings( #' rstanarm::stan_glm( #' mpg ~ wt + gear, #' data = mtcars, chains = 2, iter = 200, #' refresh = 0 #' ) #' ) #' describe_posterior(model) #' describe_posterior(model, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(model, ci = c(0.80, 0.90)) #' describe_posterior(model, rope_range = list(c(-10, 5), c(-0.2, 0.2), "default")) #' #' # emmeans estimates #' # ----------------------------------------------- #' describe_posterior(emmeans::emtrends(model, ~1, "wt")) #' #' # BayesFactor objects #' # ----------------------------------------------- #' bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) #' describe_posterior(bf) #' describe_posterior(bf, centrality = "all", dispersion = TRUE, test = "all") #' describe_posterior(bf, ci = c(0.80, 0.90)) #' } #' @export describe_posterior <- function(posterior, ...) { UseMethod("describe_posterior") } #' @export describe_posterior.default <- function(posterior, ...) 
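# Minimal sketch of the behaviour described in the 'Details' section above:
# single summary components can be dropped by setting the matching argument
# to NULL, e.g. returning only the credible interval, or only selected tests.
post <- rnorm(1000)
describe_posterior(post, centrality = NULL, test = NULL, ci = 0.89, verbose = FALSE)
describe_posterior(post, test = c("p_direction", "p_significance"), verbose = FALSE)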
{ insight::format_error( paste0("`describe_posterior()` is not yet implemented for objects of class `", class(posterior)[1], "`.") ) } #' @keywords internal .describe_posterior <- function(x, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, verbose = TRUE, ...) { if (is.null(x)) { if (verbose) { insight::format_warning("Could not extract posterior samples.") } return(NULL) } # we need this information from the original object if (.check_if_need_to_compute_rope_range(rope_range, test)) { rope_range <- rope_range(x, verbose = verbose, ...) } if (!is.data.frame(x) && !is.numeric(x)) { is_stanmvreg <- inherits(x, "stanmvreg") cleaned_parameters <- insight::clean_parameters(x) # rename to use `x` in bayes factor later x_df <- insight::get_parameters(x, ...) } else { cleaned_parameters <- NULL x_df <- x } # Arguments fixes if (!is.null(centrality) && length(centrality) == 1 && (centrality == "none" || isFALSE(centrality))) { centrality <- NULL } if (!is.null(ci) && length(ci) == 1 && (is.na(ci) || isFALSE(ci))) { ci <- NULL } if (!is.null(test) && length(test) == 1 && (test == "none" || isFALSE(test))) { test <- NULL } # Point-estimates if (is.null(centrality)) { estimates <- data.frame(Parameter = NA) } else { estimates <- .prepare_output( point_estimate( x_df, centrality = centrality, dispersion = dispersion, cleaned_parameters = cleaned_parameters, ... ), cleaned_parameters, is_stanmvreg ) if (!"Parameter" %in% names(estimates)) { estimates <- cbind( data.frame(Parameter = "Posterior", stringsAsFactors = FALSE), estimates ) } } # Uncertainty if (is.null(ci)) { uncertainty <- data.frame(Parameter = NA) } else { ci_method <- insight::validate_argument( tolower(ci_method), c("hdi", "spi", "quantile", "ci", "eti", "si", "bci", "bcai") ) # not sure why "si" requires the model object if (ci_method == "si") { uncertainty <- ci( x, BF = BF, method = ci_method, prior = bf_prior, verbose = verbose, cleaned_parameters = cleaned_parameters, ... ) } else { uncertainty <- ci( x_df, ci = ci, method = ci_method, verbose = verbose, cleaned_parameters = cleaned_parameters, ... 
) } uncertainty <- .prepare_output( uncertainty, cleaned_parameters, is_stanmvreg ) if (!"Parameter" %in% names(uncertainty)) { uncertainty <- cbind( data.frame(Parameter = "Posterior", stringsAsFactors = FALSE), uncertainty ) } } # Effect Existence if (is.null(test)) { test_pd <- data.frame( Parameter = NA, Effects = NA, Component = NA, Response = NA ) test_rope <- data.frame( Parameter = NA, Effects = NA, Component = NA, Response = NA ) test_prope <- data.frame( Parameter = NA, Effects = NA, Component = NA, Response = NA ) test_psig <- data.frame( Parameter = NA, Effects = NA, Component = NA, Response = NA ) test_bf <- data.frame( Parameter = NA, Effects = NA, Component = NA, Response = NA ) test_pmap <- data.frame( Parameter = NA, Effects = NA, Component = NA, Response = NA ) } else { test <- .check_test_values(test) if ("all" %in% test) { test <- c("pd", "p_map", "p_rope", "p_significance", "rope", "equivalence", "bf") } ## TODO no BF for arm::sim if (inherits(x, c("sim", "sim.merMod", "mcmc", "stanfit"))) { test <- setdiff(test, "bf") } ## TODO enable once "rope()" works for multi-response models # no ROPE for multi-response models if (insight::is_multivariate(x)) { test <- setdiff(test, c("rope", "p_rope")) if (verbose) { insight::format_warning( "Multivariate response models are not yet supported for tests `rope` and `p_rope`." ) } } # MAP-based p-value if (any(c("p_map", "p_pointnull") %in% test)) { test_pmap <- .prepare_output( p_map(x_df, cleaned_parameters = cleaned_parameters, ...), cleaned_parameters, is_stanmvreg ) if (!is.data.frame(test_pmap)) { test_pmap <- data.frame( Parameter = "Posterior", p_MAP = test_pmap, stringsAsFactors = FALSE ) } } else { test_pmap <- data.frame(Parameter = NA) } # Probability of direction if (any(c("pd", "p_direction", "pdir", "mpe") %in% test)) { test_pd <- .prepare_output( p_direction(x_df, cleaned_parameters = cleaned_parameters, ...), cleaned_parameters, is_stanmvreg ) if (!is.data.frame(test_pd)) { test_pd <- data.frame( Parameter = "Posterior", pd = test_pd, stringsAsFactors = FALSE ) } } else { test_pd <- data.frame(Parameter = NA) } # Probability of rope if ("p_rope" %in% test) { test_prope <- .prepare_output( p_rope( x_df, range = rope_range, verbose = verbose, cleaned_parameters = cleaned_parameters, ... ), cleaned_parameters, is_stanmvreg ) if (!"Parameter" %in% names(test_prope)) { test_prope <- cbind( data.frame(Parameter = "Posterior", stringsAsFactors = FALSE), test_prope ) } } else { test_prope <- data.frame(Parameter = NA) } # Probability of significance if (any(c("ps", "p_sig", "p_significance") %in% test)) { test_psig <- .prepare_output( p_significance( x_df, threshold = rope_range, cleaned_parameters = cleaned_parameters, ... ), cleaned_parameters, is_stanmvreg ) if (!is.data.frame(test_psig)) { test_psig <- data.frame( Parameter = "Posterior", ps = test_psig, stringsAsFactors = FALSE ) } } else { test_psig <- data.frame(Parameter = NA) } # ROPE if ("rope" %in% test) { test_rope <- .prepare_output( rope( x_df, range = rope_range, ci = rope_ci, cleaned_parameters = cleaned_parameters, ... ), cleaned_parameters, is_stanmvreg ) if (!"Parameter" %in% names(test_rope)) { test_rope <- cbind( data.frame(Parameter = "Posterior", stringsAsFactors = FALSE), test_rope ) } names(test_rope)[names(test_rope) == "CI"] <- "ROPE_CI" } else { test_rope <- data.frame(Parameter = NA) } # Equivalence test if (any(c("equivalence", "equivalence_test", "equitest") %in% test)) { dot_args <- list(...) 
dot_args$verbose <- !"rope" %in% test test_equi <- .prepare_output( do.call( equivalence_test, c( dot_args, list( x = x_df, range = rope_range, ci = rope_ci ) ) ), cleaned_parameters, is_stanmvreg ) test_equi$Cleaned_Parameter <- NULL if (!"Parameter" %in% names(test_equi)) { test_equi <- cbind( data.frame(Parameter = "Posterior", stringsAsFactors = FALSE), test_equi ) } names(test_equi)[names(test_equi) == "CI"] <- "ROPE_CI" test_rope <- merge(test_rope, test_equi, all = TRUE) test_rope <- test_rope[!names(test_rope) %in% c("HDI_low", "HDI_high")] } # Bayes Factors if (any(c("bf", "bayesfactor", "bayes_factor") %in% test)) { test_bf <- tryCatch( .prepare_output( bayesfactor_parameters(x, prior = bf_prior, verbose = verbose, ...), cleaned_parameters, is_stanmvreg ), error = function(e) data.frame(Parameter = NA) ) if (!"Parameter" %in% names(test_bf)) { test_bf <- cbind( data.frame(Parameter = "Posterior", stringsAsFactors = FALSE), test_bf ) } } else { test_bf <- data.frame(Parameter = NA) } } # for data frames or numeric, and even for some models, we don't # have the "Effects" or "Component" column for all data frames. # To make "merge()" work, we add those columns to all data frames, # filled with NA, and remove the columns later if necessary estimates <- .add_effects_component_column(estimates) uncertainty <- .add_effects_component_column(uncertainty) test_pmap <- .add_effects_component_column(test_pmap) test_pd <- .add_effects_component_column(test_pd) test_prope <- .add_effects_component_column(test_prope) test_psig <- .add_effects_component_column(test_psig) test_rope <- .add_effects_component_column(test_rope) test_bf <- .add_effects_component_column(test_bf) # at least one "valid" data frame needs a row id, to restore # row-order after merging if (!all(is.na(estimates$Parameter))) { estimates$.rowid <- seq_len(nrow(estimates)) } else if (!all(is.na(test_pmap$Parameter))) { test_pmap$.rowid <- seq_len(nrow(test_pmap)) } else if (!all(is.na(test_pd$Parameter))) { test_pd$.rowid <- seq_len(nrow(test_pd)) } else if (!all(is.na(test_prope$Parameter))) { test_prope$.rowid <- seq_len(nrow(test_prope)) } else if (!all(is.na(test_psig$Parameter))) { test_psig$.rowid <- seq_len(nrow(test_psig)) } else if (!all(is.na(test_rope$Parameter))) { test_rope$.rowid <- seq_len(nrow(test_rope)) } else if (!all(is.na(test_bf$Parameter))) { # nolint test_bf$.rowid <- seq_len(nrow(test_bf)) } else { estimates$.rowid <- seq_len(nrow(estimates)) } # remove duplicated columns if (all(c("rope", "p_rope") %in% test)) { test_prope$ROPE_low <- NULL test_prope$ROPE_high <- NULL } # merge all data frames merge_by <- c("Parameter", "Effects", "Component", "Response") # merge_by <- intersect(merge_by, colnames(estimates)) out <- merge(estimates, uncertainty, by = merge_by, all = TRUE) out <- merge(out, test_pmap, by = merge_by, all = TRUE) out <- merge(out, test_pd, by = merge_by, all = TRUE) out <- merge(out, test_prope, by = merge_by, all = TRUE) out <- merge(out, test_psig, by = merge_by, all = TRUE) out <- merge(out, test_rope, by = merge_by, all = TRUE) out <- merge(out, test_bf, by = merge_by, all = TRUE) out <- out[!is.na(out$Parameter), ] # check which columns can be removed at the end. 
In any case, we don't # need .rowid in the returned data frame, and when the Effects or Component # column consist only of missing values, we remove those columns as well remove_columns <- ".rowid" if (insight::n_unique(out$Effects, remove_na = TRUE) < 2) { remove_columns <- c(remove_columns, "Effects") } if (insight::n_unique(out$Component, remove_na = TRUE) < 2) { remove_columns <- c(remove_columns, "Component") } if (insight::n_unique(out$Response, remove_na = TRUE) < 2) { remove_columns <- c(remove_columns, "Response") } # Restore columns order out <- datawizard::data_remove( out[order(out$.rowid), ], remove_columns, verbose = FALSE ) # Add iterations if (keep_iterations) { row_order <- out$Parameter iter <- as.data.frame(t(as.data.frame(x_df, ...))) names(iter) <- paste0("iter_", seq_len(ncol(iter))) iter$Parameter <- row.names(iter) out <- merge(out, iter, all.x = TRUE, by = "Parameter") out <- out[match(row_order, out$Parameter), ] row.names(out) <- NULL } # Prepare output attr(out, "cleaned_parameters") <- cleaned_parameters attr(out, "ci_method") <- ci_method out } #' @keywords internal .add_effects_component_column <- function(x) { if (!"Effects" %in% names(x)) x <- cbind(x, data.frame(Effects = NA)) if (!"Component" %in% names(x)) x <- cbind(x, data.frame(Component = NA)) if (!"Response" %in% names(x)) x <- cbind(x, data.frame(Response = NA)) x } # Models based on simple data frame of posterior --------------------- #' @rdname describe_posterior #' @export describe_posterior.numeric <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, verbose = TRUE, ...) { out <- .describe_posterior( posterior, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, verbose = verbose, ... ) class(out) <- unique(c("describe_posterior", "see_describe_posterior", class(out))) out } #' @export describe_posterior.double <- describe_posterior.numeric #' @export #' @rdname describe_posterior #' @inheritParams p_direction describe_posterior.data.frame <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, rvar_col = NULL, verbose = TRUE, ...) { x_rvar <- .possibly_extract_rvar_col(posterior, rvar_col) if (length(x_rvar) > 0L) { cl <- match.call() cl[[1]] <- bayestestR::describe_posterior cl$posterior <- x_rvar cl$rvar_col <- NULL prior_rvar <- .possibly_extract_rvar_col(posterior, bf_prior) if (length(prior_rvar) > 0L) { cl$bf_prior <- prior_rvar } out <- eval.parent(cl) obj_name <- insight::safe_deparse_symbol(substitute(posterior)) attr(out, "object_name") <- sprintf('%s[["%s"]]', obj_name, rvar_col) return(.append_datagrid(out, posterior)) } out <- .describe_posterior( posterior, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, verbose = verbose, ... 
) class(out) <- unique(c("describe_posterior", "see_describe_posterior", class(out))) out } #' @export describe_posterior.sim.merMod <- describe_posterior.numeric #' @export describe_posterior.sim <- describe_posterior.numeric #' @export describe_posterior.bayesQR <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, parameters = NULL, verbose = TRUE, ...) { out <- .describe_posterior( insight::get_parameters(posterior), centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, effects = "fixed", parameters = parameters, verbose = verbose, ... ) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @export describe_posterior.blrm <- describe_posterior.bayesQR #' @export describe_posterior.mcmc <- describe_posterior.bayesQR #' @export describe_posterior.mcmc.list <- describe_posterior.bayesQR #' @export describe_posterior.BGGM <- describe_posterior.bayesQR #' @export describe_posterior.draws <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, verbose = TRUE, ...) { out <- .describe_posterior( .posterior_draws_to_df(posterior), centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = if (!is.null(bf_prior)) .posterior_draws_to_df(bf_prior), BF = BF, verbose = verbose, ... ) class(out) <- unique(c("describe_posterior", "see_describe_posterior", class(out))) out } #' @export describe_posterior.rvar <- describe_posterior.draws # easystats methods ------------------------ #' @export describe_posterior.effectsize_std_params <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, verbose = TRUE, ...) { class(posterior) <- "data.frame" no_unique <- vapply(posterior, function(col) { length(unique(col)) == 1 }, FUN.VALUE = TRUE) if (any(no_unique)) { no_unique <- which(no_unique) out <- describe_posterior.data.frame( posterior[, -no_unique], centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, verbose = verbose, ... ) out_int <- data.frame(Parameter = colnames(posterior)[no_unique]) col_diff <- setdiff(colnames(out), colnames(out_int)) out_int[, col_diff] <- NA out <- rbind(out_int, out) out <- out[order(match(out$Parameter, colnames(posterior))), ] return(out) } describe_posterior.data.frame( posterior, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, verbose = verbose, ... ) } #' @export describe_posterior.get_predicted <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = NULL, verbose = TRUE, ...) 
{ if ("iterations" %in% names(attributes(posterior))) { describe_posterior( as.data.frame(t(attributes(posterior)$iterations)), centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, verbose = verbose, ... ) } else { insight::format_error("No iterations present in the output.") } } # emmeans --------------------------- #' @export describe_posterior.emmGrid <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, verbose = TRUE, ...) { if (any(c("all", "bf", "bayesfactor", "bayes_factor") %in% tolower(test)) || "si" %in% tolower(ci_method)) { samps <- .clean_priors_and_posteriors(posterior, bf_prior, verbose = verbose) bf_prior <- samps$prior posterior_samples <- samps$posterior } else { posterior_samples <- insight::get_parameters(posterior) } out <- .describe_posterior( posterior_samples, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, verbose = verbose, ... ) row.names(out) <- NULL # Reset row names out <- .append_datagrid(out, posterior) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) out } #' @export describe_posterior.emm_list <- describe_posterior.emmGrid #' @export describe_posterior.slopes <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, verbose = TRUE, ...) { if (any(c("all", "bf", "bayesfactor", "bayes_factor") %in% tolower(test)) || "si" %in% tolower(ci_method)) { samps <- .clean_priors_and_posteriors(posterior, bf_prior, verbose = verbose) bf_prior <- samps$prior posterior_samples <- samps$posterior } else { posterior_samples <- .get_marginaleffects_draws(posterior) } out <- describe_posterior( posterior_samples, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, verbose = verbose, ... ) row.names(out) <- NULL # Reset row names out <- .append_datagrid(out, posterior) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) out } #' @export describe_posterior.comparisons <- describe_posterior.slopes #' @export describe_posterior.predictions <- describe_posterior.slopes # Stan ------------------------------ #' @inheritParams insight::get_parameters #' @inheritParams diagnostic_posterior #' @rdname describe_posterior #' @export describe_posterior.stanreg <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = "fixed", component = "location", parameters = NULL, BF = 1, verbose = TRUE, ...) 
{ if ((any(c("all", "bf", "bayesfactor", "bayes_factor") %in% tolower(test)) || "si" %in% tolower(ci_method)) && is.null(bf_prior)) { bf_prior <- suppressMessages(unupdate(posterior)) } out <- .describe_posterior( posterior, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, effects = effects, component = component, parameters = parameters, verbose = verbose, ... ) # intermediate step: save cleaned parameters cp <- attributes(out)$cleaned_parameters diagnostic <- diagnostic_posterior( posterior, diagnostic, effects = effects, component = component, parameters = parameters, ... ) out <- .merge_and_sort(out, diagnostic, by = "Parameter", all = TRUE) if (isTRUE(priors)) { priors_data <- describe_prior(posterior, parameters = out$Parameter, ...) out <- .merge_and_sort(out, priors_data, by = "Parameter", all = TRUE) } out <- .add_clean_parameters_attribute(out, posterior, cleaned_parameters = cp) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @inheritParams insight::get_parameters #' @inheritParams diagnostic_posterior #' @export describe_posterior.stanmvreg <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = "p_direction", rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ...) { out <- .describe_posterior( posterior, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, effects = effects, parameters = parameters, verbose = verbose, ... ) # intermediate step: save cleaned parameters cp <- attributes(out)$cleaned_parameters if (is.null(out$Response)) { out$Response <- gsub("(b\\[)*(.*)\\|(.*)", "\\2", out$Parameter) } diagnostic <- diagnostic_posterior( posterior, diagnostic, effects = effects, parameters = parameters, ... ) out <- .merge_and_sort(out, diagnostic, by = c("Parameter", "Response"), all = TRUE) if (isTRUE(priors)) { priors_data <- describe_prior(posterior, parameters = NULL, ...) priors_data$Parameter <- gsub("^(.*)\\|(.*)", replacement = "\\2", priors_data$Parameter) out <- .merge_and_sort(out, priors_data, by = c("Parameter", "Response"), all = TRUE) } out <- .add_clean_parameters_attribute(out, posterior, cleaned_parameters = cp) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @inheritParams insight::get_parameters #' @inheritParams diagnostic_posterior #' @export describe_posterior.stanfit <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, diagnostic = c("ESS", "Rhat"), effects = "fixed", parameters = NULL, priors = FALSE, verbose = TRUE, ...) 
{ out <- .describe_posterior( posterior, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, effects = effects, parameters = parameters, verbose = verbose, ... ) diagnostic <- diagnostic_posterior( posterior, diagnostic, effects = effects, parameters = parameters, ... ) out <- .merge_and_sort(out, diagnostic, by = "Parameter", all = TRUE) if (isTRUE(priors)) { priors_data <- describe_prior(posterior, parameters = out$Parameter, ...) out <- .merge_and_sort(out, priors_data, by = "Parameter", all = TRUE) } attr(out, "ci_method") <- ci_method class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @export describe_posterior.brmsfit <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), effects = "fixed", component = "conditional", parameters = NULL, BF = 1, priors = FALSE, verbose = TRUE, ...) { if ((any(c("all", "bf", "bayesfactor", "bayes_factor") %in% tolower(test)) || "si" %in% tolower(ci_method)) && is.null(bf_prior)) { bf_prior <- suppressMessages(unupdate(posterior)) } out <- .describe_posterior( posterior, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, bf_prior = bf_prior, BF = BF, effects = effects, component = component, parameters = parameters, verbose = verbose, ... ) # intermediate step: save cleaned parameters cp <- attributes(out)$cleaned_parameters if (!is.null(diagnostic)) { diagnostic <- diagnostic_posterior( posterior, diagnostic, effects = effects, component = component, parameters = parameters, ... ) out <- .merge_and_sort(out, diagnostic, by = "Parameter", all = TRUE) } if (isTRUE(priors)) { priors_data <- describe_prior(posterior, parameters = out$Parameter, ...) out <- .merge_and_sort(out, priors_data, by = "Parameter", all = TRUE) } out <- .add_clean_parameters_attribute(out, posterior, cleaned_parameters = cp) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @export describe_posterior.blavaan <- describe_posterior.stanfit # other models -------------------------------- #' @inheritParams describe_posterior.stanreg #' @export describe_posterior.MCMCglmm <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, diagnostic = "ESS", parameters = NULL, verbose = TRUE, ...) { out <- .describe_posterior( posterior, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, effects = "fixed", parameters = parameters, verbose = verbose, ... ) if (!is.null(diagnostic) && diagnostic == "ESS") { diagnostic <- effective_sample(posterior, effects = "fixed", parameters = parameters, ...) 
out <- .merge_and_sort(out, diagnostic, by = "Parameter", all = TRUE) } out } #' @export describe_posterior.bcplm <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, priors = TRUE, parameters = NULL, verbose = TRUE, ...) { out <- .describe_posterior( insight::get_parameters(posterior), centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, effects = "fixed", parameters = parameters, verbose = verbose, ... ) if (isTRUE(priors)) { priors_data <- describe_prior(posterior, parameters = out$Parameter, ...) out <- .merge_and_sort(out, priors_data, by = "Parameter", all = TRUE) } attr(out, "ci_method") <- ci_method attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } #' @export describe_posterior.bamlss <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, component = "all", parameters = NULL, verbose = TRUE, ...) { out <- .describe_posterior( posterior, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, component = component, parameters = parameters, verbose = verbose, ... ) attr(out, "ci_method") <- ci_method attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } # BayesFactor -------------------- #' @export describe_posterior.BFBayesFactor <- function(posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope", "bf"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, priors = TRUE, verbose = TRUE, ...) { # Match test args to catch BFs if (!is.null(test)) { test <- .check_test_values(test) if ("all" %in% test) { test <- c("pd", "p_map", "p_rope", "p_significance", "rope", "equivalence", "bf") } } # Remove BF from list if (any(c("bf", "bayesfactor", "bayes_factor") %in% test)) { test <- test[!test %in% c("bf", "bayesfactor", "bayes_factor")] if (length(test) == 0L) test <- NULL compute_bf <- TRUE } else { compute_bf <- FALSE } draws <- insight::get_parameters(posterior) if (all(rope_range == "default")) { rope_range <- rope_range(posterior, verbose = verbose) } # Describe posterior out <- .describe_posterior( draws, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, keep_iterations = keep_iterations, verbose = verbose, ... ) if (is.null(out)) { return(NULL) } # Compute and read BF a posteriori if (compute_bf) { tryCatch( { out$log_BF <- as.data.frame(bayesfactor_models(posterior[1], ...))[-1, ]$log_BF out$BF <- exp(out$log_BF) }, error = function(e) { NULL } ) } # Add priors if (priors) { priors_data <- describe_prior(posterior, ...) 
out <- .merge_and_sort(out, priors_data, by = intersect(names(out), names(priors_data)), all = TRUE) } attr(out, "ci_method") <- ci_method attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(posterior)) class(out) <- c("describe_posterior", "see_describe_posterior", class(out)) out } # Helpers ----------------------------------------------------------------- #' @keywords internal .check_test_values <- function(test) { match.arg(tolower(test), c( "pd", "p_direction", "pdir", "mpe", "ps", "psig", "p_significance", "p_rope", "rope", "equivalence", "equivalence_test", "equitest", "bf", "bayesfactor", "bayes_factor", "p_map", "all" ), several.ok = TRUE) } #' @keywords internal .check_if_need_to_compute_rope_range <- function(rope_range, test) { if (is.numeric(rope_range) || is.list(rope_range)) { return(FALSE) } need_rope <- c( "all", "p_rope", "ps", "p_sig", "p_significance", "rope", "equivalence", "equivalence_test", "equitest" ) return(is.character(test) && length(test) > 0L && any(need_rope %in% tolower(test))) } bayestestR/R/convert_pd_to_p.R0000644000176200001440000000622114746106624016122 0ustar liggesusers#' Convert between Probability of Direction (pd) and p-value. #' #' Enables a conversion between Probability of Direction (pd) and p-value. #' #' @param pd A Probability of Direction (pd) value (between 0 and 1). Can also #' be a data frame with a column named `pd`, `p_direction`, or `PD`, as returned #' by [`p_direction()`]. In this case, the column is converted to p-values and #' the new data frame is returned. #' @param p A p-value. #' @param direction What type of p-value is requested or provided. Can be #' `"two-sided"` (default, two tailed) or `"one-sided"` (one tailed). #' @param verbose Toggle off warnings. #' @param ... Arguments passed to or from other methods. #' #' @return A p-value or a data frame with a p-value column. #' #' @details #' Conversion is done using the following equation (see _Makowski et al., 2019_): #' #' When `direction = "two-sided"` #' #' \ifelse{html}{\out{p = 2 * (1 - pd)}}{\eqn{p = 2 \times (1 - p_d)}} #' #' When `direction = "one-sided"` #' #' \ifelse{html}{\out{p = 1 - pd}}{\eqn{p = 1 - p_d}} #' #' Note that this conversion is only valid when the lowest possible values of pd #' is 0.5 - i.e., when the posterior represents continuous parameter space (see #' [`p_direction()`]). If any pd < 0.5 are detected, they are converted to a p #' of 1, and a warning is given. #' #' @references #' Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019). #' *Indices of Effect Existence and Significance in the Bayesian Framework*. #' Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} #' #' @examples #' pd_to_p(pd = 0.95) #' pd_to_p(pd = 0.95, direction = "one-sided") #' #' @export pd_to_p <- function(pd, ...) { UseMethod("pd_to_p") } #' @export #' @rdname pd_to_p pd_to_p.numeric <- function(pd, direction = "two-sided", verbose = TRUE, ...) { p <- 1 - pd if (.get_direction(direction) == 0) { p <- 2 * p } less_than_0.5 <- pd < 0.5 if (any(less_than_0.5)) { if (verbose) { insight::format_warning(paste( "pd-values smaller than 0.5 detected, indicating inconsistent direction of the probability mass.", "This usually happens when the parameters space is not continuous. Affected values are set to 1.", "See help('p_direction') for more info." )) } p[less_than_0.5] <- 1 } p } #' @export pd_to_p.data.frame <- function(pd, direction = "two-sided", verbose = TRUE, ...) 
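# Minimal sketch of the conversions documented above: the two-sided p-value
# is 2 * (1 - pd), the one-sided p-value is 1 - pd, and the data frame
# method converts a whole p_direction() output in one go.
pd_to_p(0.95)                             # 2 * (1 - 0.95) = 0.1
pd_to_p(0.95, direction = "one-sided")    # 1 - 0.95 = 0.05
pd <- p_direction(data.frame(replicate(3, rnorm(500))))
pd_to_p(pd)                               # drops the `pd` column and adds `p`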
{ # check if data frame has an appropriate column pd_column <- intersect(c("pd", "p_direction", "PD"), colnames(pd))[1] if (is.na(pd_column) || length(pd_column) == 0) { insight::format_error("No column named `pd`, `p_direction`, or `PD` found.") } # add p-value column pd$p <- pd_to_p(as.numeric(pd[[pd_column]])) # remove pd-column pd[[pd_column]] <- NULL pd } #' @rdname pd_to_p #' @export p_to_pd <- function(p, direction = "two-sided", ...) { if (.get_direction(direction) == 0) { p <- p / 2 } (1 - p) } #' @rdname pd_to_p #' @export convert_p_to_pd <- p_to_pd #' @rdname pd_to_p #' @export convert_pd_to_p <- pd_to_p bayestestR/vignettes/0000755000176200001440000000000015054305242014407 5ustar liggesusersbayestestR/vignettes/overview_of_vignettes.Rmd0000644000176200001440000000410014542333405021473 0ustar liggesusers--- title: "Overview of Vignettes" output: rmarkdown::html_vignette: vignette: > %\VignetteIndexEntry{Overview of Vignettes} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) knitr::opts_chunk$set( echo = TRUE, collapse = TRUE, warning = FALSE, message = FALSE, comment = "#>", eval = TRUE ) ``` All package vignettes are available at [https://easystats.github.io/bayestestR/](https://easystats.github.io/bayestestR/). ## Function Overview * [Function Reference](https://easystats.github.io/bayestestR/reference/index.html) ## Get Started * [Get Started with Bayesian Analysis](https://easystats.github.io/bayestestR/articles/bayestestR.html) ## Examples 1. [Initiation to Bayesian models](https://easystats.github.io/bayestestR/articles/example1.html) 2. [Confirmation of Bayesian skills](https://easystats.github.io/bayestestR/articles/example2.html) 3. [Become a Bayesian master](https://easystats.github.io/bayestestR/articles/example3.html) ## Articles * [Credible Intervals (CI))](https://easystats.github.io/bayestestR/articles/credible_interval.html) * [Region of Practical Equivalence (ROPE)](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html) * [Probability of Direction (pd)](https://easystats.github.io/bayestestR/articles/probability_of_direction.html) * [Bayes Factors](https://easystats.github.io/bayestestR/articles/bayes_factors.html) ## In-Depths * [Comparison of Point-Estimates](https://easystats.github.io/bayestestR/articles/web_only/indicesEstimationComparison.html) * [Indices of Effect Existence and Significance in the Bayesian Framework](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full) * [Mediation Analysis using Bayesian Regression Models](https://easystats.github.io/bayestestR/articles/mediation.html) ## Guidelines * [Reporting Guidelines](https://easystats.github.io/bayestestR/articles/guidelines.html) bayestestR/data/0000755000176200001440000000000014542333405013313 5ustar liggesusersbayestestR/data/disgust.rdata0000644000176200001440000000062414542333405016014 0ustar liggesusersTN@)Z BKh7nM%< BT~[Ӟ1'8̽wΜ9wA(1 &2yȨBIXS/wYN.|Q' aBƈ w%a 4>Dzu}pn:KG&k{hs+6au {O]h #@C} -|8oM/KYɷдO =r}M{Y |㮝9VWA?IܯFMЬ zv~ !GM)` methods when using multiple credible levels (#688). # bayestestR 0.15.0 ## Changes * Support for `posterior::rvar`-type column in data frames. For example, a data frame `df` with an `rvar` column `".pred"` can now be called directly via `p_direction(df, rvar_col = ".pred")`. 
* Added support for `{marginaleffects}` * The ROPE or threshold ranges in `rope()`, `describe_posterior()`, `p_significance()` and `equivalence_test()` can now be specified as a list. This allows for different ranges for different parameters. * Results from objects generated by `{emmeans}` (`emmGrid`/`emm_list`) now return results with appended grid-data. * Usability improvements for `p_direction()`: - Results from `p_direction()` can directly be used in `pd_to_p()`. - `p_direction()` gets an `as_p` argument, to directly convert pd-values into frequentist p-values. - `p_direction()` gets a `remove_na` argument, which defaults to `TRUE`, to remove `NA` values from the input before calculating the pd-values. - Besides the existing `as.numeric()` method, `p_direction()` now also has an `as.vector()` method. * `p_significance()` now accepts non-symmetric ranges for the `threshold` argument. * `p_to_pd()` now also works with data frames returned by `p_direction()`. If a data frame contains a `pd`, `p_direction` or `PD` column name, this is assumed to be the pd-values, which are then converted to p-values. * `p_to_pd()` for data frame inputs gets a `as.numeric()` and `as.vector()` method. ## Bug fixes * Fixed warning in CRAN check results. # bayestestR 0.14.0 ## Breaking Changes * Arguments named `group`, `at`, `group_by` and `split_by` will be deprecated in future releases of _easystats_ packages. Please use `by` instead. This affects following functions in *bayestestR*: `estimate_density()`. ## Changes * `bayesian_as_frequentist()` now supports more model families from Bayesian models that can be successfully converted to their frequentists counterparts. * `bayesfactor_models()` now throws an informative error when Bayes factors for comparisons could not be calculated. ## Bug fixes * Fixed issue in `bayesian_as_frequentist()` for *brms* models with `0 + Intercept` specification in the model formula. # bayestestR 0.13.2 ## Breaking Changes * `pd_to_p()` now returns 1 and a warning for values smaller than 0.5. * `map_estimate()`, `p_direction()`, `p_map()`, and `p_significance()` now return a data-frame when the input is a numeric vector. (making the output consistently a data frame for all inputs.) * Argument `posteriors` was renamed into `posterior`. Before, there were a mix of both spellings, now it is consistently `posterior`. ## Changes * Retrieving models from the environment was improved. ## Bug fixes * Fixed issues in various `format()` methods, which did not work properly for some few functions (like `p_direction()`). * Fixed issue in `estimate_density()` for double vectors that also had other class attributes. * Fixed several minor issues and tests. # bayestestR 0.13.1 ## Changes * Improved speed performance when functions are called using `do.call()`. * Improved speed performance to `bayesfactor_models()` for `brmsfit` objects that already included a `marglik` element in the model object. ## New functionality * `as.logical()` for `bayesfactor_restricted()` results, extracts the boolean vector(s) the mark which draws are part of the order restriction. ## Bug fixes * `p_map()` gains a new `null` argument to specify any non-0 nulls. * Fixed non-working examples for `ci(method = "SI")`. * Fixed wrong calculation of rope range for model objects in `describe_posterior()`. * Some smaller bug fixes. # bayestestR 0.13.0 ## Breaking * The minimum needed R version has been bumped to `3.6`. 
* `contr.equalprior(contrasts = FALSE)` (previously `contr.orthonorm`) no longer returns an identity matrix, but a shifted `diag(n) - 1/n`, for consistency. ## New functionality * `p_to_bf()`, to convert p-values into Bayes factors. For more accurate approximate Bayes factors, use `bic_to_bf()`. * *bayestestR* now supports objects of class `rvar` from package *posterior*. * `contr.equalprior` (previously `contr.orthonorm`) gains two new functions: `contr.equalprior_pairs` and `contr.equalprior_deviations` to aide in setting more intuitive priors. ## Changes * has been renamed *`contr.equalprior`* to be more explicit about its function. * `p_direction()` now accepts objects of class `parameters_model()` (from `parameters::model_parameters()`), to compute probability of direction for parameters of frequentist models. # bayestestR 0.12.1 ## Breaking * `Bayesfactor_models()` for frequentist models now relies on the updated `insight::get_loglikelihood()`. This might change some results for REML based models. See documentation. * `estimate_density()` argument `group_by` is renamed `at`. * All `distribution_*(random = FALSE)` functions now rely on `ppoints()`, which will result in slightly different results, especially with small `n`s. * Uncertainty estimation now defaults to `"eti"` (formerly was `"hdi"`). ## Changes * *bayestestR* functions now support `draws` objects from package *posterior*. * `rope_range()` now handles log(normal)-families and models with log-transformed outcomes. * New function `spi()`, to compute shortest probability intervals. Furthermore, the `"spi"` option was added as new method to compute uncertainty intervals. ## Bug fixes * `bci()` for some objects incorrectly returned the equal-tailed intervals. # bayestestR 0.11.5 * Fixes failing tests in CRAN checks. # bayestestR 0.11.1 ## New functions * `describe_posterior()` gains a `plot()` method, which is a short cut for `plot(estimate_density(describe_posterior()))`. # bayestestR 0.11 ## Bug fixes * Fixed issues related to last *brms* update. * Fixed bug in `describe_posterior.BFBayesFactor()` where Bayes factors were missing from out put ( #442 ). # bayestestR 0.10.0 ## Breaking * All Bayes factors are now returned as `log(BF)` (column name `log_BF`). Printing is unaffected. To retrieve the raw BFs, you can run `exp(result$log_BF)`. ## New functions * `bci()` (and its alias `bcai()`) to compute bias-corrected and accelerated bootstrap intervals. Along with this new function, `ci()` and `describe_posterior()` gain a new `ci_method` type, `"bci"`. ## Changes * `contr.bayes` has been renamed *`contr.orthonorm`* to be more explicit about its function. # bayestestR 0.9.0 ## Breaking * The default `ci` width has been changed to 0.95 instead of 0.89 (see [here](https://github.com/easystats/bayestestR/discussions/250)). This should not come as a surprise to the long-time users of `bayestestR` as we have been warning about this impending change for a while now :) * Column names for `bayesfactor_restricted()` are now `p_prior` and `p_posterior` (was `Prior_prob` and `Posterior_prob`), to be consistent with `bayesfactor_inclusion()` output. * Removed the experimental function `mhdior`. ## General * Support for `blavaan` models. * Support for `blrm` models (*rmsb*). * Support for `BGGM` models (*BGGM*). * `check_prior()` and `describe_prior()` should now also work for more ways of prior definition in models from *rstanarm* or *brms*. ## Bug fixes * Fixed bug in `print()` method for the `mediation()` function. 
* Fixed remaining inconsistencies with CI values, which were not reported as fraction for `rope()`. * Fixed issues with special prior definitions in `check_prior()`, `describe_prior()` and `simulate_prior()`. # bayestestR 0.8.2 ## General * Support for `bamlss` models. * Roll-back R dependency to R >= 3.4. ## Changes to functions * All `.stanreg` methods gain a `component` argument, to also include auxiliary parameters. ## Bug fixes * `bayesfactor_parameters()` no longer errors for no reason when computing extremely un/likely direction hypotheses. * `bayesfactor_pointull()` / `bf_pointull()` are now `bayesfactor_pointnull()` / `bf_pointnull()` (can *you* spot the difference? #363 ). # bayestestR 0.8.0 ## New functions * `sexit()`, a function for sequential effect existence and significance testing (SEXIT). ## General * Added startup-message to warn users that default ci-width might change in a future update. * Added support for *mcmc.list* objects. ## Bug fixes * `unupdate()` gains a `newdata` argument to work with `brmsfit_multiple` models. * Fixed issue in Bayes factor vignette (don't evaluate code chunks if packages not available). # bayestestR 0.7.5 ## New functions * Added `as.matrix()` function for `bayesfactor_model` arrays. * `unupdate()`, a utility function to get Bayesian models un-fitted from the data, representing the priors only. ## Changes to functions * `ci()` supports `emmeans` - both Bayesian and frequentist ( #312 - cross fix with `parameters`) ## Bug fixes * Fixed issue with *default* rope range for `BayesFactor` models. * Fixed issue in collinearity-check for `rope()` for models with less than two parameters. * Fixed issue in print-method for `mediation()` with `stanmvreg`-models, which displays the wrong name for the response-value. * Fixed issue in `effective_sample()` for models with only one parameter. * `rope_range()` for `BayesFactor` models returns non-`NA` values ( #343 ) # bayestestR 0.7.2 ## New functions - `mediation()`, to compute average direct and average causal mediation effects of multivariate response models (`brmsfit`, `stanmvreg`). ## Bug fixes - `bayesfactor_parameters()` works with `R<3.6.0`. # bayestestR 0.7.0 ## General - Preliminary support for *stanfit* objects. - Added support for *bayesQR* objects. ## Changes to functions - `weighted_posteriors()` can now be used with data frames. - Revised `print()` for `describe_posterior()`. - Improved value formatting for Bayesfactor functions. ## Bug fixes - Link transformation are now taken into account for `emmeans` objets. E.g., in `describe_posterior()`. - Fix `diagnostic_posterior()` when algorithm is not "sampling". - Minor revisions to some documentations. - Fix CRAN check issues for win-old-release. # bayestestR 0.6.0 ## Changes to functions - `describe_posterior()` now also works on `effectsize::standardize_posteriors()`. - `p_significance()` now also works on `parameters::simulate_model()`. - `rope_range()` supports more (frequentis) models. ## Bug fixes - Fixed issue with `plot()` `data.frame`-methods of `p_direction()` and `equivalence_test()`. - Fix check issues for forthcoming insight-update. # bayestestR 0.5.3 ## General - Support for *bcplm* objects (package **cplm**) ## Changes to functions - `estimate_density()` now also works on grouped data frames. ## Bug fixes - Fixed bug in `weighted_posteriors()` to properly weight Intercept-only `BFBayesFactor` models. - Fixed bug in `weighted_posteriors()` when models have very low posterior probability ( #286 ). 
- Fixed bug in `describe_posterior()`, `rope()` and `equivalence_test()` for *brmsfit* models with monotonic effect. - Fixed issues related to latest changes in `as.data.frame.brmsfit()` from the *brms* package. # bayestestR 0.5.0 ## General - Added `p_pointnull()` as an alias to `p_MAP()`. - Added `si()` function to compute support intervals. - Added `weighted_posteriors()` for generating posterior samples averaged across models. - Added `plot()`-method for `p_significance()`. - `p_significance()` now also works for *brmsfit*-objects. - `estimate_density()` now also works for *MCMCglmm*-objects. - `equivalence_test()` gets `effects` and `component` arguments for *stanreg* and *brmsfit* models, to print specific model components. - Support for *mcmc* objects (package **coda**) - Provide more distributions via `distribution()`. - Added `distribution_tweedie()`. - Better handling of `stanmvreg` models for `describe_posterior()`, `diagnostic_posterior()` and `describe_prior()`. ## Breaking changes - `point_estimate()`: argument `centrality` default value changed from 'median' to 'all'. - `p_rope()`, previously as exploratory index, was renamed as `mhdior()` (for *Max HDI inside/outside ROPE*), as `p_rope()` will refer to `rope(..., ci = 1)` ( #258 ) ## Bug fixes - Fixed mistake in description of `p_significance()`. - Fixed error when computing BFs with `emmGrid` based on some non-linear models ( #260 ). - Fixed wrong output for percentage-values in `print.equivalence_test()`. - Fixed issue in `describe_posterior()` for `BFBayesFactor`-objects with more than one model. # bayestestR 0.4.0 ## New functions / features - `convert_bayesian_to_frequentist()` Convert (refit) Bayesian model as frequentist - `distribution_binomial()` for perfect binomial distributions - `simulate_ttest()` Simulate data with a mean difference - `simulate_correlation()` Simulate correlated datasets - `p_significance()` Compute the probability of Practical Significance (ps) - `overlap()` Compute overlap between two empirical distributions - `estimate_density()`: `method = "mixture"` argument added for mixture density estimation ## Bug fixes - Fixed bug in `simulate_prior()` for stanreg-models when `autoscale` was set to `FALSE` # bayestestR 0.3.0 ## General - revised `print()`-methods for functions like `rope()`, `p_direction()`, `describe_posterior()` etc., in particular for model objects with random effects and/or zero-inflation component ## New functions / features - `check_prior()` to check if prior is informative - `simulate_prior()` to simulate model's priors as distributions - `distribution_gamma()` to generate a (near-perfect or random) Gamma distribution - `contr.bayes` function for orthogonal factor coding (implementation from Singmann & Gronau's [`bfrms`](https://github.com/bayesstuff/bfrms/), used for proper prior estimation when factor have 3 levels or more. See Bayes factor vignette ## Changes to functions - Added support for `sim`, `sim.merMod` (from `arm::sim()`) and `MCMCglmm`-objects to many functions (like `hdi()`, `ci()`, `eti()`, `rope()`, `p_direction()`, `point_estimate()`, ...) - `describe_posterior()` gets an `effects` and `component` argument, to include the description of posterior samples from random effects and/or zero-inflation component. 
- More user-friendly warning for non-supported models in `bayesfactor()`-methods ## Bug fixes - Fixed bug in `bayesfactor_inclusion()` where the same interaction sometimes appeared more than once (#223) - Fixed bug in `describe_posterior()` for *stanreg* models fitted with fullrank-algorithm # bayestestR 0.2.5 ## Breaking changes - `rope_range()` for binomial model has now a different default (-.18; .18 ; instead of -.055; .055) - `rope()`: returns a proportion (between 0 and 1) instead of a value between 0 and 100 - `p_direction()`: returns a proportion (between 0.5 and 1) instead of a value between 50 and 100 ([#168](https://github.com/easystats/bayestestR/issues/168)) - `bayesfactor_savagedickey()`: `hypothesis` argument replaced by `null` as part of the new `bayesfactor_parameters()` function ## New functions / features - `density_at()`, `p_map()` and `map_estimate()`: `method` argument added - `rope()`: `ci_method` argument added - `eti()`: Computes equal-tailed intervals - `reshape_ci()`: Reshape CIs between wide/long - `bayesfactor_parameters()`: New function, replacing `bayesfactor_savagedickey()`, allows for computing Bayes factors against a *point-null* or an *interval-null* - `bayesfactor_restricted()`: Function for computing Bayes factors for order restricted models ## Minor changes ## Bug fixes - `bayesfactor_inclusion()` now works with `R < 3.6`. # bayestestR 0.2.2 ## Breaking changes - `equivalence_test()`: returns capitalized output (e.g., `Rejected` instead of `rejected`) - `describe_posterior.numeric()`: `dispersion` defaults to `FALSE` for consistency with the other methods ## New functions / features - `pd_to_p()` and `p_to_pd()`: Functions to convert between probability of direction (pd) and p-value - Support of `emmGrid` objects: `ci()`, `rope()`, `bayesfactor_savagedickey()`, `describe_posterior()`, ... ## Minor changes - Improved tutorial 2 ## Bug fixes - `describe_posterior()`: Fixed column order restoration - `bayesfactor_inclusion()`: Inclusion BFs for matched models are more inline with JASP results. # bayestestR 0.2.0 ## Breaking changes - plotting functions now require the installation of the `see` package - `estimate` argument name in `describe_posterior()` and `point_estimate()` changed to `centrality` - `hdi()`, `ci()`, `rope()` and `equivalence_test()` default `ci` to `0.89` - `rnorm_perfect()` deprecated in favour of `distribution_normal()` - `map_estimate()` now returns a single value instead of a dataframe and the `density` parameter has been removed. 
The MAP density value is now accessible via `attributes(map_output)$MAP_density` ## New functions / features - `describe_posterior()`, `describe_prior()`, `diagnostic_posterior()`: added wrapper function - `point_estimate()` added function to compute point estimates - `p_direction()`: new argument `method` to compute pd based on AUC - `area_under_curve()`: compute AUC - `distribution()` functions have been added - `bayesfactor_savagedickey()`, `bayesfactor_models()` and `bayesfactor_inclusion()` functions has been added - Started adding plotting methods (currently in the [`see`](https://github.com/easystats/see) package) for `p_direction()` and `hdi()` - `probability_at()` as alias for `density_at()` - `effective_sample()` to return the effective sample size of Stan-models - `mcse()` to return the Monte Carlo standard error of Stan-models ## Minor changes - Improved documentation - Improved testing - `p_direction()`: improved printing - `rope()` for model-objects now returns the HDI values for all parameters as attribute in a consistent way - Changes legend-labels in `plot.equivalence_test()` to align plots with the output of the `print()`-method (#78) ## Bug fixes - `hdi()` returned multiple class attributes (#72) - Printing results from `hdi()` failed when `ci`-argument had fractional parts for percentage values (e.g. `ci = 0.995`). - `plot.equivalence_test()` did not work properly for *brms*-models (#76). # bayestestR 0.1.0 - CRAN initial publication and [0.1.0 release](https://github.com/easystats/bayestestR/releases/tag/v0.1.0) - Added a `NEWS.md` file to track changes to the package bayestestR/inst/0000755000176200001440000000000015054305237013360 5ustar liggesusersbayestestR/inst/CITATION0000644000176200001440000000142214542333405014513 0ustar liggesusersbibentry( bibtype="Article", title="bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework.", author=c(person("Dominique", "Makowski"), person("Mattan S.", "Ben-Shachar"), person("Daniel", "Lüdecke")), journal="Journal of Open Source Software", doi="10.21105/joss.01541", year="2019", number = "40", volume = "4", pages = "1541", url="https://joss.theoj.org/papers/10.21105/joss.01541", textVersion = "Makowski, D., Ben-Shachar, M., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. doi:10.21105/joss.01541", mheader = "To cite bayestestR in publications use:" ) bayestestR/inst/doc/0000755000176200001440000000000015054305237014125 5ustar liggesusersbayestestR/inst/doc/overview_of_vignettes.Rmd0000644000176200001440000000410014542333405021205 0ustar liggesusers--- title: "Overview of Vignettes" output: rmarkdown::html_vignette: vignette: > %\VignetteIndexEntry{Overview of Vignettes} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) knitr::opts_chunk$set( echo = TRUE, collapse = TRUE, warning = FALSE, message = FALSE, comment = "#>", eval = TRUE ) ``` All package vignettes are available at [https://easystats.github.io/bayestestR/](https://easystats.github.io/bayestestR/). ## Function Overview * [Function Reference](https://easystats.github.io/bayestestR/reference/index.html) ## Get Started * [Get Started with Bayesian Analysis](https://easystats.github.io/bayestestR/articles/bayestestR.html) ## Examples 1. 
[Initiation to Bayesian models](https://easystats.github.io/bayestestR/articles/example1.html) 2. [Confirmation of Bayesian skills](https://easystats.github.io/bayestestR/articles/example2.html) 3. [Become a Bayesian master](https://easystats.github.io/bayestestR/articles/example3.html) ## Articles * [Credible Intervals (CI))](https://easystats.github.io/bayestestR/articles/credible_interval.html) * [Region of Practical Equivalence (ROPE)](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html) * [Probability of Direction (pd)](https://easystats.github.io/bayestestR/articles/probability_of_direction.html) * [Bayes Factors](https://easystats.github.io/bayestestR/articles/bayes_factors.html) ## In-Depths * [Comparison of Point-Estimates](https://easystats.github.io/bayestestR/articles/web_only/indicesEstimationComparison.html) * [Indices of Effect Existence and Significance in the Bayesian Framework](https://www.frontiersin.org/articles/10.3389/fpsyg.2019.02767/full) * [Mediation Analysis using Bayesian Regression Models](https://easystats.github.io/bayestestR/articles/mediation.html) ## Guidelines * [Reporting Guidelines](https://easystats.github.io/bayestestR/articles/guidelines.html) bayestestR/inst/doc/overview_of_vignettes.R0000644000176200001440000000035515054305237020675 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------ library(knitr) knitr::opts_chunk$set( echo = TRUE, collapse = TRUE, warning = FALSE, message = FALSE, comment = "#>", eval = TRUE ) bayestestR/inst/doc/overview_of_vignettes.html0000644000176200001440000001622015054305237021436 0ustar liggesusers Overview of Vignettes

bayestestR/inst/WORDLIST0000644000176200001440000000360115052646230014551 0ustar liggesusersADE Altough ArXiv BCa BFs BGGM BICs BMA BMJ Baws BayesFactor Bayesfactor Bergh Betancourt Bridgesampling CMD CRC CWI Curvewise DOI DV Dablander DescTools Desimone DiCiccio Dom Driing ESS ETI Efron Etz Fernández Funder Gelman Ghosh Grasman Gronau's HDI HDInterval Haaf Hinne Hirose IRR Imai Iverson JASP JASP's Jeffreys Kass Keele Kruschke Kuriyal Kurz's Ley Liao Liddell Lindley Littman Liu Lodewyckx Ly MCMCglmm MCSE MPE Mathot Mattan Matzke McElreath Midya Modelling Morey Multicollinearity ORCID Ozer Parmigiani Piironen Posteriori Preprint Psychonomic ROPE's ROPEs ROPE’s Raftery Rhat Rouder SEM SEXIT SHA SPI SPIn Shachar Speckman Tada Tingley Un Vandekerckhove Vehtari Versicolor Visualise Wagenmakers Wether Wetzels Wickham Wookies Yamamoto Ying Zheng al altough arXiv autocorrelated avaible bayesQR bayesian bcplm behavioural bmj bmwiernik bootsrapped brms brmsfit centred characterisation characterises ci codecov compte containe cplm curvewise doi driiiing eXistence easystats effectsize egydq emmeans et favour favouring fpsyg frac frequentis frequentist's fullrank generalised ggdist ggdistribute grano higer https infty ing interpretability interpretable iteratively jmp joss lavaan lentiful lifecycle lm marginaleffects maths mattansb mcmc mfx modelling nbinom neq notin objets operationalizing orthonormal osterior patilindrajeets pre preprint priori ps psyarxiv rOpenSci reconceptualisation replicability reproducibility richarddmorey riors rmsb rmarkdown rstanarm sIgnificance salis setosa setosas splinefun ss stanfit stanreg strengejacke summarise summarised th treedepth tweedie un underbrace unupdate versicolor versicolors virginica virgnica visualisation visualise warmup wil xy bayestestR/README.md0000644000176200001440000004747315054272020013672 0ustar liggesusers # bayestestR [![DOI](https://joss.theoj.org/papers/10.21105/joss.01541/status.svg)](https://doi.org/10.21105/joss.01541) [![downloads](https://cranlogs.r-pkg.org/badges/bayestestR)](https://cran.r-project.org/package=bayestestR) [![total](https://cranlogs.r-pkg.org/badges/grand-total/bayestestR)](https://cranlogs.r-pkg.org/) ***Become a Bayesian master you will*** Existing R packages allow users to easily fit a large variety of models and extract and visualize the posterior draws. However, most of these packages only return a limited set of indices (e.g., point-estimates and CIs). **bayestestR** provides a comprehensive and consistent set of functions to analyze and describe posterior distributions generated by a variety of models objects, including popular modeling packages such as **rstanarm**, **brms** or **BayesFactor**. You can reference the package and its documentation as follows: - Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). *bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework*. Journal of Open Source Software, 4(40), 1541. [10.21105/joss.01541](https://doi.org/10.21105/joss.01541) - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). *Indices of Effect Existence and Significance in the Bayesian Framework*. Frontiers in Psychology 2019;10:2767. 
[10.3389/fpsyg.2019.02767](https://doi.org/10.3389/fpsyg.2019.02767) ## Installation [![CRAN](https://www.r-pkg.org/badges/version/bayestestR)](https://cran.r-project.org/package=bayestestR) [![bayestestR status badge](https://easystats.r-universe.dev/badges/bayestestR)](https://easystats.r-universe.dev) [![codecov](https://codecov.io/gh/easystats/bayestestR/branch/main/graph/badge.svg)](https://app.codecov.io/gh/easystats/bayestestR) The *bayestestR* package is available on CRAN, while its latest development version is available on R-universe (from *rOpenSci*). | Type | Source | Command | |----|----|----| | Release | CRAN | `install.packages("bayestestR")` | | Development | R-universe | `install.packages("bayestestR", repos = "https://easystats.r-universe.dev")` | Once you have downloaded the package, you can then load it using: ``` r library("bayestestR") ``` > **Tip** > > Instead of `library(bayestestR)`, use `library(easystats)`. This will > make all features of the easystats-ecosystem available. > > To stay updated, use `easystats::install_latest()`. ## Documentation [![Documentation](https://img.shields.io/badge/documentation-bayestestR-orange.svg?colorB=E91E63)](https://easystats.github.io/bayestestR/) [![Blog](https://img.shields.io/badge/blog-easystats-orange.svg?colorB=FF9800)](https://easystats.github.io/blog/posts/) [![Features](https://img.shields.io/badge/features-bayestestR-orange.svg?colorB=2196F3)](https://easystats.github.io/bayestestR/reference/index.html) Access the package [documentation](https://easystats.github.io/bayestestR/) and check-out these vignettes: ### Tutorials - [Get Started with Bayesian Analysis](https://easystats.github.io/bayestestR/articles/bayestestR.html) - [Example 1: Initiation to Bayesian models](https://easystats.github.io/bayestestR/articles/example1.html) - [Example 2: Confirmation of Bayesian skills](https://easystats.github.io/bayestestR/articles/example2.html) - [Example 3: Become a Bayesian master](https://easystats.github.io/bayestestR/articles/example3.html) ### Articles - [Credible Intervals (CI)](https://easystats.github.io/bayestestR/articles/credible_interval.html) - [Probability of Direction (pd)](https://easystats.github.io/bayestestR/articles/probability_of_direction.html) - [Region of Practical Equivalence (ROPE)](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html) - [Bayes Factors (BF)](https://easystats.github.io/bayestestR/articles/bayes_factors.html) - [Comparison of Point-Estimates](https://easystats.github.io/bayestestR/articles/web_only/indicesEstimationComparison.html) - [Comparison of Indices of Effect Existence](https://doi.org/10.3389/fpsyg.2019.02767) - [Reporting Guidelines](https://easystats.github.io/bayestestR/articles/guidelines.html) # Features In the Bayesian framework, parameters are estimated in a probabilistic fashion as *distributions*. These distributions can be summarised and described by reporting four types of indices: - [**Centrality**](https://easystats.github.io/bayestestR/articles/web_only/indicesEstimationComparison.html) - `mean()`, `median()` or [`map_estimate()`](https://easystats.github.io/bayestestR/reference/map_estimate.html) for an estimation of the mode. - [`point_estimate()`](https://easystats.github.io/bayestestR/reference/point_estimate.html) can be used to get them at once and can be run directly on models. 
- [**Uncertainty**](https://easystats.github.io/bayestestR/articles/credible_interval.html) - [`hdi()`](https://easystats.github.io/bayestestR/reference/hdi.html) for *Highest Density Intervals (HDI)*, [`spi()`](https://easystats.github.io/bayestestR/reference/spi.html) for *Shortest Probability Intervals (SPI)* or [`eti()`](https://easystats.github.io/bayestestR/reference/eti.html) for *Equal-Tailed Intervals (ETI)*. - [`ci()`](https://easystats.github.io/bayestestR/reference/ci.html) can be used as a general method for Confidence and Credible Intervals (CI). - [**Effect Existence**](https://easystats.github.io/bayestestR/articles/indicesExistenceComparison.html): whether an effect is different from 0. - [`p_direction()`](https://easystats.github.io/bayestestR/reference/p_direction.html) for a Bayesian equivalent of the frequentist *p*-value (see [Makowski et al., 2019](https://doi.org/10.3389/fpsyg.2019.02767)) - [`p_pointnull()`](https://easystats.github.io/bayestestR/reference/p_map.html) represents the odds of null hypothesis (*h0 = 0*) compared to the most likely hypothesis (the MAP). - [`bf_pointnull()`](https://easystats.github.io/bayestestR/reference/bayesfactor_parameters.html) for a classic *Bayes Factor (BF)* assessing the likelihood of effect presence against its absence (*h0 = 0*). - [**Effect Significance**](https://easystats.github.io/bayestestR/articles/indicesExistenceComparison.html): whether the effect size can be considered as non-negligible. - [`p_rope()`](https://easystats.github.io/bayestestR/reference/p_rope.html) is the probability of the effect falling inside a [*Region of Practical Equivalence (ROPE)*](https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html). - [`bf_rope()`](https://easystats.github.io/bayestestR/reference/bayesfactor_parameters.html) computes a Bayes factor against the null as defined by a region (the ROPE). - [`p_significance()`](https://easystats.github.io/bayestestR/reference/p_significance.html) that combines a region of equivalence with the probability of direction. [`describe_posterior()`](https://easystats.github.io/bayestestR/reference/describe_posterior.html) is the master function with which you can compute all of the indices cited below at once. ``` r describe_posterior( rnorm(10000), centrality = "median", test = c("p_direction", "p_significance"), verbose = FALSE ) ## Summary of Posterior Distribution ## ## Parameter | Median | 95% CI | pd | ps ## ---------------------------------------------------- ## Posterior | 5.94e-04 | [-1.91, 1.92] | 50.05% | 0.46 ``` `describe_posterior()` works for many objects, including more complex *brmsfit*-models. 
For better readability, the output is separated by model components: ``` r zinb <- read.csv("http://stats.idre.ucla.edu/stat/data/fish.csv") set.seed(123) model <- brm( bf( count ~ child + camper + (1 | persons), zi ~ child + camper + (1 | persons) ), data = zinb, family = zero_inflated_poisson(), chains = 1, iter = 500 ) describe_posterior( model, effects = "all", component = "all", test = c("p_direction", "p_significance"), centrality = "all" ) ``` ## Summary of Posterior Distribution ## ## Parameter | Median | Mean | MAP | 95% CI | pd | ps | Rhat | ESS ## ----------------------------------------------------------------------------------- ## (Intercept) | 0.96 | 0.96 | 0.96 | [-0.81, 2.51] | 90.00% | 0.88 | 1.011 | 110 ## child | -1.16 | -1.16 | -1.16 | [-1.36, -0.94] | 100% | 1.00 | 0.996 | 278 ## camper | 0.73 | 0.72 | 0.73 | [ 0.54, 0.91] | 100% | 1.00 | 0.996 | 271 ## ## # Fixed effects (zero-inflated) ## ## Parameter | Median | Mean | MAP | 95% CI | pd | ps | Rhat | ESS ## ----------------------------------------------------------------------------------- ## (Intercept) | -0.48 | -0.51 | -0.22 | [-2.03, 0.89] | 78.00% | 0.73 | 0.997 | 138 ## child | 1.85 | 1.86 | 1.81 | [ 1.19, 2.54] | 100% | 1.00 | 0.996 | 303 ## camper | -0.88 | -0.86 | -0.99 | [-1.61, -0.07] | 98.40% | 0.96 | 0.996 | 292 ## ## # Random effects (conditional) (SD/Cor: persons) ## ## Parameter | Median | Mean | MAP | 95% CI | pd | ps | Rhat | ESS ## ------------------------------------------------------------------------------- ## (Intercept) | 1.42 | 1.58 | 1.07 | [ 0.71, 3.58] | 100% | 1.00 | 1.010 | 126 ## ## # Random effects (zero-inflated) (SD/Cor: persons) ## ## Parameter | Median | Mean | MAP | 95% CI | pd | ps | Rhat | ESS ## ------------------------------------------------------------------------------- ## (Intercept) | 1.30 | 1.49 | 0.99 | [ 0.63, 3.41] | 100% | 1.00 | 0.996 | 129 *bayestestR* also includes [**many other features**](https://easystats.github.io/bayestestR/reference/index.html) useful for your Bayesian analyses. Here are some more examples: ## Point-estimates ``` r library(bayestestR) posterior <- distribution_gamma(10000, 1.5) # Generate a skewed distribution centrality <- point_estimate(posterior) # Get indices of centrality centrality ## Point Estimate ## ## Median | Mean | MAP ## -------------------- ## 1.18 | 1.50 | 0.51 ``` As for other [**easystats**](https://github.com/easystats) packages, `plot()` methods are available from the [**see**](https://easystats.github.io/see/) package for many functions: ![](man/figures/unnamed-chunk-8-1.png) While the **median** and the **mean** are available through base R functions, [`map_estimate()`](https://easystats.github.io/bayestestR/reference/map_estimate.html) in *bayestestR* can be used to directly find the **Highest Maximum A Posteriori (MAP)** estimate of a posterior, *i.e.*, the value associated with the highest probability density (the “peak” of the posterior distribution). In other words, it is an estimation of the *mode* for continuous parameters. ## Uncertainty (CI) [`hdi()`](https://easystats.github.io/bayestestR/reference/hdi.html) computes the **Highest Density Interval (HDI)** of a posterior distribution, i.e., the interval which contains all points within the interval have a higher probability density than points outside the interval. The HDI can be used in the context of Bayesian posterior characterization as **Credible Interval (CI)**. 
Unlike equal-tailed intervals (see [`eti()`](https://easystats.github.io/bayestestR/reference/eti.html)) that typically exclude 2.5% from each tail of the distribution, the HDI is *not* equal-tailed and therefore always includes the mode(s) of posterior distributions. ``` r posterior <- distribution_chisquared(10000, 4) hdi(posterior, ci = 0.89) ## 89% HDI: [0.18, 7.63] eti(posterior, ci = 0.89) ## 89% ETI: [0.75, 9.25] ``` ![](man/figures/unnamed-chunk-10-1.png) ## Existence and Significance Testing ### Probability of Direction (*pd*) [`p_direction()`](https://easystats.github.io/bayestestR/reference/p_direction.html) computes the *Probability of Direction* (*p*d, also known as the Maximum Probability of Effect - *MPE*). It varies between 50% and 100% (*i.e.*, `0.5` and `1`) and can be interpreted as the probability (expressed in percentage) that a parameter (described by its posterior distribution) is strictly positive or negative (whichever is the most probable). It is mathematically defined as the proportion of the posterior distribution that is of the median’s sign. Although differently expressed, this index is fairly similar (*i.e.*, is strongly correlated) to the frequentist *p*-value. **Relationship with the p-value**: In most cases, it seems that the *pd* corresponds to the frequentist one-sided *p*-value through the formula `p-value = (1-pd/100)` and to the two-sided *p*-value (the most commonly reported) through the formula `p-value = 2*(1-pd/100)`. Thus, a `pd` of `95%`, `97.5%` `99.5%` and `99.95%` corresponds approximately to a two-sided *p*-value of respectively `.1`, `.05`, `.01` and `.001`. See the [*reporting guidelines*](https://easystats.github.io/bayestestR/articles/guidelines.html). ``` r posterior <- distribution_normal(10000, 0.4, 0.2) p_direction(posterior) ## Probability of Direction ## ## Parameter | pd ## ------------------ ## Posterior | 97.72% ``` ![](man/figures/unnamed-chunk-12-1.png) ### ROPE [`rope()`](https://easystats.github.io/bayestestR/reference/rope.html) computes the proportion (in percentage) of the HDI (default to the 89% HDI) of a posterior distribution that lies within a region of practical equivalence. Statistically, the probability of a posterior distribution of being different from 0 does not make much sense (the probability of it being different from a single point being infinite). Therefore, the idea underlining ROPE is to let the user define an area around the null value enclosing values that are *equivalent to the null* value for practical purposes Kruschke (2018). Kruschke suggests that such null value could be set, by default, to the -0.1 to 0.1 range of a standardized parameter (negligible effect size according to Cohen, 1988). This could be generalized: For instance, for linear models, the ROPE could be set as `0 +/- .1 * sd(y)`. This ROPE range can be automatically computed for models using the [rope_range](https://easystats.github.io/bayestestR/reference/rope_range.html) function. Kruschke suggests using the proportion of the 95% (or 90%, considered more stable) HDI that falls within the ROPE as an index for “null-hypothesis” testing (as understood under the Bayesian framework, see [equivalence_test](https://easystats.github.io/bayestestR/reference/equivalence_test.html)). 
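Kruschke's decision rule is implemented directly in
[`equivalence_test()`](https://easystats.github.io/bayestestR/reference/equivalence_test.html).
Before turning to `rope()` below, here is a minimal sketch (using simulated
posteriors rather than a fitted model) of how the proportion of the HDI inside
the ROPE is turned into a decision (`"Rejected"`, `"Accepted"` or
`"Undecided"`); the posterior values and ROPE limits are purely illustrative:

``` r
library(bayestestR)

# Two simulated posteriors: a non-negligible effect and a near-null effect
posterior_effect <- distribution_normal(10000, mean = 0.4, sd = 0.2)
posterior_null <- distribution_normal(10000, mean = 0, sd = 0.02)

# Proportion of the 95% HDI inside the ROPE [-0.1, 0.1] (ROPE_Percentage)
# and the resulting decision (ROPE_Equivalence)
equivalence_test(posterior_effect, range = c(-0.1, 0.1), ci = 0.95)
equivalence_test(posterior_null, range = c(-0.1, 0.1), ci = 0.95)
```

For fitted models, `range = "default"` lets `rope_range()` pick suitable ROPE
limits automatically.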
``` r posterior <- distribution_normal(10000, 0.4, 0.2) rope(posterior, range = c(-0.1, 0.1)) ## # Proportion of samples inside the ROPE [-0.10, 0.10]: ## ## Inside ROPE ## ----------- ## 4.40 % ``` ![](man/figures/unnamed-chunk-14-1.png) ### Bayes Factor [`bayesfactor_parameters()`](https://easystats.github.io/bayestestR/reference/bayesfactor_parameters.html) computes Bayes factors against the null (either a point or an interval), bases on prior and posterior samples of a single parameter. This Bayes factor indicates the degree by which the mass of the posterior distribution has shifted further away from or closer to the null value(s) (relative to the prior distribution), thus indicating if the null value has become less or more likely given the observed data. When the null is an interval, the Bayes factor is computed by comparing the prior and posterior odds of the parameter falling within or outside the null; When the null is a point, a Savage-Dickey density ratio is computed, which is also an approximation of a Bayes factor comparing the marginal likelihoods of the model against a model in which the tested parameter has been restricted to the point null (Wagenmakers, Lodewyckx, Kuriyal, & Grasman, 2010). ``` r prior <- distribution_normal(10000, mean = 0, sd = 1) posterior <- distribution_normal(10000, mean = 1, sd = 0.7) bayesfactor_parameters(posterior, prior, direction = "two-sided", null = 0, verbose = FALSE) ## Bayes Factor (Savage-Dickey density ratio) ## ## BF ## ---- ## 1.94 ## ## * Evidence Against The Null: 0 ``` ![](man/figures/unnamed-chunk-16-1.png) *The lollipops represent the density of a point-null on the prior distribution (the blue lollipop on the dotted distribution) and on the posterior distribution (the red lollipop on the yellow distribution). The ratio between the two - the Savage-Dickey ratio - indicates the degree by which the mass of the parameter distribution has shifted away from or closer to the null.* For more info, see [the Bayes factors vignette](https://easystats.github.io/bayestestR/articles/bayes_factors.html). ## Utilities ### Find ROPE’s appropriate range [`rope_range()`](https://easystats.github.io/bayestestR/reference/rope_range.html): This function attempts at automatically finding suitable “default” values for the Region Of Practical Equivalence (ROPE). Kruschke (2018) suggests that such null value could be set, by default, to a range from `-0.1` to `0.1` of a standardized parameter (negligible effect size according to Cohen, 1988), which can be generalised for linear models to `-0.1 * sd(y), 0.1 * sd(y)`. For logistic models, the parameters expressed in log odds ratio can be converted to standardized difference through the formula `sqrt(3)/pi`, resulting in a range of `-0.05` to `0.05`. ``` r rope_range(model) ``` ### Density Estimation [`estimate_density()`](https://easystats.github.io/bayestestR/reference/estimate_density.html): This function is a wrapper over different methods of density estimation. By default, it uses the base R `density` with by default uses a different smoothing bandwidth (`"SJ"`) from the legacy default implemented the base R `density` function (`"nrd0"`). However, Deng & Wickham suggest that `method = "KernSmooth"` is the fastest and the most accurate. ### Perfect Distributions [`distribution()`](https://easystats.github.io/bayestestR/reference/distribution.html): Generate a sample of size n with near-perfect distributions. 
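As a complement to the point-null (Savage-Dickey) example shown next, the
interval-null case can be requested by passing a range to `null`. The snippet
below is a minimal, illustrative sketch (the prior and posterior are simulated,
and the ROPE-like interval `c(-0.1, 0.1)` is an arbitrary choice):

``` r
library(bayestestR)

prior <- distribution_normal(10000, mean = 0, sd = 1)
posterior <- distribution_normal(10000, mean = 1, sd = 0.7)

# Interval null: compares the prior and posterior odds of the parameter
# falling inside versus outside the null region
bayesfactor_parameters(posterior, prior, null = c(-0.1, 0.1), verbose = FALSE)
```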
``` r distribution(n = 10) ## [1] -1.55 -1.00 -0.66 -0.38 -0.12 0.12 0.38 0.66 1.00 1.55 ``` ### Probability of a Value [`density_at()`](https://easystats.github.io/bayestestR/reference/density_at.html): Compute the density of a given point of a distribution. ``` r density_at(rnorm(1000, 1, 1), 1) ## [1] 0.37 ``` ## Code of Conduct Please note that the bayestestR project is released with a [Contributor Code of Conduct](https://contributor-covenant.org/version/2/1/CODE_OF_CONDUCT.html). By contributing to this project, you agree to abide by its terms. # References
Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. *Advances in Methods and Practices in Psychological Science*, *1*(2), 270–280.
Kruschke, J. K., & Liddell, T. M. (2018). The bayesian new statistics: Hypothesis testing, estimation, meta-analysis, and power analysis from a bayesian perspective. *Psychonomic Bulletin & Review*, *25*(1), 178–206.
Wagenmakers, E.-J., Lodewyckx, T., Kuriyal, H., & Grasman, R. (2010). Bayesian hypothesis testing for psychologists: A tutorial on the Savage–Dickey method. *Cognitive Psychology*, *60*(3), 158–189.
bayestestR/build/0000755000176200001440000000000015054305237013502 5ustar liggesusersbayestestR/build/vignette.rds0000644000176200001440000000033115054305237016036 0ustar liggesusersb```b`aed`b2 1# '/K-*L-O/LK-)I- MAS(USH i%9h*q t0XD90!icKŰ% 5/$~hZ8S+`zP԰Aհe ,s\ܠL t7`~΢r=xA$Gs=ʕXVr7}bayestestR/build/partial.rdb0000644000176200001440000000007515054305207015626 0ustar liggesusersb```b`aed`b1 H020piּb C" 7bayestestR/man/0000755000176200001440000000000015052646230013155 5ustar liggesusersbayestestR/man/distribution.Rd0000644000176200001440000000676115052646230016175 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/distribution.R \name{distribution} \alias{distribution} \alias{distribution_custom} \alias{distribution_beta} \alias{distribution_binomial} \alias{distribution_binom} \alias{distribution_cauchy} \alias{distribution_chisquared} \alias{distribution_chisq} \alias{distribution_gamma} \alias{distribution_mixture_normal} \alias{distribution_normal} \alias{distribution_gaussian} \alias{distribution_nbinom} \alias{distribution_poisson} \alias{distribution_student} \alias{distribution_t} \alias{distribution_student_t} \alias{distribution_tweedie} \alias{distribution_uniform} \title{Empirical Distributions} \usage{ distribution(type = "normal", ...) distribution_custom(n, type = "norm", ..., random = FALSE) distribution_beta(n, shape1, shape2, ncp = 0, random = FALSE, ...) distribution_binomial(n, size = 1, prob = 0.5, random = FALSE, ...) distribution_binom(n, size = 1, prob = 0.5, random = FALSE, ...) distribution_cauchy(n, location = 0, scale = 1, random = FALSE, ...) distribution_chisquared(n, df, ncp = 0, random = FALSE, ...) distribution_chisq(n, df, ncp = 0, random = FALSE, ...) distribution_gamma(n, shape, scale = 1, random = FALSE, ...) distribution_mixture_normal(n, mean = c(-3, 3), sd = 1, random = FALSE, ...) distribution_normal(n, mean = 0, sd = 1, random = FALSE, ...) distribution_gaussian(n, mean = 0, sd = 1, random = FALSE, ...) distribution_nbinom(n, size, prob, mu, phi, random = FALSE, ...) distribution_poisson(n, lambda = 1, random = FALSE, ...) distribution_student(n, df, ncp, random = FALSE, ...) distribution_t(n, df, ncp, random = FALSE, ...) distribution_student_t(n, df, ncp, random = FALSE, ...) distribution_tweedie(n, xi = NULL, mu, phi, power = NULL, random = FALSE, ...) distribution_uniform(n, min = 0, max = 1, random = FALSE, ...) } \arguments{ \item{type}{Can be any of the names from base R's \link[stats:Distributions]{Distributions}, like \code{"cauchy"}, \code{"pois"} or \code{"beta"}.} \item{...}{Arguments passed to or from other methods.} \item{n}{the number of observations} \item{random}{Generate near-perfect or random (simple wrappers for the base R \verb{r*} functions) distributions. 
When \code{random = FALSE}, these function return \verb{q*(ppoints(n), ...)}.} \item{shape1, shape2}{non-negative parameters of the Beta distribution.} \item{ncp}{non-centrality parameter.} \item{size}{number of trials (zero or more).} \item{prob}{probability of success on each trial.} \item{location, scale}{location and scale parameters.} \item{df}{degrees of freedom (non-negative, but can be non-integer).} \item{shape}{Shape parameter.} \item{mean}{vector of means.} \item{sd}{vector of standard deviations.} \item{mu}{the mean} \item{phi}{Corresponding to \code{glmmTMB}'s implementation of nbinom distribution, where \code{size=mu/phi}.} \item{lambda}{vector of (non-negative) means.} \item{xi}{For tweedie distributions, the value of \code{xi} such that the variance is \code{var(Y) = phi * mu^xi}.} \item{power}{Alias for \code{xi}.} \item{min, max}{lower and upper limits of the distribution. Must be finite.} } \description{ Generate a sequence of n-quantiles, i.e., a sample of size \code{n} with a near-perfect distribution. } \examples{ library(bayestestR) x <- distribution(n = 10) plot(density(x)) x <- distribution(type = "gamma", n = 100, shape = 2) plot(density(x)) } bayestestR/man/mcse.Rd0000644000176200001440000001165015005147105014371 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mcse.R \name{mcse} \alias{mcse} \alias{mcse.stanreg} \title{Monte-Carlo Standard Error (MCSE)} \usage{ mcse(model, ...) \method{mcse}{stanreg}(model, effects = "fixed", component = "location", parameters = NULL, ...) } \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, \code{brmsfit}, \code{blavaan}, or \code{MCMCglmm} object.} \item{...}{Currently not used.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). 
\item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ This function returns the Monte Carlo Standard Error (MCSE). } \details{ \strong{Monte Carlo Standard Error (MCSE)} is another measure of accuracy of the chains. It is defined as standard deviation of the chains divided by their effective sample size (the formula for \code{mcse()} is from Kruschke 2015, p. 187). The MCSE \dQuote{provides a quantitative suggestion of how big the estimation noise is}. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (require("rstanarm")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ library(bayestestR) model <- suppressWarnings( rstanarm::stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) ) mcse(model) } \dontshow{\}) # examplesIf} } \references{ Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. } bayestestR/man/equivalence_test.Rd0000644000176200001440000002643615005147105017012 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/equivalence_test.R \name{equivalence_test} \alias{equivalence_test} \alias{equivalence_test.default} \alias{equivalence_test.data.frame} \alias{equivalence_test.brmsfit} \title{Test for Practical Equivalence} \usage{ equivalence_test(x, ...) \method{equivalence_test}{default}(x, ...) \method{equivalence_test}{data.frame}( x, range = "default", ci = 0.95, rvar_col = NULL, verbose = TRUE, ... ) \method{equivalence_test}{brmsfit}( x, range = "default", ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ... 
) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{range}{ROPE's lower and higher bounds. Should be \code{"default"} or depending on the number of outcome variables a vector or a list. For models with one response, \code{range} can be: \itemize{ \item a vector of length two (e.g., \code{c(-0.1, 0.1)}), \item a list of numeric vector of the same length as numbers of parameters (see 'Examples'). \item a list of \emph{named} numeric vectors, where names correspond to parameter names. In this case, all parameters that have no matching name in \code{range} will be set to \code{"default"}. } In multivariate models, \code{range} should be a list with another list (one for each response variable) of numeric vectors . Vector names should correspond to the name of the response variables. If \code{"default"} and input is a vector, the range is set to \code{c(-0.1, 0.1)}. If \code{"default"} and input is a Bayesian model, \code{\link[=rope_range]{rope_range()}} is used. See 'Examples'.} \item{ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{verbose}{Toggle off warnings.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. 
Use \code{parameters} to select specific parameters for the output.} } \value{ A data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the HDI. \item \code{ROPE_low}, \code{ROPE_high} The limits of the ROPE. These values are identical for all parameters. \item \code{ROPE_Percentage} The proportion of the HDI that lies inside the ROPE. \item \code{ROPE_Equivalence} The "test result", as character. Either "rejected", "accepted" or "undecided". \item \code{HDI_low} , \code{HDI_high} The lower and upper HDI limits for the parameters. } } \description{ Perform a \strong{Test for Practical Equivalence} for Bayesian and frequentist models. } \details{ Documentation is accessible for: \itemize{ \item \href{https://easystats.github.io/bayestestR/reference/equivalence_test.html}{Bayesian models} \item \href{https://easystats.github.io/parameters/reference/equivalence_test.lm.html}{Frequentist models} } For Bayesian models, the \strong{Test for Practical Equivalence} is based on the \emph{"HDI+ROPE decision rule"} (\cite{Kruschke, 2014, 2018}) to check whether parameter values should be accepted or rejected against an explicitly formulated "null hypothesis" (i.e., a ROPE). In other words, it checks the percentage of the \verb{89\%} \link[=hdi]{HDI} that is the null region (the ROPE). If this percentage is sufficiently low, the null hypothesis is rejected. If this percentage is sufficiently high, the null hypothesis is accepted. Using the \link[=rope]{ROPE} and the \link[=hdi]{HDI}, \cite{Kruschke (2018)} suggests using the percentage of the \verb{95\%} (or \verb{89\%}, considered more stable) HDI that falls within the ROPE as a decision rule. If the HDI is completely outside the ROPE, the "null hypothesis" for this parameter is "rejected". If the ROPE completely covers the HDI, i.e., all most credible values of a parameter are inside the region of practical equivalence, the null hypothesis is accepted. Else, it is undecided whether to accept or reject the null hypothesis. If the full ROPE is used (i.e., \verb{100\%} of the HDI), then the null hypothesis is rejected or accepted if the percentage of the posterior within the ROPE is smaller than to \verb{2.5\%} or greater than \verb{97.5\%}. Desirable results are low proportions inside the ROPE (the closer to zero the better). Some attention is required for finding suitable values for the ROPE limits (argument \code{range}). See 'Details' in \code{\link[=rope_range]{rope_range()}} for further information. \strong{Multicollinearity: Non-independent covariates} When parameters show strong correlations, i.e. when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. In such cases, the test for practical equivalence may have inappropriate results. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are the results of the "undecided" parameters, which may either move further towards "rejection" or away from it (\cite{Kruschke 2014, 340f}). \code{equivalence_test()} performs a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. 
An even more sophisticated check is the projection predictive variable selection (\cite{Piironen and Vehtari 2017}). } \note{ There is a \code{print()}-method with a \code{digits}-argument to control the amount of digits in the output, and there is a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} to visualize the results from the equivalence-test (for models only). } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (all(insight::check_if_installed(c("rstanarm", "brms", "emmeans", "BayesFactor", "see"), quietly = TRUE))) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) equivalence_test(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) equivalence_test(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1)) equivalence_test(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1)) equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99)) # print more digits test <- equivalence_test(x = rnorm(1000, 1, 1), ci = c(.50, .99)) print(test, digits = 4) \donttest{ model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) equivalence_test(model) # multiple ROPE ranges - asymmetric, symmetric, default equivalence_test(model, range = list(c(10, 40), c(-5, -4), "default")) # named ROPE ranges equivalence_test(model, range = list(wt = c(-5, -4), `(Intercept)` = c(10, 40))) # plot result test <- equivalence_test(model) plot(test) equivalence_test(emmeans::emtrends(model, ~1, "wt", data = mtcars)) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) equivalence_test(model) bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) # equivalence_test(bf) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304} \item Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press \item Piironen, J., & Vehtari, A. (2017). Comparison of Bayesian predictive methods for model selection. Statistics and Computing, 27(3), 711–735. 
\doi{10.1007/s11222-016-9649-y} } } bayestestR/man/bayesfactor_parameters.Rd0000644000176200001440000003631115005147105020170 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor_parameters.R \name{bayesfactor_parameters} \alias{bayesfactor_parameters} \alias{bayesfactor_pointnull} \alias{bayesfactor_rope} \alias{bf_parameters} \alias{bf_pointnull} \alias{bf_rope} \alias{bayesfactor_parameters.numeric} \alias{bayesfactor_parameters.stanreg} \alias{bayesfactor_parameters.data.frame} \title{Bayes Factors (BF) for a Single Parameter} \usage{ bayesfactor_parameters( posterior, prior = NULL, direction = "two-sided", null = 0, ..., verbose = TRUE ) bayesfactor_pointnull( posterior, prior = NULL, direction = "two-sided", null = 0, ..., verbose = TRUE ) bayesfactor_rope( posterior, prior = NULL, direction = "two-sided", null = rope_range(posterior, verbose = FALSE), ..., verbose = TRUE ) bf_parameters( posterior, prior = NULL, direction = "two-sided", null = 0, ..., verbose = TRUE ) bf_pointnull( posterior, prior = NULL, direction = "two-sided", null = 0, ..., verbose = TRUE ) bf_rope( posterior, prior = NULL, direction = "two-sided", null = rope_range(posterior, verbose = FALSE), ..., verbose = TRUE ) \method{bayesfactor_parameters}{numeric}( posterior, prior = NULL, direction = "two-sided", null = 0, ..., verbose = TRUE ) \method{bayesfactor_parameters}{stanreg}( posterior, prior = NULL, direction = "two-sided", null = 0, effects = "fixed", component = "conditional", parameters = NULL, ..., verbose = TRUE ) \method{bayesfactor_parameters}{data.frame}( posterior, prior = NULL, direction = "two-sided", null = 0, rvar_col = NULL, ..., verbose = TRUE ) } \arguments{ \item{posterior}{A numerical vector, \code{stanreg} / \code{brmsfit} object, \code{emmGrid} or a data frame - representing a posterior distribution(s) from (see 'Details').} \item{prior}{An object representing a prior distribution (see 'Details').} \item{direction}{Test type (see 'Details'). One of \code{0}, \code{"two-sided"} (default, two tailed), \code{-1}, \code{"left"} (left tailed) or \code{1}, \code{"right"} (right tailed).} \item{null}{Value of the null, either a scalar (for point-null) or a range (for a interval-null).} \item{...}{Arguments passed to and from other methods. (Can be used to pass arguments to internal \code{\link[logspline:logspline]{logspline::logspline()}}.)} \item{verbose}{Toggle off warnings.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. 
Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} } \value{ A data frame containing the (log) Bayes factor representing evidence \emph{against} the null (Use \code{as.numeric()} to extract the non-log Bayes factors; see examples). } \description{ This method computes Bayes factors against the null (either a point or an interval), based on prior and posterior samples of a single parameter. This Bayes factor indicates the degree by which the mass of the posterior distribution has shifted further away from or closer to the null value(s) (relative to the prior distribution), thus indicating if the null value has become less or more likely given the observed data. \cr \cr When the null is an interval, the Bayes factor is computed by comparing the prior and posterior odds of the parameter falling within or outside the null interval (Morey & Rouder, 2011; Liao et al., 2020); When the null is a point, a Savage-Dickey density ratio is computed, which is also an approximation of a Bayes factor comparing the marginal likelihoods of the model against a model in which the tested parameter has been restricted to the point null (Wagenmakers et al., 2010; Heck, 2019). \cr \cr Note that the \code{logspline} package is used for estimating densities and probabilities, and must be installed for the function to work. \cr \cr \code{bayesfactor_pointnull()} and \code{bayesfactor_rope()} are wrappers around \code{bayesfactor_parameters} with different defaults for the null to be tested against (a point and a range, respectively). Aliases of the main functions are prefixed with \verb{bf_*}, like \code{bf_parameters()} or \code{bf_pointnull()}. \cr \cr \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.} } \details{ This method is used to compute Bayes factors based on prior and posterior distributions. \subsection{One-sided & Dividing Tests (setting an order restriction)}{ One sided tests (controlled by \code{direction}) are conducted by restricting the prior and posterior of the non-null values (the "alternative") to one side of the null only (\cite{Morey & Wagenmakers, 2014}). 
For example, if we have a prior hypothesis that the parameter should be positive, the alternative will be restricted to the region to the right of the null (point or interval). For example, for a Bayes factor comparing the "null" of \code{0-0.1} to the alternative \verb{>0.1}, we would set \code{bayesfactor_parameters(null = c(0, 0.1), direction = ">")}. \cr\cr It is also possible to compute a Bayes factor for \strong{dividing} hypotheses - that is, for a null and alternative that are complementary, opposing one-sided hypotheses (\cite{Morey & Wagenmakers, 2014}). For example, for a Bayes factor comparing the "null" of \verb{<0} to the alternative \verb{>0}, we would set \code{bayesfactor_parameters(null = c(-Inf, 0))}. } } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Setting the correct \code{prior}}{ For the computation of Bayes factors, the model priors must be proper priors (at the very least they should be \emph{not flat}, and it is preferable that they be \emph{informative}); As the priors for the alternative get wider, the likelihood of the null value(s) increases, to the extreme that for completely flat priors the null is infinitely more favorable than the alternative (this is called \emph{the Jeffreys-Lindley-Bartlett paradox}). Thus, you should only ever try (or want) to compute a Bayes factor when you have an informed prior. \cr\cr (Note that by default, \code{brms::brm()} uses flat priors for fixed-effects; See example below.) \cr\cr It is important to provide the correct \code{prior} for meaningful results, to match the \code{posterior}-type input: \itemize{ \item \strong{A numeric vector} - \code{prior} should also be a \emph{numeric vector}, representing the prior-estimate. \item \strong{A data frame} - \code{prior} should also be a \emph{data frame}, representing the prior-estimates, in matching column order. \itemize{ \item If \code{rvar_col} is specified, \code{prior} should be \emph{the name of an \code{rvar} column} that represents the prior-estimates. } \item \strong{Supported Bayesian model (\code{stanreg}, \code{brmsfit}, etc.)} \itemize{ \item \code{prior} should be \emph{a model an equivalent model with MCMC samples from the priors \strong{only}}. See \code{\link[=unupdate]{unupdate()}}. \item If \code{prior} is set to \code{NULL}, \code{\link[=unupdate]{unupdate()}} is called internally (not supported for \code{brmsfit_multiple} model). } \item \strong{Output from a \code{{marginaleffects}} function} - \code{prior} should also be \emph{an equivalent output} from a \code{{marginaleffects}} function based on a prior-model (See \code{\link[=unupdate]{unupdate()}}). \item \strong{Output from an \code{{emmeans}} function} \itemize{ \item \code{prior} should also be \emph{an equivalent output} from an \code{{emmeans}} function based on a prior-model (See \code{\link[=unupdate]{unupdate()}}). \item \code{prior} can also be \emph{the original (posterior) model}, in which case the function will try to "unupdate" the estimates (not supported if the estimates have undergone any transformations -- \code{"log"}, \code{"response"}, etc. -- or any \code{regrid}ing). 
} } } \section{Interpreting Bayes Factors}{ A Bayes factor greater than 1 can be interpreted as evidence against the null, at which one convention is that a Bayes factor greater than 3 can be considered as "substantial" evidence against the null (and vice versa, a Bayes factor smaller than 1/3 indicates substantial evidence in favor of the null-model) (\cite{Wetzels et al. 2011}). } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. 
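As a rough, hand-rolled illustration of the Savage-Dickey logic described above (this is not the package's internal implementation, which relies on \code{logspline} density estimation), the point-null Bayes factor can be approximated by comparing the estimated density of the prior and of the posterior at the null value. All numbers below are purely illustrative:

# minimal sketch of a Savage-Dickey density ratio at a point null of 0
prior <- distribution_normal(1000, mean = 0, sd = 1)
posterior <- distribution_normal(1000, mean = 0.5, sd = 0.3)
d_prior <- density(prior)
d_posterior <- density(posterior)
# kernel-density estimates evaluated at the null value, via interpolation
f0_prior <- approx(d_prior$x, d_prior$y, xout = 0)$y
f0_posterior <- approx(d_posterior$x, d_posterior$y, xout = 0)$y
BF10 <- f0_prior / f0_posterior # evidence against the point null
BF10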
} \examples{ \dontshow{if (require("logspline")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = .5, sd = .3) (BF_pars <- bayesfactor_parameters(posterior, prior, verbose = FALSE)) as.numeric(BF_pars) \dontshow{\}) # examplesIf} \dontshow{if (require("rstanarm") && require("emmeans") && require("logspline")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ # rstanarm models # --------------- contrasts(sleep$group) <- contr.equalprior_pairs # see vingette stan_model <- suppressWarnings(stan_lmer( extra ~ group + (1 | ID), data = sleep, refresh = 0 )) bayesfactor_parameters(stan_model, verbose = FALSE) bayesfactor_parameters(stan_model, null = rope_range(stan_model)) # emmGrid objects # --------------- group_diff <- pairs(emmeans(stan_model, ~group, data = sleep)) bayesfactor_parameters(group_diff, prior = stan_model, verbose = FALSE) # Or # group_diff_prior <- pairs(emmeans(unupdate(stan_model), ~group)) # bayesfactor_parameters(group_diff, prior = group_diff_prior, verbose = FALSE) } \dontshow{\}) # examplesIf} \dontshow{if (require("brms") && require("logspline")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} # brms models # ----------- \dontrun{ contrasts(sleep$group) <- contr.equalprior_pairs # see vingette my_custom_priors <- set_prior("student_t(3, 0, 1)", class = "b") + set_prior("student_t(3, 0, 1)", class = "sd", group = "ID") brms_model <- suppressWarnings(brm(extra ~ group + (1 | ID), data = sleep, prior = my_custom_priors, refresh = 0 )) bayesfactor_parameters(brms_model, verbose = FALSE) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Wagenmakers, E. J., Lodewyckx, T., Kuriyal, H., and Grasman, R. (2010). Bayesian hypothesis testing for psychologists: A tutorial on the Savage-Dickey method. Cognitive psychology, 60(3), 158-189. \item Heck, D. W. (2019). A caveat on the Savage–Dickey density ratio: The case of computing Bayes factors for regression parameters. British Journal of Mathematical and Statistical Psychology, 72(2), 316-333. \item Morey, R. D., & Wagenmakers, E. J. (2014). Simple relation between Bayesian order-restricted and point-null hypothesis tests. Statistics & Probability Letters, 92, 121-124. \item Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. Psychological methods, 16(4), 406. \item Liao, J. G., Midya, V., & Berg, A. (2020). Connecting and contrasting the Bayes factor and a modified ROPE procedure for testing interval null hypotheses. The American Statistician, 1-19. \item Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and Wagenmakers, E.-J. (2011). Statistical Evidence in Experimental Psychology: An Empirical Comparison Using 855 t Tests. Perspectives on Psychological Science, 6(3), 291–298. \doi{10.1177/1745691611406923} } } \author{ Mattan S. Ben-Shachar } bayestestR/man/sexit.Rd0000644000176200001440000001766314614766776014642 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sexit.R \name{sexit} \alias{sexit} \title{Sequential Effect eXistence and sIgnificance Testing (SEXIT)} \usage{ sexit(x, significant = "default", large = "default", ci = 0.95, ...) } \arguments{ \item{x}{A vector representing a posterior distribution, a data frame of posterior draws (samples be parameter). 
Can also be a Bayesian model.} \item{significant, large}{The threshold values to use for significant and large probabilities. If left as 'default', they will be selected through \code{\link[=sexit_thresholds]{sexit_thresholds()}}. See the details section below.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Defaults to \code{.95} (\verb{95\%}).} \item{...}{Currently not used.} } \value{ A data frame, with a text summary stored as an attribute. } \description{ The SEXIT is a new framework to describe Bayesian effects, guiding which indices to use. Accordingly, the \code{sexit()} function returns the minimal (and optimal) required information to describe models' parameters under a Bayesian framework. It includes the following indices: \itemize{ \item Centrality: the median of the posterior distribution. In probabilistic terms, there is a \verb{50\%} probability that the effect is higher than this value, and a \verb{50\%} probability that it is lower. See \code{\link[=point_estimate]{point_estimate()}}. \item Uncertainty: the \verb{95\%} Highest Density Interval (HDI). In probabilistic terms, there is a \verb{95\%} probability that the effect lies within this credible interval. See \code{\link[=ci]{ci()}}. \item Existence: The probability of direction quantifies the certainty by which an effect is positive or negative. It is a critical index to show that an effect of some manipulation is not harmful (for instance in clinical studies) or to assess the direction of a link. See \code{\link[=p_direction]{p_direction()}}. \item Significance: Once existence is demonstrated with high certainty, we can assess whether the effect is of sufficient size to be considered as significant (i.e., not negligible). This is a useful index to determine which effects are actually important and worthy of discussion in a given process. See \code{\link[=p_significance]{p_significance()}}. \item Size: Finally, this index gives an idea about the strength of an effect. However, beware, as studies have shown that a big effect size can also be suggestive of low statistical power (see details section). } } \details{ \subsection{Rationale}{ The assessment of "significance" (in its broadest meaning) is a pervasive issue in science, and its historical index, the p-value, has been strongly criticized and deemed to have played an important role in the replicability crisis. In reaction, more and more scientists have turned to Bayesian methods, offering an alternative set of tools to answer their questions. However, the Bayesian framework offers a wide variety of possible indices related to "significance", and the debate has been raging about which index is the best, and which one to report. This situation can lead to the mindless reporting of all possible indices (in the hope that the reader will be satisfied), often without the writer understanding or interpreting them. It is indeed complicated to juggle many indices with complicated definitions and subtle differences. SEXIT aims at offering a practical framework for Bayesian effects reporting, in which the focus is put on intuitiveness, explicitness and usefulness of the indices' interpretation. To that end, we suggest a system of description of parameters that is intuitive, easy to learn and apply, mathematically accurate and useful for decision-making.
Once the thresholds for significance (i.e., the ROPE) and the one for a "large" effect are explicitly defined, the SEXIT framework does not make any interpretation, i.e., it does not label the effects, but just sequentially gives 3 probabilities (of direction, of significance and of being large, respectively) as-is on top of the characteristics of the posterior (using the median and HDI for centrality and uncertainty description). Thus, it provides a lot of information about the posterior distribution (through the mass of different 'sections' of the posterior) in a clear and meaningful way. } \subsection{Threshold selection}{ One of the most important thing about the SEXIT framework is that it relies on two "arbitrary" thresholds (i.e., that have no absolute meaning). They are the ones related to effect size (an inherently subjective notion), namely the thresholds for significant and large effects. They are set, by default, to \code{0.05} and \code{0.3} of the standard deviation of the outcome variable (tiny and large effect sizes for correlations according to Funder and Ozer, 2019). However, these defaults were chosen by lack of a better option, and might not be adapted to your case. Thus, they are to be handled with care, and the chosen thresholds should always be explicitly reported and justified. \itemize{ \item For \strong{linear models (lm)}, this can be generalised to \ifelse{html}{\out{0.05 * SDy}}{\eqn{[0.05*SD_{y}]}} and \ifelse{html}{\out{0.3 * SDy}}{\eqn{[0.3*SD_{y}]}} for significant and large effects, respectively. \item For \strong{logistic models}, the parameters expressed in log odds ratio can be converted to standardized difference through the formula \ifelse{html}{\out{π/√(3)}}{\eqn{\pi/\sqrt{3}}}, resulting a threshold of \code{0.09} and \code{0.54}. \item For other models with \strong{binary outcome}, it is strongly recommended to manually specify the rope argument. Currently, the same default is applied that for logistic models. \item For models from \strong{count data}, the residual variance is used. This is a rather experimental threshold and is probably often similar to \code{0.05} and \code{0.3}, but should be used with care! \item For \strong{t-tests}, the standard deviation of the response is used, similarly to linear models (see above). \item For \strong{correlations},\code{0.05} and \code{0.3} are used. \item For all other models, \code{0.05} and \code{0.3} are used, but it is strongly advised to specify it manually. } } \subsection{Examples}{ The three values for existence, significance and size provide a useful description of the posterior distribution of the effects. Some possible scenarios include: \itemize{ \item The probability of existence is low, but the probability of being large is high: it suggests that the posterior is very wide (covering large territories on both side of 0). The statistical power might be too low, which should warrant any confident conclusion. \item The probability of existence and significance is high, but the probability of being large is very small: it suggests that the effect is, with high confidence, not large (the posterior is mostly contained between the significance and the large thresholds). \item The 3 indices are very low: this suggests that the effect is null with high confidence (the posterior is closely centred around 0). 
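To make these three probabilities concrete, the following minimal sketch shows how they can be computed by hand from a vector of posterior draws, assuming (purely for illustration) a significance threshold of 0.05 and a "large" threshold of 0.3; \code{sexit()} itself selects these thresholds via \code{sexit_thresholds()} and also reports the median and the \verb{95\%} HDI:

# hand-rolled versions of the three SEXIT probabilities (illustrative only)
set.seed(123)
x <- rnorm(1000, mean = 0.4, sd = 0.2)     # toy posterior draws
pd <- max(mean(x > 0), mean(x < 0))        # probability of direction (existence)
ps <- max(mean(x > 0.05), mean(x < -0.05)) # probability of significance
pl <- max(mean(x > 0.3), mean(x < -0.3))   # probability of being large
c(median = median(x), pd = pd, ps = ps, p_large = pl)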
} } } \examples{ \donttest{ library(bayestestR) s <- sexit(rnorm(1000, -1, 1)) s print(s, summary = TRUE) s <- sexit(iris) s print(s, summary = TRUE) if (require("rstanarm")) { model <- suppressWarnings(rstanarm::stan_glm(mpg ~ wt * cyl, data = mtcars, iter = 400, refresh = 0 )) s <- sexit(model) s print(s, summary = TRUE) } } } \references{ \itemize{ \item Makowski, D., Ben-Shachar, M. S., & Lüdecke, D. (2019). bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework. Journal of Open Source Software, 4(40), 1541. \doi{10.21105/joss.01541} \item Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767} } } bayestestR/man/p_to_bf.Rd0000644000176200001440000000463614542333405015063 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_to_bf.R \name{p_to_bf} \alias{p_to_bf} \alias{p_to_bf.numeric} \alias{p_to_bf.default} \title{Convert p-values to (pseudo) Bayes Factors} \usage{ p_to_bf(x, ...) \method{p_to_bf}{numeric}(x, log = FALSE, n_obs = NULL, ...) \method{p_to_bf}{default}(x, log = FALSE, ...) } \arguments{ \item{x}{A (frequentist) model object, or a (numeric) vector of p-values.} \item{...}{Other arguments to be passed (not used for now).} \item{log}{Whether to return log Bayes Factors. \strong{Note:} The \code{print()} method always shows \code{BF} - the \code{"log_BF"} column is only accessible from the returned data frame.} \item{n_obs}{Number of observations. Either length 1, or same length as \code{p}.} } \value{ A data frame with the p-values and pseudo-Bayes factors (against the null). } \description{ Convert p-values to (pseudo) Bayes Factors. This transformation has been suggested by Wagenmakers (2022), but is based on a vast number of assumptions. It might therefore not be reliable. Use at your own risk. For more accurate approximate Bayes factors, use \code{\link[=bic_to_bf]{bic_to_bf()}} instead. } \examples{ \dontshow{if (require("parameters")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} data(iris) model <- lm(Petal.Length ~ Sepal.Length + Species, data = iris) p_to_bf(model) # Examples that demonstrate comparison between # BIC-approximated and pseudo BF # -------------------------------------------- m0 <- lm(mpg ~ 1, mtcars) m1 <- lm(mpg ~ am, mtcars) m2 <- lm(mpg ~ factor(cyl), mtcars) # In this first example, BIC-approximated BF and # pseudo-BF based on p-values are close... # BIC-approximated BF, m1 against null model bic_to_bf(BIC(m1), denominator = BIC(m0)) # pseudo-BF based on p-values - dropping intercept p_to_bf(m1)[-1, ] # The second example shows that results from pseudo-BF are less accurate # and should be handled with caution! bic_to_bf(BIC(m2), denominator = BIC(m0)) p_to_bf(anova(m2), n_obs = nrow(mtcars)) \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Wagenmakers, E. J. (2022). Approximate objective Bayes factors from p-values and sample size: The 3p(sqrt(n)) rule. Preprint available on PsyArXiv: https://psyarxiv.com/egydq } } \seealso{ \code{\link[=bic_to_bf]{bic_to_bf()}} for more accurate approximate Bayes factors.
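As a back-of-the-envelope illustration of the \verb{3p*sqrt(n)} rule referenced above, a pseudo Bayes factor can be derived from a single p-value and sample size; the values below are illustrative only, and real analyses should rely on \code{p_to_bf()} or \code{\link[=bic_to_bf]{bic_to_bf()}}:

# approximate objective Bayes factor from a p-value (Wagenmakers, 2022)
p <- 0.03
n <- 100
BF01 <- 3 * p * sqrt(n) # approximate evidence in favor of the null
BF10 <- 1 / BF01        # flipped to evidence against the null (the direction reported by p_to_bf())
c(BF01 = BF01, BF10 = BF10)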
} bayestestR/man/effective_sample.Rd0000644000176200001440000001564415005147105016752 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/effective_sample.R \name{effective_sample} \alias{effective_sample} \alias{effective_sample.brmsfit} \title{Effective Sample Size (ESS)} \usage{ effective_sample(model, ...) \method{effective_sample}{brmsfit}( model, effects = "fixed", component = "conditional", parameters = NULL, ... ) } \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, \code{brmsfit}, \code{blavaan}, or \code{MCMCglmm} object.} \item{...}{Currently not used.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ A data frame with two columns: Parameter name and effective sample size (ESS). } \description{ Effective Sample Size (ESS) is a measure of how much independent information there is in autocorrelated chains. It is used to assess the quality of MCMC samples. A higher ESS indicates more reliable estimates. For most applications, an effective sample size greater than 1,000 is sufficient for stable estimates (Bürkner, 2017). This function returns the effective sample size (ESS) for various Bayesian model objects. For \code{brmsfit} objects, the returned ESS corresponds to the bulk-ESS (and the tail-ESS is also returned). 
} \details{ \itemize{ \item \strong{Effective Sample Size (ESS)} should be as large as possible, although for most applications, an effective sample size greater than 1,000 is sufficient for stable estimates (Bürkner, 2017). The ESS corresponds to the number of independent samples with the same estimation power as the N autocorrelated samples. It is a measure of \dQuote{how much independent information there is in autocorrelated chains} (\emph{Kruschke 2015, p. 182-183}). \item \strong{Bulk-ESS} is useful as a diagnostic for the sampling efficiency in the bulk of the posterior. It is defined as the effective sample size for rank-normalized values using split chains. It can be interpreted as the reliability of indices of central tendency (mean, median, etc.). \item \strong{Tail-ESS} is useful as a diagnostic for the sampling efficiency in the tails of the posterior. It is defined as the minimum of the effective sample sizes for the 5\% and 95\% quantiles. It can be interpreted as the reliability of indices that depend on the tails of the distribution (e.g., credible intervals, tail probabilities, etc.). } } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (all(insight::check_if_installed(c("rstanarm", "brms", "posterior"), quietly = TRUE))) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ model <- suppressWarnings(rstanarm::stan_glm( mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0 )) effective_sample(model) model <- suppressWarnings(brms::brm( mpg ~ wt, data = mtcars, chains = 2, iter = 200, refresh = 0 )) effective_sample(model) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. \item Bürkner, P. C. (2017). brms: An R package for Bayesian multilevel models using Stan. Journal of Statistical Software, 80(1), 1-28. \item Vehtari, A., Gelman, A., Simpson, D., Carpenter, B., & Bürkner, P.-C. (2021). Rank-normalization, folding, and localization: An improved R-hat for assessing convergence of MCMC. Bayesian Analysis, 16(2), 667-718.
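For raw draws (rather than a fitted model object), the bulk- and tail-ESS described above can also be obtained directly from the \strong{posterior} package; a minimal sketch with toy draws (iterations as rows, chains as columns):

# illustrative only - independent draws, so both ESS values will be close to 4000
draws <- matrix(rnorm(4000), nrow = 1000, ncol = 4)
posterior::ess_bulk(draws) # reliability of central-tendency summaries
posterior::ess_tail(draws) # reliability of tail-dependent summaries (e.g., credible intervals)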
} } bayestestR/man/overlap.Rd0000644000176200001440000000250614706241121015112 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/overlap.R \name{overlap} \alias{overlap} \title{Overlap Coefficient} \usage{ overlap( x, y, method_density = "kernel", method_auc = "trapezoid", precision = 2^10, extend = TRUE, extend_scale = 0.1, ... ) } \arguments{ \item{x}{Vector of x values.} \item{y}{Vector of x values.} \item{method_density}{Density estimation method. See \code{\link[=estimate_density]{estimate_density()}}.} \item{method_auc}{Area Under the Curve (AUC) estimation method. See \code{\link[=area_under_curve]{area_under_curve()}}.} \item{precision}{Number of points of density data. See the \code{n} parameter in \code{density}.} \item{extend}{Extend the range of the x axis by a factor of \code{extend_scale}.} \item{extend_scale}{Ratio of range by which to extend the x axis. A value of \code{0.1} means that the x axis will be extended by \code{1/10} of the range of the data.} \item{...}{Currently not used.} } \description{ A method to calculate the overlap coefficient between two empirical distributions (that can be used as a measure of similarity between two samples). } \examples{ library(bayestestR) x <- distribution_normal(1000, 2, 0.5) y <- distribution_normal(1000, 0, 1) overlap(x, y) plot(overlap(x, y)) } bayestestR/man/p_significance.Rd0000644000176200001440000002040315005147105016377 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_significance.R \name{p_significance} \alias{p_significance} \alias{p_significance.numeric} \alias{p_significance.get_predicted} \alias{p_significance.data.frame} \alias{p_significance.brmsfit} \title{Practical Significance (ps)} \usage{ p_significance(x, ...) \method{p_significance}{numeric}(x, threshold = "default", ...) \method{p_significance}{get_predicted}( x, threshold = "default", use_iterations = FALSE, verbose = TRUE, ... ) \method{p_significance}{data.frame}(x, threshold = "default", rvar_col = NULL, ...) \method{p_significance}{brmsfit}( x, threshold = "default", effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{threshold}{The threshold value that separates significant from negligible effect, which can have following possible values: \itemize{ \item \code{"default"}, in which case the range is set to \code{0.1} if input is a vector, and based on \code{\link[=rope_range]{rope_range()}} if a (Bayesian) model is provided. \item a single numeric value (e.g., 0.1), which is used as range around zero (i.e. the threshold range is set to -0.1 and 0.1, i.e. reflects a symmetric interval) \item a numeric vector of length two (e.g., \code{c(-0.2, 0.1)}), useful for asymmetric intervals \item a list of numeric vectors, where each vector corresponds to a parameter \item a list of \emph{named} numeric vectors, where names correspond to parameter names. In this case, all parameters that have no matching name in \code{threshold} will be set to \code{"default"}. }} \item{use_iterations}{Logical, if \code{TRUE} and \code{x} is a \code{get_predicted} object, (returned by \code{\link[insight:get_predicted]{insight::get_predicted()}}), the function is applied to the iterations instead of the predictions. 
This only applies to models that return iterations for predicted values (e.g., \code{brmsfit} models).} \item{verbose}{Toggle off warnings.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ Values between 0 and 1 corresponding to the probability of practical significance (ps). } \description{ Compute the probability of \strong{Practical Significance} (\emph{\strong{ps}}), which can be conceptualized as a unidirectional equivalence test. It returns the probability that effect is above a given threshold corresponding to a negligible effect in the median's direction. Mathematically, it is defined as the proportion of the posterior distribution of the median sign above the threshold. } \details{ \code{p_significance()} returns the proportion of a probability distribution (\code{x}) that is outside a certain range (the negligible effect, or ROPE, see argument \code{threshold}). If there are values of the distribution both below and above the ROPE, \code{p_significance()} returns the higher probability of a value being outside the ROPE. Typically, this value should be larger than 0.5 to indicate practical significance. 
However, if the range of the negligible effect is rather large compared to the range of the probability distribution \code{x}, \code{p_significance()} will be less than 0.5, which indicates no clear practical significance. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (require("rstanarm")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) # Simulate a posterior distribution of mean 1 and SD 1 # ---------------------------------------------------- posterior <- rnorm(1000, mean = 1, sd = 1) p_significance(posterior) # Simulate a dataframe of posterior distributions # ----------------------------------------------- df <- data.frame(replicate(4, rnorm(100))) p_significance(df) \donttest{ # rstanarm models # ----------------------------------------------- model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars, chains = 2, refresh = 0 ) p_significance(model) # multiple thresholds - asymmetric, symmetric, default p_significance(model, threshold = list(c(-10, 5), 0.2, "default")) # named thresholds p_significance(model, threshold = list(wt = 0.2, `(Intercept)` = c(-10, 5))) } \dontshow{\}) # examplesIf} } bayestestR/man/bayesfactor_restricted.Rd0000644000176200001440000002535615005147105020204 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor_restricted.R \name{bayesfactor_restricted} \alias{bayesfactor_restricted} \alias{bf_restricted} \alias{bayesfactor_restricted.stanreg} \alias{bayesfactor_restricted.brmsfit} \alias{bayesfactor_restricted.blavaan} \alias{bayesfactor_restricted.emmGrid} \alias{bayesfactor_restricted.data.frame} \alias{as.logical.bayesfactor_restricted} \title{Bayes Factors (BF) for Order Restricted Models} \usage{ bayesfactor_restricted(posterior, ...) bf_restricted(posterior, ...) 
\method{bayesfactor_restricted}{stanreg}( posterior, hypothesis, prior = NULL, verbose = TRUE, effects = "fixed", component = "conditional", ... ) \method{bayesfactor_restricted}{brmsfit}( posterior, hypothesis, prior = NULL, verbose = TRUE, effects = "fixed", component = "conditional", ... ) \method{bayesfactor_restricted}{blavaan}( posterior, hypothesis, prior = NULL, verbose = TRUE, ... ) \method{bayesfactor_restricted}{emmGrid}( posterior, hypothesis, prior = NULL, verbose = TRUE, ... ) \method{bayesfactor_restricted}{data.frame}( posterior, hypothesis, prior = NULL, rvar_col = NULL, ... ) \method{as.logical}{bayesfactor_restricted}(x, which = c("posterior", "prior"), ...) } \arguments{ \item{posterior}{A \code{stanreg} / \code{brmsfit} object, \code{emmGrid} or a data frame - representing a posterior distribution(s) from (see Details).} \item{...}{Currently not used.} \item{hypothesis}{A character vector specifying the restrictions as logical conditions (see examples below).} \item{prior}{An object representing a prior distribution (see Details).} \item{verbose}{Toggle off warnings.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{x}{An object of class \code{bayesfactor_restricted}} \item{which}{Should the logical matrix be of the posterior or prior distribution(s)?} } \value{ A data frame containing the (log) Bayes factor representing evidence \emph{against} the un-restricted model (Use \code{as.numeric()} to extract the non-log Bayes factors; see examples). 
(A \code{bool_results} attribute contains the results for each sample, indicating if they are included or not in the hypothesized restriction.) } \description{ This method computes Bayes factors for comparing a model with an order restrictions on its parameters with the fully unrestricted model. \emph{Note that this method should only be used for confirmatory analyses}. \cr \cr The \verb{bf_*} function is an alias of the main function. \cr \cr \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.} } \details{ This method is used to compute Bayes factors for order-restricted models vs un-restricted models by setting an order restriction on the prior and posterior distributions (\cite{Morey & Wagenmakers, 2013}). \cr\cr (Though it is possible to use \code{bayesfactor_restricted()} to test interval restrictions, it is more suitable for testing order restrictions; see examples). } \section{Setting the correct \code{prior}}{ For the computation of Bayes factors, the model priors must be proper priors (at the very least they should be \emph{not flat}, and it is preferable that they be \emph{informative}); As the priors for the alternative get wider, the likelihood of the null value(s) increases, to the extreme that for completely flat priors the null is infinitely more favorable than the alternative (this is called \emph{the Jeffreys-Lindley-Bartlett paradox}). Thus, you should only ever try (or want) to compute a Bayes factor when you have an informed prior. \cr\cr (Note that by default, \code{brms::brm()} uses flat priors for fixed-effects; See example below.) \cr\cr It is important to provide the correct \code{prior} for meaningful results, to match the \code{posterior}-type input: \itemize{ \item \strong{A numeric vector} - \code{prior} should also be a \emph{numeric vector}, representing the prior-estimate. \item \strong{A data frame} - \code{prior} should also be a \emph{data frame}, representing the prior-estimates, in matching column order. \itemize{ \item If \code{rvar_col} is specified, \code{prior} should be \emph{the name of an \code{rvar} column} that represents the prior-estimates. } \item \strong{Supported Bayesian model (\code{stanreg}, \code{brmsfit}, etc.)} \itemize{ \item \code{prior} should be \emph{a model an equivalent model with MCMC samples from the priors \strong{only}}. See \code{\link[=unupdate]{unupdate()}}. \item If \code{prior} is set to \code{NULL}, \code{\link[=unupdate]{unupdate()}} is called internally (not supported for \code{brmsfit_multiple} model). } \item \strong{Output from a \code{{marginaleffects}} function} - \code{prior} should also be \emph{an equivalent output} from a \code{{marginaleffects}} function based on a prior-model (See \code{\link[=unupdate]{unupdate()}}). \item \strong{Output from an \code{{emmeans}} function} \itemize{ \item \code{prior} should also be \emph{an equivalent output} from an \code{{emmeans}} function based on a prior-model (See \code{\link[=unupdate]{unupdate()}}). \item \code{prior} can also be \emph{the original (posterior) model}, in which case the function will try to "unupdate" the estimates (not supported if the estimates have undergone any transformations -- \code{"log"}, \code{"response"}, etc. -- or any \code{regrid}ing). 
} } } \section{Interpreting Bayes Factors}{ A Bayes factor greater than 1 can be interpreted as evidence against the null, at which one convention is that a Bayes factor greater than 3 can be considered as "substantial" evidence against the null (and vice versa, a Bayes factor smaller than 1/3 indicates substantial evidence in favor of the null-model) (\cite{Wetzels et al. 2011}). } \examples{ set.seed(444) library(bayestestR) prior <- data.frame( A = rnorm(500), B = rnorm(500), C = rnorm(500) ) posterior <- data.frame( A = rnorm(500, .4, 0.7), B = rnorm(500, -.2, 0.4), C = rnorm(500, 0, 0.5) ) hyps <- c( "A > B & B > C", "A > B & A > C", "C > A" ) (b <- bayesfactor_restricted(posterior, hypothesis = hyps, prior = prior)) bool <- as.logical(b, which = "posterior") head(bool) \dontshow{if (require("see") && require("patchwork")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} see::plots( plot(estimate_density(posterior)), # distribution **conditional** on the restrictions plot(estimate_density(posterior[bool[, hyps[1]], ])) + ggplot2::ggtitle(hyps[1]), plot(estimate_density(posterior[bool[, hyps[2]], ])) + ggplot2::ggtitle(hyps[2]), plot(estimate_density(posterior[bool[, hyps[3]], ])) + ggplot2::ggtitle(hyps[3]), guides = "collect" ) \dontshow{\}) # examplesIf} \dontshow{if (require("rstanarm")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ # rstanarm models # --------------- data("mtcars") fit_stan <- rstanarm::stan_glm(mpg ~ wt + cyl + am, data = mtcars, refresh = 0 ) hyps <- c( "am > 0 & cyl < 0", "cyl < 0", "wt - cyl > 0" ) bayesfactor_restricted(fit_stan, hypothesis = hyps) } \dontshow{\}) # examplesIf} \dontshow{if (require("rstanarm") && require("emmeans")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ # emmGrid objects # --------------- # replicating http://bayesfactor.blogspot.com/2015/01/multiple-comparisons-with-bayesfactor-2.html data("disgust") contrasts(disgust$condition) <- contr.equalprior_pairs # see vignette fit_model <- rstanarm::stan_glm(score ~ condition, data = disgust, family = gaussian()) em_condition <- emmeans::emmeans(fit_model, ~condition, data = disgust) hyps <- c("lemon < control & control < sulfur") bayesfactor_restricted(em_condition, prior = fit_model, hypothesis = hyps) # > # Bayes Factor (Order-Restriction) # > # > Hypothesis P(Prior) P(Posterior) BF # > lemon < control & control < sulfur 0.17 0.75 4.49 # > --- # > Bayes factors for the restricted model vs. the un-restricted model. } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Morey, R. D., & Wagenmakers, E. J. (2014). Simple relation between Bayesian order-restricted and point-null hypothesis tests. Statistics & Probability Letters, 92, 121-124. \item Morey, R. D., & Rouder, J. N. (2011). Bayes factor approaches for testing interval null hypotheses. Psychological methods, 16(4), 406. \item Morey, R. D. (Jan, 2015). Multiple Comparisons with BayesFactor, Part 2 – order restrictions. Retrieved from https://richarddmorey.org/category/order-restrictions/. 
} } bayestestR/man/point_estimate.Rd0000644000176200001440000002051515005147105016466 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/point_estimate.R \name{point_estimate} \alias{point_estimate} \alias{point_estimate.numeric} \alias{point_estimate.data.frame} \alias{point_estimate.brmsfit} \alias{point_estimate.get_predicted} \title{Point-estimates of posterior distributions} \usage{ point_estimate(x, ...) \method{point_estimate}{numeric}(x, centrality = "all", dispersion = FALSE, threshold = 0.1, ...) \method{point_estimate}{data.frame}( x, centrality = "all", dispersion = FALSE, threshold = 0.1, rvar_col = NULL, ... ) \method{point_estimate}{brmsfit}( x, centrality = "all", dispersion = FALSE, effects = "fixed", component = "conditional", parameters = NULL, ... ) \method{point_estimate}{get_predicted}( x, centrality = "all", dispersion = FALSE, use_iterations = FALSE, verbose = TRUE, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model. \strong{bayestestR} supports a wide range of models (see, for example, \code{methods("hdi")}) and not all of those are documented in the 'Usage' section, because methods for other classes mostly resemble the arguments of the \code{.numeric} or \code{.data.frame}methods.} \item{...}{Additional arguments to be passed to or from methods.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} (see \code{\link[=map_estimate]{map_estimate()}}), \code{"trimmed"} (which is just \code{mean(x, trim = threshold)}), \code{"mode"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively). Dispersion is not available for \code{"MAP"} or \code{"mode"} centrality indices.} \item{threshold}{For \code{centrality = "trimmed"} (i.e. trimmed mean), indicates the fraction (0 to 0.5) of observations to be trimmed from each end of the vector before the mean is computed.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. 
Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{use_iterations}{Logical, if \code{TRUE} and \code{x} is a \code{get_predicted} object, (returned by \code{\link[insight:get_predicted]{insight::get_predicted()}}), the function is applied to the iterations instead of the predictions. This only applies to models that return iterations for predicted values (e.g., \code{brmsfit} models).} \item{verbose}{Toggle off warnings.} } \description{ Compute various point-estimates, such as the mean, the median or the MAP, to describe posterior distributions. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. 
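For instance, a sketch of possible calls (assuming a \strong{brms} model that has a zero-inflation part; the accepted values are those listed above):

\preformatted{# point_estimate(model, component = "zero_inflated")
# point_estimate(model, effects = "all", component = "all")
}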
} \examples{ \dontshow{if (require("rstanarm") && require("emmeans") && require("brms") && require("BayesFactor")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) point_estimate(rnorm(1000)) point_estimate(rnorm(1000), centrality = "all", dispersion = TRUE) point_estimate(rnorm(1000), centrality = c("median", "MAP")) df <- data.frame(replicate(4, rnorm(100))) point_estimate(df, centrality = "all", dispersion = TRUE) point_estimate(df, centrality = c("median", "MAP")) \donttest{ # rstanarm models # ----------------------------------------------- model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) point_estimate(model, centrality = "all", dispersion = TRUE) point_estimate(model, centrality = c("median", "MAP")) # emmeans estimates # ----------------------------------------------- point_estimate( emmeans::emtrends(model, ~1, "wt", data = mtcars), centrality = c("median", "MAP") ) # brms models # ----------------------------------------------- model <- brms::brm(mpg ~ wt + cyl, data = mtcars) point_estimate(model, centrality = "all", dispersion = TRUE) point_estimate(model, centrality = c("median", "MAP")) # BayesFactor objects # ----------------------------------------------- bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) point_estimate(bf, centrality = "all", dispersion = TRUE) point_estimate(bf, centrality = c("median", "MAP")) } \dontshow{\}) # examplesIf} } \references{ Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019). \emph{Indices of Effect Existence and Significance in the Bayesian Framework}. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} } bayestestR/man/disgust.Rd0000644000176200001440000000173014542333405015127 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{disgust} \alias{disgust} \title{Moral Disgust Judgment} \format{ A data frame with 500 rows and 5 variables: \describe{ \item{score}{Score on the questionnaire, which ranges from 0 to 50 with higher scores representing harsher moral judgment} \item{condition}{one of three conditions, differing by the odor present in the room: a pleasant scent associated with cleanliness (lemon), a disgusting scent (sulfur), and a control condition in which no unusual odor is present} } \if{html}{\out{
}}\preformatted{data("disgust") head(disgust, n = 5) #> score condition #> 1 13 control #> 2 26 control #> 3 30 control #> 4 23 control #> 5 34 control }\if{html}{\out{
}} } \description{ A sample (simulated) dataset, used in tests and some examples. } \author{ Richard D. Morey } \keyword{data} bayestestR/man/bic_to_bf.Rd0000644000176200001440000000232114542333405015350 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bic_to_bf.R \name{bic_to_bf} \alias{bic_to_bf} \title{Convert BIC indices to Bayes Factors via the BIC-approximation method.} \usage{ bic_to_bf(bic, denominator, log = FALSE) } \arguments{ \item{bic}{A vector of BIC values.} \item{denominator}{The BIC value to use as a denominator (to test against).} \item{log}{If \code{TRUE}, return the \code{log(BF)}.} } \value{ The Bayes Factors corresponding to the BIC values against the denominator. } \description{ The difference between two Bayesian information criterion (BIC) indices of two models can be used to approximate Bayes factors via: \cr \deqn{BF_{10} = e^{(BIC_0 - BIC_1)/2}}{BF10 = exp((BIC0-BIC1)/2)} } \examples{ bic1 <- BIC(lm(Sepal.Length ~ 1, data = iris)) bic2 <- BIC(lm(Sepal.Length ~ Species, data = iris)) bic3 <- BIC(lm(Sepal.Length ~ Species + Petal.Length, data = iris)) bic4 <- BIC(lm(Sepal.Length ~ Species * Petal.Length, data = iris)) bic_to_bf(c(bic1, bic2, bic3, bic4), denominator = bic1) } \references{ Wagenmakers, E. J. (2007). A practical solution to the pervasive problems of p values. Psychonomic bulletin & review, 14(5), 779-804 } bayestestR/man/rope_range.Rd0000644000176200001440000000601514665675671015613 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rope_range.R \name{rope_range} \alias{rope_range} \alias{rope_range.default} \title{Find Default Equivalence (ROPE) Region Bounds} \usage{ rope_range(x, ...) \method{rope_range}{default}(x, verbose = TRUE, ...) } \arguments{ \item{x}{A \code{stanreg}, \code{brmsfit} or \code{BFBayesFactor} object, or a frequentist regression model.} \item{...}{Currently not used.} \item{verbose}{Toggle warnings.} } \description{ This function attempts at automatically finding suitable "default" values for the Region Of Practical Equivalence (ROPE). } \details{ \emph{Kruschke (2018)} suggests that the region of practical equivalence could be set, by default, to a range from \code{-0.1} to \code{0.1} of a standardized parameter (negligible effect size according to \emph{Cohen, 1988}). \itemize{ \item For \strong{linear models (lm)}, this can be generalised to \ifelse{html}{\out{-0.1 * SDy, 0.1 * SDy}}{\eqn{[-0.1*SD_{y}, 0.1*SD_{y}]}}. \item For \strong{logistic models}, the parameters expressed in log odds ratio can be converted to standardized difference through the formula \ifelse{html}{\out{π/√(3)}}{\eqn{\pi/\sqrt{3}}}, resulting in a range of \code{-0.18} to \code{0.18}. \item For other models with \strong{binary outcome}, it is strongly recommended to manually specify the rope argument. Currently, the same default is applied that for logistic models. \item For models from \strong{count data}, the residual variance is used. This is a rather experimental threshold and is probably often similar to \verb{-0.1, 0.1}, but should be used with care! \item For \strong{t-tests}, the standard deviation of the response is used, similarly to linear models (see above). \item For \strong{correlations}, \verb{-0.05, 0.05} is used, i.e., half the value of a negligible correlation as suggested by Cohen's (1988) rules of thumb. \item For all other models, \verb{-0.1, 0.1} is used to determine the ROPE limits, but it is strongly advised to specify it manually. 
} } \examples{ \dontshow{if (require("rstanarm") && require("brms") && require("BayesFactor")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ model <- suppressWarnings(rstanarm::stan_glm( mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0 )) rope_range(model) model <- suppressWarnings( rstanarm::stan_glm(vs ~ mpg, data = mtcars, family = "binomial", refresh = 0) ) rope_range(model) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) rope_range(model) model <- BayesFactor::ttestBF(mtcars[mtcars$vs == 1, "mpg"], mtcars[mtcars$vs == 0, "mpg"]) rope_range(model) model <- lmBF(mpg ~ vs, data = mtcars) rope_range(model) } \dontshow{\}) # examplesIf} } \references{ Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. } bayestestR/man/p_rope.Rd0000644000176200001440000001355515052646230014741 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_rope.R \name{p_rope} \alias{p_rope} \alias{p_rope.numeric} \alias{p_rope.data.frame} \alias{p_rope.brmsfit} \title{Probability of being in the ROPE} \usage{ p_rope(x, ...) \method{p_rope}{numeric}(x, range = "default", verbose = TRUE, ...) \method{p_rope}{data.frame}(x, range = "default", rvar_col = NULL, verbose = TRUE, ...) \method{p_rope}{brmsfit}( x, range = "default", effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Other arguments passed to \code{\link[=rope]{rope()}}.} \item{range}{ROPE's lower and higher bounds. Should be \code{"default"} or depending on the number of outcome variables a vector or a list. For models with one response, \code{range} can be: \itemize{ \item a vector of length two (e.g., \code{c(-0.1, 0.1)}), \item a list of numeric vector of the same length as numbers of parameters (see 'Examples'). \item a list of \emph{named} numeric vectors, where names correspond to parameter names. In this case, all parameters that have no matching name in \code{range} will be set to \code{"default"}. } In multivariate models, \code{range} should be a list with another list (one for each response variable) of numeric vectors . Vector names should correspond to the name of the response variables. If \code{"default"} and input is a vector, the range is set to \code{c(-0.1, 0.1)}. If \code{"default"} and input is a Bayesian model, \code{\link[=rope_range]{rope_range()}} is used. See 'Examples'.} \item{verbose}{Toggle off warnings.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. 
\item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ Compute the proportion of the whole posterior distribution that doesn't lie within a region of practical equivalence (ROPE). It is equivalent to running \code{rope(..., ci = 1)}. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. 
} \examples{ library(bayestestR) p_rope(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) p_rope(x = mtcars, range = c(-0.1, 0.1)) } bayestestR/man/dot-extract_priors_rstanarm.Rd0000644000176200001440000000057014542333405021211 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sensitivity_to_prior.R \name{.extract_priors_rstanarm} \alias{.extract_priors_rstanarm} \title{Extract and Returns the priors formatted for rstanarm} \usage{ .extract_priors_rstanarm(model, ...) } \description{ Extract and Returns the priors formatted for rstanarm } \keyword{internal} bayestestR/man/simulate_correlation.Rd0000644000176200001440000000464214640231526017676 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulate_data.R \name{simulate_correlation} \alias{simulate_correlation} \alias{simulate_ttest} \alias{simulate_difference} \title{Data Simulation} \usage{ simulate_correlation(n = 100, r = 0.5, mean = 0, sd = 1, names = NULL, ...) simulate_ttest(n = 100, d = 0.5, names = NULL, ...) simulate_difference(n = 100, d = 0.5, names = NULL, ...) } \arguments{ \item{n}{The number of observations to be generated.} \item{r}{A value or vector corresponding to the desired correlation coefficients.} \item{mean}{A value or vector corresponding to the mean of the variables.} \item{sd}{A value or vector corresponding to the SD of the variables.} \item{names}{A character vector of desired variable names.} \item{...}{Arguments passed to or from other methods.} \item{d}{A value or vector corresponding to the desired difference between the groups.} } \description{ Simulate data with specific characteristics. } \examples{ \dontshow{if (requireNamespace("MASS", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} # Correlation -------------------------------- data <- simulate_correlation(r = 0.5) plot(data$V1, data$V2) cor.test(data$V1, data$V2) summary(lm(V2 ~ V1, data = data)) # Specify mean and SD data <- simulate_correlation(r = 0.5, n = 50, mean = c(0, 1), sd = c(0.7, 1.7)) cor.test(data$V1, data$V2) round(c(mean(data$V1), sd(data$V1)), 1) round(c(mean(data$V2), sd(data$V2)), 1) summary(lm(V2 ~ V1, data = data)) # Generate multiple variables cor_matrix <- matrix( c( 1.0, 0.2, 0.4, 0.2, 1.0, 0.3, 0.4, 0.3, 1.0 ), nrow = 3 ) data <- simulate_correlation(r = cor_matrix, names = c("y", "x1", "x2")) cor(data) summary(lm(y ~ x1, data = data)) # t-test -------------------------------- data <- simulate_ttest(n = 30, d = 0.3) plot(data$V1, data$V0) round(c(mean(data$V1), sd(data$V1)), 1) diff(t.test(data$V1 ~ data$V0)$estimate) summary(lm(V1 ~ V0, data = data)) summary(glm(V0 ~ V1, data = data, family = "binomial")) # Difference -------------------------------- data <- simulate_difference(n = 30, d = 0.3) plot(data$V1, data$V0) round(c(mean(data$V1), sd(data$V1)), 1) diff(t.test(data$V1 ~ data$V0)$estimate) summary(lm(V1 ~ V0, data = data)) summary(glm(V0 ~ V1, data = data, family = "binomial")) \dontshow{\}) # examplesIf} } bayestestR/man/spi.Rd0000644000176200001440000001406015005147105014233 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/spi.R \name{spi} \alias{spi} \alias{spi.numeric} \alias{spi.data.frame} \alias{spi.brmsfit} \alias{spi.get_predicted} \title{Shortest Probability Interval (SPI)} \usage{ spi(x, ...) \method{spi}{numeric}(x, ci = 0.95, verbose = TRUE, ...) \method{spi}{data.frame}(x, ci = 0.95, rvar_col = NULL, verbose = TRUE, ...) 
\method{spi}{brmsfit}( x, ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ... ) \method{spi}{get_predicted}(x, ci = 0.95, use_iterations = FALSE, verbose = TRUE, ...) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model. \strong{bayestestR} supports a wide range of models (see, for example, \code{methods("hdi")}) and not all of those are documented in the 'Usage' section, because methods for other classes mostly resemble the arguments of the \code{.numeric} or \code{.data.frame}methods.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Default to \code{.95} (\verb{95\%}).} \item{verbose}{Toggle off warnings.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{use_iterations}{Logical, if \code{TRUE} and \code{x} is a \code{get_predicted} object, (returned by \code{\link[insight:get_predicted]{insight::get_predicted()}}), the function is applied to the iterations instead of the predictions. 
This only applies to models that return iterations for predicted values (e.g., \code{brmsfit} models).} } \value{ A data frame with the following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. } } \description{ Compute the \strong{Shortest Probability Interval (SPI)} of posterior distributions. The SPI is a more computationally stable alternative to the HDI. The implementation is based on the algorithm from the \strong{SPIn} package. } \details{ The SPI is an alternative method to the HDI (\code{\link[=hdi]{hdi()}}) to quantify uncertainty of (posterior) distributions. The SPI is said to be more stable than the HDI, because the \emph{"HDI can be noisy (that is, have a high Monte Carlo error)"} (Liu et al. 2015). Furthermore, the HDI is sensitive to additional assumptions, in particular assumptions related to the different estimation methods, which can make the HDI less accurate or reliable. } \note{ The code to compute the SPI was adapted from the \strong{SPIn} package, and slightly modified to be more robust for Stan models. Thus, credits go to Ying Liu for the original SPI algorithm and R implementation. } \examples{ \dontshow{if (require("quadprog") && require("rstanarm")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) posterior <- rnorm(1000) spi(posterior) spi(posterior, ci = c(0.80, 0.89, 0.95)) df <- data.frame(replicate(4, rnorm(100))) spi(df) spi(df, ci = c(0.80, 0.89, 0.95)) \donttest{ library(rstanarm) model <- suppressWarnings( stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) ) spi(model) } \dontshow{\}) # examplesIf} } \references{ Liu, Y., Gelman, A., & Zheng, T. (2015). Simulation-efficient shortest probability intervals. Statistics and Computing, 25(4), 809–819. https://doi.org/10.1007/s11222-015-9563-8 } \seealso{ Other ci: \code{\link{bci}()}, \code{\link{ci}()}, \code{\link{eti}()}, \code{\link{hdi}()}, \code{\link{si}()} } \concept{ci} bayestestR/man/display.describe_posterior.Rd0000644000176200001440000000543715052646230021007 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/display.R, R/print.R, R/print_html.R, % R/print_md.R \name{display.describe_posterior} \alias{display.describe_posterior} \alias{print.describe_posterior} \alias{print_html.describe_posterior} \alias{print_md.describe_posterior} \title{Print tables in different output formats} \usage{ \method{display}{describe_posterior}(object, format = "markdown", ...) \method{print}{describe_posterior}(x, digits = 2, caption = "Summary of Posterior Distribution", ...) \method{print_html}{describe_posterior}(x, digits = 2, caption = "Summary of Posterior Distribution", ...) \method{print_md}{describe_posterior}(x, digits = 2, caption = "Summary of Posterior Distribution", ...) } \arguments{ \item{object, x}{An object returned by one of the package's functions, for example \code{\link[=describe_posterior]{describe_posterior()}}, \code{\link[=point_estimate]{point_estimate()}}, or \code{\link[=eti]{eti()}}.} \item{format}{String, indicating the output format. Can be \code{"markdown"}, \code{"html"}, or \code{"tt"}. \code{format = "tt"} creates a \code{tinytable} object, which is either printed as markdown or HTML table, depending on the environment. 
See \code{\link[insight:export_table]{insight::export_table()}} for details.} \item{...}{Arguments passed down to \code{print_html()} or \code{print_md()} (e.g., \code{digits}), or to \code{insight::export_table()}.} \item{digits}{Integer, number of digits to round the table output. Defaults to 2.} \item{caption}{Character, caption for the table. If \code{NULL}, no caption is added. By default, a caption is created based on the object type.} } \value{ If \code{format = "markdown"}, the return value will be a character vector in markdown-table format. If \code{format = "html"}, an object of class \code{gt_tbl}. If \code{format = "tt"}, an object of class \code{tinytable}. } \description{ Prints tables (i.e. data frames) in different output formats. } \details{ \code{display()} is useful when the table-output from functions, which is usually printed as formatted text-table to console, should be formatted for pretty table-rendering in markdown documents, or if knitted from rmarkdown to PDF or Word files. See \href{https://easystats.github.io/parameters/articles/model_parameters_formatting.html}{vignette} for examples. } \examples{ \dontshow{if (all(insight::check_if_installed(c("tinytable", "gt"), quietly = TRUE))) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ d <- data.frame(replicate(4, rnorm(20))) result <- describe_posterior(d) # markdown format display(result) # gt HTML display(result, format = "html") # tinytable display(result, format = "tt") } \dontshow{\}) # examplesIf} } bayestestR/man/bayesfactor_inclusion.Rd0000644000176200001440000000773614765755711020055 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor_inclusion.R \name{bayesfactor_inclusion} \alias{bayesfactor_inclusion} \alias{bf_inclusion} \title{Inclusion Bayes Factors for testing predictors across Bayesian models} \usage{ bayesfactor_inclusion(models, match_models = FALSE, prior_odds = NULL, ...) bf_inclusion(models, match_models = FALSE, prior_odds = NULL, ...) } \arguments{ \item{models}{An object of class \code{\link[=bayesfactor_models]{bayesfactor_models()}} or \code{BFBayesFactor}.} \item{match_models}{See details.} \item{prior_odds}{Optional vector of prior odds for the models. See \verb{BayesFactor::priorOdds<-}.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame containing the prior and posterior probabilities, and log(BF) for each effect (Use \code{as.numeric()} to extract the non-log Bayes factors; see examples). } \description{ The \verb{bf_*} function is an alias of the main function. For more info, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}. } \details{ Inclusion Bayes factors answer the question: Are the observed data more probable under models with a particular effect, than they are under models without that particular effect? In other words, on average - are models with effect \eqn{X} more likely to have produced the observed data than models without effect \eqn{X}? \subsection{Match Models}{ If \code{match_models=FALSE} (default), Inclusion BFs are computed by comparing all models with a term against all models without that term. If \code{TRUE}, comparison is restricted to models that (1) do not include any interactions with the term of interest; (2) for interaction terms, averaging is done only across models that contain the main effect terms from which the interaction term is comprised. 
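As a rough numeric illustration of the unmatched (default) averaging - a sketch only, with hypothetical posterior model probabilities, not the exact implementation - the inclusion BF can be thought of as the change from prior to posterior inclusion odds:

\preformatted{# Three models: null, A, A + B, with equal prior probabilities
prior_prob <- c(1, 1, 1) / 3
post_prob <- c(0.1, 0.3, 0.6)  # hypothetical posterior model probabilities
has_B <- c(FALSE, FALSE, TRUE) # which models include effect B

post_odds <- sum(post_prob[has_B]) / sum(post_prob[!has_B])
prior_odds <- sum(prior_prob[has_B]) / sum(prior_prob[!has_B])
post_odds / prior_odds # inclusion BF for effect B (here, 3)
}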
} } \note{ Random effects in the \code{lmer} style are converted to interaction terms: i.e., \code{(X|G)} will become the terms \code{1:G} and \code{X:G}. } \section{Interpreting Bayes Factors}{ A Bayes factor greater than 1 can be interpreted as evidence against the null, at which one convention is that a Bayes factor greater than 3 can be considered as "substantial" evidence against the null (and vice versa, a Bayes factor smaller than 1/3 indicates substantial evidence in favor of the null-model) (\cite{Wetzels et al. 2011}). } \examples{ \dontshow{if (require("BayesFactor")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) # Using bayesfactor_models: # ------------------------------ mo0 <- lm(Sepal.Length ~ 1, data = iris) mo1 <- lm(Sepal.Length ~ Species, data = iris) mo2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) mo3 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) BFmodels <- bayesfactor_models(mo1, mo2, mo3, denominator = mo0) (bf_inc <- bayesfactor_inclusion(BFmodels)) as.numeric(bf_inc) \donttest{ # BayesFactor # ------------------------------- BF <- BayesFactor::generalTestBF(len ~ supp * dose, ToothGrowth, progress = FALSE) bayesfactor_inclusion(BF) # compare only matched models: bayesfactor_inclusion(BF, match_models = TRUE) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. (2019, March 25). A conceptual introduction to Bayesian Model Averaging. \doi{10.31234/osf.io/wgb64} \item Clyde, M. A., Ghosh, J., & Littman, M. L. (2011). Bayesian adaptive sampling for variable selection and model averaging. Journal of Computational and Graphical Statistics, 20(1), 80-101. \item Mathot, S. (2017). Bayes like a Baws: Interpreting Bayesian Repeated Measures in JASP. \href{https://www.cogsci.nl/blog/interpreting-bayesian-repeated-measures-in-jasp}{Blog post}. } } \seealso{ \code{\link[=weighted_posteriors]{weighted_posteriors()}} for Bayesian parameter averaging. } \author{ Mattan S. Ben-Shachar } bayestestR/man/p_map.Rd0000644000176200001440000002056115005147105014537 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_map.R \name{p_map} \alias{p_map} \alias{p_pointnull} \alias{p_map.numeric} \alias{p_map.get_predicted} \alias{p_map.data.frame} \alias{p_map.brmsfit} \title{Bayesian p-value based on the density at the Maximum A Posteriori (MAP)} \usage{ p_map(x, ...) p_pointnull(x, ...) \method{p_map}{numeric}(x, null = 0, precision = 2^10, method = "kernel", ...) \method{p_map}{get_predicted}( x, null = 0, precision = 2^10, method = "kernel", use_iterations = FALSE, verbose = TRUE, ... ) \method{p_map}{data.frame}(x, null = 0, precision = 2^10, method = "kernel", rvar_col = NULL, ...) \method{p_map}{brmsfit}( x, null = 0, precision = 2^10, method = "kernel", effects = "fixed", component = "conditional", parameters = NULL, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model. \strong{bayestestR} supports a wide range of models (see, for example, \code{methods("hdi")}) and not all of those are documented in the 'Usage' section, because methods for other classes mostly resemble the arguments of the \code{.numeric} or \code{.data.frame}methods.} \item{...}{Currently not used.} \item{null}{The value considered as a "null" effect. 
Traditionally 0, but could also be 1 in the case of ratios of change (OR, IRR, ...).} \item{precision}{Number of points of density data. See the \code{n} parameter in \code{density}.} \item{method}{Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}.} \item{use_iterations}{Logical, if \code{TRUE} and \code{x} is a \code{get_predicted} object, (returned by \code{\link[insight:get_predicted]{insight::get_predicted()}}), the function is applied to the iterations instead of the predictions. This only applies to models that return iterations for predicted values (e.g., \code{brmsfit} models).} \item{verbose}{Toggle off warnings.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ Compute a Bayesian equivalent of the \emph{p}-value, related to the odds that a parameter (described by its posterior distribution) has against the null hypothesis (\emph{h0}) using Mills' (2014, 2017) \emph{Objective Bayesian Hypothesis Testing} framework. It corresponds to the density value at the null (e.g., 0) divided by the density at the Maximum A Posteriori (MAP). } \details{ Note that this method is sensitive to the density estimation \code{method} (see the section in the examples below). 
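As a rough sketch of the underlying idea (using a plain \code{density()} call here rather than the package's default density estimation, so the value will only approximate \code{p_map()}):

\preformatted{x <- rnorm(1000, 1, 1)
d <- density(x, n = 2^10)
d_null <- approx(d$x, d$y, xout = 0)$y # density at the null value
d_map <- max(d$y)                      # density at the MAP (mode of the density)
d_null / d_map                         # roughly p_map(x)
}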
\subsection{Strengths and Limitations}{ \strong{Strengths:} Straightforward computation. Objective property of the posterior distribution. \strong{Limitations:} Limited information favoring the null hypothesis. Relies on density approximation. Indirect relationship between mathematical definition and interpretation. Only suitable for weak / very diffuse priors. } } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (require("rstanarm") && require("emmeans") && require("brms") && require("BayesFactor")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) p_map(rnorm(1000, 0, 1)) p_map(rnorm(1000, 10, 1)) \donttest{ model <- suppressWarnings( rstanarm::stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) ) p_map(model) p_map(suppressWarnings( emmeans::emtrends(model, ~1, "wt", data = mtcars) )) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) p_map(model) bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) p_map(bf) # --------------------------------------- # Robustness to density estimation method set.seed(333) data <- data.frame() for (iteration in 1:250) { x <- rnorm(1000, 1, 1) result <- data.frame( Kernel = as.numeric(p_map(x, method = "kernel")), KernSmooth = as.numeric(p_map(x, method = "KernSmooth")), logspline = as.numeric(p_map(x, method = "logspline")) ) data <- rbind(data, result) } data$KernSmooth <- data$Kernel - data$KernSmooth data$logspline <- data$Kernel - data$logspline summary(data$KernSmooth) summary(data$logspline) boxplot(data[c("KernSmooth", "logspline")]) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Makowski D, Ben-Shachar MS, Chen SHA, Lüdecke D (2019) Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} \item Mills, J. A. (2018). Objective Bayesian Precise Hypothesis Testing. University of Cincinnati. 
} } \seealso{ \href{https://www.youtube.com/watch?v=Ip8Ci5KUVRc}{Jeff Mills' talk} } bayestestR/man/as.numeric.p_direction.Rd0000644000176200001440000000125114542333405020005 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/map_estimate.R, R/p_direction.R, R/p_map.R, % R/p_significance.R \name{as.numeric.map_estimate} \alias{as.numeric.map_estimate} \alias{as.numeric.p_direction} \alias{as.numeric.p_map} \alias{as.numeric.p_significance} \title{Convert to Numeric} \usage{ \method{as.numeric}{map_estimate}(x, ...) \method{as.numeric}{p_direction}(x, ...) \method{as.numeric}{p_map}(x, ...) \method{as.numeric}{p_significance}(x, ...) } \arguments{ \item{x}{object to be coerced or tested.} \item{...}{further arguments passed to or from other methods.} } \description{ Convert to Numeric } bayestestR/man/bayesfactor.Rd0000644000176200001440000001005315005147105015740 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayesfactor.R \name{bayesfactor} \alias{bayesfactor} \title{Bayes Factors (BF)} \usage{ bayesfactor( ..., prior = NULL, direction = "two-sided", null = 0, hypothesis = NULL, effects = "fixed", verbose = TRUE, denominator = 1, match_models = FALSE, prior_odds = NULL ) } \arguments{ \item{...}{A numeric vector, model object(s), or the output from \code{bayesfactor_models}.} \item{prior}{An object representing a prior distribution (see 'Details').} \item{direction}{Test type (see 'Details'). One of \code{0}, \code{"two-sided"} (default, two tailed), \code{-1}, \code{"left"} (left tailed) or \code{1}, \code{"right"} (right tailed).} \item{null}{Value of the null, either a scalar (for point-null) or a range (for an interval-null).} \item{hypothesis}{A character vector specifying the restrictions as logical conditions (see examples below).} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} returns random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{verbose}{Toggle off warnings.} \item{denominator}{Either an integer indicating which of the models to use as the denominator, or a model to be used as a denominator. Ignored for \code{BFBayesFactor}.} \item{match_models}{See details.} \item{prior_odds}{Optional vector of prior odds for the models. See \verb{BayesFactor::priorOdds<-}.} } \value{ Some type of Bayes factor, depending on the input. See \code{\link[=bayesfactor_parameters]{bayesfactor_parameters()}}, \code{\link[=bayesfactor_models]{bayesfactor_models()}} or \code{\link[=bayesfactor_inclusion]{bayesfactor_inclusion()}}. } \description{ This function computes the Bayes factors (BFs) that are appropriate to the input. 
For vectors or single models, it will compute \code{\link[=bayesfactor_parameters]{BFs for single parameters}}, or, if \code{hypothesis} is specified, \code{\link[=bayesfactor_restricted]{BFs for restricted models}}. For multiple models, it will return the BF corresponding to \code{\link[=bayesfactor_models]{comparison between models}}, and if a model comparison is passed, it will compute the \code{\link[=bayesfactor_inclusion]{inclusion BF}}. \cr\cr For a complete overview of these functions, read the \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{Bayes factor vignette}. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \examples{ \dontshow{if (require("rstanarm") && require("logspline")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \dontrun{ library(bayestestR) prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = 0.5, sd = 0.3) bayesfactor(posterior, prior = prior, verbose = FALSE) # rstanarm models # --------------- model <- suppressWarnings(rstanarm::stan_lmer(extra ~ group + (1 | ID), data = sleep)) bayesfactor(model, verbose = FALSE) # Frequentist models # --------------- m0 <- lm(extra ~ 1, data = sleep) m1 <- lm(extra ~ group, data = sleep) m2 <- lm(extra ~ group + ID, data = sleep) comparison <- bayesfactor(m0, m1, m2) comparison bayesfactor(comparison) } \dontshow{\}) # examplesIf} } bayestestR/man/diagnostic_posterior.Rd0000644000176200001440000001562715005147105017704 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diagnostic_posterior.R \name{diagnostic_posterior} \alias{diagnostic_posterior} \alias{diagnostic_posterior.default} \alias{diagnostic_posterior.stanreg} \title{Posteriors Sampling Diagnostic} \usage{ diagnostic_posterior(posterior, ...) \method{diagnostic_posterior}{default}(posterior, diagnostic = c("ESS", "Rhat"), ...) \method{diagnostic_posterior}{stanreg}( posterior, diagnostic = "all", effects = "fixed", component = "location", parameters = NULL, ... ) } \arguments{ \item{posterior}{A \code{stanreg}, \code{stanfit}, \code{brmsfit}, or \code{blavaan} object.} \item{...}{Currently only used for models of class \code{brmsfit}, where a \code{variable} argument can be used, which is directly passed to the \code{as.data.frame()} method (i.e., \code{as.data.frame(x, variable = variable)}).} \item{diagnostic}{Diagnostic metrics to compute. Character (vector) or list with one or more of these options: \code{"ESS"}, \code{"Rhat"}, \code{"MCSE"} or \code{"all"}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} returns random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. 
}} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, the instrumental variables, or marginal effects? Applies to models with zero-inflated and/or dispersion formula, or to models with instrumental variables (so-called fixed-effects regressions), or models with marginal effects (from \strong{mfx}). See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned.} } \description{ Extract diagnostic metrics (Effective Sample Size (\code{ESS}), \code{Rhat} and Monte Carlo Standard Error (\code{MCSE})). } \details{ \strong{Effective Sample Size (ESS)} should be as large as possible, although for most applications, an effective sample size greater than 1000 is sufficient for stable estimates (\emph{Bürkner, 2017}). The ESS corresponds to the number of independent samples with the same estimation power as the N autocorrelated samples. It is a measure of "how much independent information there is in autocorrelated chains" (\emph{Kruschke 2015, p182-3}). \strong{Rhat} should be as close to 1 as possible. It should not be larger than 1.1 (\emph{Gelman and Rubin, 1992}) or 1.01 (\emph{Vehtari et al., 2019}). The split Rhat statistic quantifies the consistency of an ensemble of Markov chains. \strong{Monte Carlo Standard Error (MCSE)} is another measure of accuracy of the chains. It is defined as the standard deviation of the chains divided by their effective sample size (the formula for \code{mcse()} is from Kruschke 2015, p. 187). The MCSE "provides a quantitative suggestion of how big the estimation noise is". } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters). 
\item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (require("rstanarm") && require("brms")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ # rstanarm models # ----------------------------------------------- model <- suppressWarnings( rstanarm::stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) ) diagnostic_posterior(model) # brms models # ----------------------------------------------- model <- brms::brm(mpg ~ wt + cyl, data = mtcars) diagnostic_posterior(model) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Gelman, A., & Rubin, D. B. (1992). Inference from iterative simulation using multiple sequences. Statistical science, 7(4), 457-472. \item Vehtari, A., Gelman, A., Simpson, D., Carpenter, B., and Bürkner, P. C. (2019). Rank-normalization, folding, and localization: An improved Rhat for assessing convergence of MCMC. arXiv preprint arXiv:1903.08008. \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. } } bayestestR/man/map_estimate.Rd0000644000176200001440000001620415005147105016112 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/map_estimate.R \name{map_estimate} \alias{map_estimate} \alias{map_estimate.numeric} \alias{map_estimate.brmsfit} \alias{map_estimate.data.frame} \alias{map_estimate.get_predicted} \title{Maximum A Posteriori probability estimate (MAP)} \usage{ map_estimate(x, ...) \method{map_estimate}{numeric}(x, precision = 2^10, method = "kernel", verbose = TRUE, ...) \method{map_estimate}{brmsfit}( x, precision = 2^10, method = "kernel", effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ... ) \method{map_estimate}{data.frame}( x, precision = 2^10, method = "kernel", rvar_col = NULL, verbose = TRUE, ... ) \method{map_estimate}{get_predicted}( x, precision = 2^10, method = "kernel", use_iterations = FALSE, verbose = TRUE, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model. \strong{bayestestR} supports a wide range of models (see, for example, \code{methods("hdi")}) and not all of those are documented in the 'Usage' section, because methods for other classes mostly resemble the arguments of the \code{.numeric} or \code{.data.frame}methods.} \item{...}{Currently not used.} \item{precision}{Number of points of density data. See the \code{n} parameter in \code{density}.} \item{method}{Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}.} \item{verbose}{Toggle off warnings.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. 
\item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{use_iterations}{Logical, if \code{TRUE} and \code{x} is a \code{get_predicted} object, (returned by \code{\link[insight:get_predicted]{insight::get_predicted()}}), the function is applied to the iterations instead of the predictions. This only applies to models that return iterations for predicted values (e.g., \code{brmsfit} models).} } \value{ A numeric value if \code{x} is a vector. If \code{x} is a model-object, returns a data frame with following columns: \itemize{ \item \code{Parameter}: The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{MAP_Estimate}: The MAP estimate for the posterior or each model parameter. } } \description{ Find the \strong{Highest Maximum A Posteriori probability estimate (MAP)} of a posterior, i.e., the value associated with the highest probability density (the "peak" of the posterior distribution). In other words, it is an estimation of the \emph{mode} for continuous parameters. Note that this function relies on \code{\link[=estimate_density]{estimate_density()}}, which by default uses a different smoothing bandwidth (\code{"SJ"}) compared to the legacy default implemented the base R \code{\link[=density]{density()}} function (\code{"nrd0"}). } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. 
Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (require("rstanarm") && require("brms")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ library(bayestestR) posterior <- rnorm(10000) map_estimate(posterior) plot(density(posterior)) abline(v = as.numeric(map_estimate(posterior)), col = "red") model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) map_estimate(model) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) map_estimate(model) } \dontshow{\}) # examplesIf} } bayestestR/man/si.Rd0000644000176200001440000002365315005147105014063 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/si.R \name{si} \alias{si} \alias{si.numeric} \alias{si.stanreg} \alias{si.get_predicted} \alias{si.data.frame} \title{Compute Support Intervals} \usage{ si(posterior, ...) \method{si}{numeric}(posterior, prior = NULL, BF = 1, verbose = TRUE, ...) \method{si}{stanreg}( posterior, prior = NULL, BF = 1, verbose = TRUE, effects = "fixed", component = "location", parameters = NULL, ... ) \method{si}{get_predicted}( posterior, prior = NULL, BF = 1, use_iterations = FALSE, verbose = TRUE, ... ) \method{si}{data.frame}(posterior, prior = NULL, BF = 1, rvar_col = NULL, verbose = TRUE, ...) } \arguments{ \item{posterior}{A numerical vector, \code{stanreg} / \code{brmsfit} object, \code{emmGrid} or a data frame - representing a posterior distribution(s) from (see 'Details').} \item{...}{Arguments passed to and from other methods. (Can be used to pass arguments to internal \code{\link[logspline:logspline]{logspline::logspline()}}.)} \item{prior}{An object representing a prior distribution (see 'Details').} \item{BF}{The amount of support required to be included in the support interval.} \item{verbose}{Toggle off warnings.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. 
\item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{use_iterations}{Logical, if \code{TRUE} and \code{x} is a \code{get_predicted} object, (returned by \code{\link[insight:get_predicted]{insight::get_predicted()}}), the function is applied to the iterations instead of the predictions. This only applies to models that return iterations for predicted values (e.g., \code{brmsfit} models).} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} } \value{ A data frame containing the lower and upper bounds of the SI. Note that if the level of requested support is higher than observed in the data, the interval will be \verb{[NA,NA]}. } \description{ A support interval contains only the values of the parameter that predict the observed data better than average, by some degree \emph{k}; these are values of the parameter that are associated with an updating factor greater or equal than \emph{k}. From the perspective of the Savage-Dickey Bayes factor, testing against a point null hypothesis for any value within the support interval will yield a Bayes factor smaller than \emph{1/k}. } \details{ \strong{For more info, in particular on specifying correct priors for factors with more than 2 levels, see \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}.} This method is used to compute support intervals based on prior and posterior distributions. 
For the computation of support intervals, the model priors must be proper priors (at the very least they should be \emph{not flat}, and it is preferable that they be \emph{informative} - note that by default, \code{brms::brm()} uses flat priors for fixed-effects; see example below). } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Choosing a value of \code{BF}}{ The choice of \code{BF} (the level of support) depends on what we want our interval to represent: \itemize{ \item A \code{BF} = 1 contains values whose credibility is not decreased by observing the data. \item A \code{BF} > 1 contains values who received more impressive support from the data. \item A \code{BF} < 1 contains values whose credibility has \emph{not} been impressively decreased by observing the data. Testing against values outside this interval will produce a Bayes factor larger than 1/\code{BF} in support of the alternative. E.g., if an SI (BF = 1/3) excludes 0, the Bayes factor against the point-null will be larger than 3. } } \section{Setting the correct \code{prior}}{ For the computation of Bayes factors, the model priors must be proper priors (at the very least they should be \emph{not flat}, and it is preferable that they be \emph{informative}); As the priors for the alternative get wider, the likelihood of the null value(s) increases, to the extreme that for completely flat priors the null is infinitely more favorable than the alternative (this is called \emph{the Jeffreys-Lindley-Bartlett paradox}). Thus, you should only ever try (or want) to compute a Bayes factor when you have an informed prior. \cr\cr (Note that by default, \code{brms::brm()} uses flat priors for fixed-effects; See example below.) \cr\cr It is important to provide the correct \code{prior} for meaningful results, to match the \code{posterior}-type input: \itemize{ \item \strong{A numeric vector} - \code{prior} should also be a \emph{numeric vector}, representing the prior-estimate. \item \strong{A data frame} - \code{prior} should also be a \emph{data frame}, representing the prior-estimates, in matching column order. \itemize{ \item If \code{rvar_col} is specified, \code{prior} should be \emph{the name of an \code{rvar} column} that represents the prior-estimates. } \item \strong{Supported Bayesian model (\code{stanreg}, \code{brmsfit}, etc.)} \itemize{ \item \code{prior} should be \emph{a model an equivalent model with MCMC samples from the priors \strong{only}}. See \code{\link[=unupdate]{unupdate()}}. \item If \code{prior} is set to \code{NULL}, \code{\link[=unupdate]{unupdate()}} is called internally (not supported for \code{brmsfit_multiple} model). } \item \strong{Output from a \code{{marginaleffects}} function} - \code{prior} should also be \emph{an equivalent output} from a \code{{marginaleffects}} function based on a prior-model (See \code{\link[=unupdate]{unupdate()}}). \item \strong{Output from an \code{{emmeans}} function} \itemize{ \item \code{prior} should also be \emph{an equivalent output} from an \code{{emmeans}} function based on a prior-model (See \code{\link[=unupdate]{unupdate()}}). \item \code{prior} can also be \emph{the original (posterior) model}, in which case the function will try to "unupdate" the estimates (not supported if the estimates have undergone any transformations -- \code{"log"}, \code{"response"}, etc. -- or any \code{regrid}ing). 
} } } \examples{ \dontshow{if (require("logspline") && require("rstanarm") && require("brms") && require("emmeans")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) prior <- distribution_normal(1000, mean = 0, sd = 1) posterior <- distribution_normal(1000, mean = 0.5, sd = 0.3) si(posterior, prior, verbose = FALSE) \donttest{ # rstanarm models # --------------- library(rstanarm) contrasts(sleep$group) <- contr.equalprior_pairs # see vignette stan_model <- stan_lmer(extra ~ group + (1 | ID), data = sleep) si(stan_model, verbose = FALSE) si(stan_model, BF = 3, verbose = FALSE) # emmGrid objects # --------------- library(emmeans) group_diff <- pairs(emmeans(stan_model, ~group)) si(group_diff, prior = stan_model, verbose = FALSE) # brms models # ----------- library(brms) contrasts(sleep$group) <- contr.equalprior_pairs # see vingette my_custom_priors <- set_prior("student_t(3, 0, 1)", class = "b") + set_prior("student_t(3, 0, 1)", class = "sd", group = "ID") brms_model <- suppressWarnings(brm(extra ~ group + (1 | ID), data = sleep, prior = my_custom_priors, refresh = 0 )) si(brms_model, verbose = FALSE) } \dontshow{\}) # examplesIf} } \references{ Wagenmakers, E., Gronau, Q. F., Dablander, F., & Etz, A. (2018, November 22). The Support Interval. \doi{10.31234/osf.io/zwnxb} } \seealso{ Other ci: \code{\link{bci}()}, \code{\link{ci}()}, \code{\link{eti}()}, \code{\link{hdi}()}, \code{\link{spi}()} } \concept{ci} bayestestR/man/density_at.Rd0000644000176200001440000000170714542333405015614 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate_density.R \name{density_at} \alias{density_at} \title{Density Probability at a Given Value} \usage{ density_at(posterior, x, precision = 2^10, method = "kernel", ...) } \arguments{ \item{posterior}{Vector representing a posterior distribution.} \item{x}{The value of which to get the approximate probability.} \item{precision}{Number of points of density data. See the \code{n} parameter in \code{density}.} \item{method}{Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}.} \item{...}{Currently not used.} } \description{ Compute the density value at a given point of a distribution (i.e., the value of the \code{y} axis of a value \code{x} of a distribution). } \examples{ library(bayestestR) posterior <- distribution_normal(n = 10) density_at(posterior, 0) density_at(posterior, c(0, 1)) } bayestestR/man/reexports.Rd0000644000176200001440000000103215052646230015473 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reexports.R \docType{import} \name{reexports} \alias{reexports} \alias{print_html} \alias{print_md} \alias{display} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. 
\describe{ \item{insight}{\code{\link[insight]{display}}, \code{\link[insight:display]{print_html}}, \code{\link[insight:display]{print_md}}} }} bayestestR/man/weighted_posteriors.Rd0000644000176200001440000001663715005147105017545 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/weighted_posteriors.R \name{weighted_posteriors} \alias{weighted_posteriors} \alias{weighted_posteriors.data.frame} \alias{weighted_posteriors.stanreg} \alias{weighted_posteriors.BFBayesFactor} \title{Generate posterior distributions weighted across models} \usage{ weighted_posteriors(..., prior_odds = NULL, missing = 0, verbose = TRUE) \method{weighted_posteriors}{data.frame}(..., prior_odds = NULL, missing = 0, verbose = TRUE) \method{weighted_posteriors}{stanreg}( ..., prior_odds = NULL, missing = 0, verbose = TRUE, effects = "fixed", component = "conditional", parameters = NULL ) \method{weighted_posteriors}{BFBayesFactor}( ..., prior_odds = NULL, missing = 0, verbose = TRUE, iterations = 4000 ) } \arguments{ \item{...}{Fitted models (see details), all fit on the same data, or a single \code{BFBayesFactor} object.} \item{prior_odds}{Optional vector of prior odds for the models compared to the first model (or the denominator, for \code{BFBayesFactor} objects). For \code{data.frame}s, this will be used as the basis of weighting.} \item{missing}{An optional numeric value to use if a model does not contain a parameter that appears in other models. Defaults to 0.} \item{verbose}{Toggle off warnings.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. 
Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{iterations}{For \code{BayesFactor} models, how many posterior samples to draw.} } \value{ A data frame with posterior distributions (weighted across models) . } \description{ Extract posterior samples of parameters, weighted across models. Weighting is done by comparing posterior model probabilities, via \code{\link[=bayesfactor_models]{bayesfactor_models()}}. } \details{ Note that across models some parameters might play different roles. For example, the parameter \code{A} plays a different role in the model \code{Y ~ A + B} (where it is a main effect) than it does in the model \code{Y ~ A + B + A:B} (where it is a simple effect). In many cases centering of predictors (mean subtracting for continuous variables, and effects coding via \code{contr.sum} or orthonormal coding via \code{\link{contr.equalprior_pairs}} for factors) can reduce this issue. In any case you should be mindful of this issue. See \code{\link[=bayesfactor_models]{bayesfactor_models()}} details for more info on passed models. Note that for \code{BayesFactor} models, posterior samples cannot be generated from intercept only models. This function is similar in function to \code{brms::posterior_average}. } \note{ For \verb{BayesFactor < 0.9.12-4.3}, in some instances there might be some problems of duplicate columns of random effects in the resulting data frame. } \examples{ \donttest{ if (require("rstanarm") && require("see") && interactive()) { stan_m0 <- suppressWarnings(stan_glm(extra ~ 1, data = sleep, family = gaussian(), refresh = 0, diagnostic_file = file.path(tempdir(), "df0.csv") )) stan_m1 <- suppressWarnings(stan_glm(extra ~ group, data = sleep, family = gaussian(), refresh = 0, diagnostic_file = file.path(tempdir(), "df1.csv") )) res <- weighted_posteriors(stan_m0, stan_m1, verbose = FALSE) plot(eti(res)) } ## With BayesFactor if (require("BayesFactor")) { extra_sleep <- ttestBF(formula = extra ~ group, data = sleep) wp <- weighted_posteriors(extra_sleep, verbose = FALSE) describe_posterior(extra_sleep, test = NULL, verbose = FALSE) # also considers the null describe_posterior(wp$delta, test = NULL, verbose = FALSE) } ## weighted prediction distributions via data.frames if (require("rstanarm") && interactive()) { m0 <- suppressWarnings(stan_glm( mpg ~ 1, data = mtcars, family = gaussian(), diagnostic_file = file.path(tempdir(), "df0.csv"), refresh = 0 )) m1 <- suppressWarnings(stan_glm( mpg ~ carb, data = mtcars, family = gaussian(), diagnostic_file = file.path(tempdir(), "df1.csv"), refresh = 0 )) # Predictions: pred_m0 <- data.frame(posterior_predict(m0)) pred_m1 <- data.frame(posterior_predict(m1)) BFmods <- bayesfactor_models(m0, m1, verbose = FALSE) wp <- weighted_posteriors( pred_m0, pred_m1, prior_odds = as.numeric(BFmods)[2], verbose = FALSE ) # look at first 5 prediction intervals hdi(pred_m0[1:5]) hdi(pred_m1[1:5]) hdi(wp[1:5]) # between, but closer to pred_m1 } } } \references{ \itemize{ \item Clyde, M., Desimone, H., & Parmigiani, G. (1996). Prediction via orthogonalized model mixing. Journal of the American Statistical Association, 91(435), 1197-1208. \item Hinne, M., Gronau, Q. F., van den Bergh, D., and Wagenmakers, E. (2019, March 25). A conceptual introduction to Bayesian Model Averaging. \doi{10.31234/osf.io/wgb64} \item Rouder, J. N., Haaf, J. 
M., & Vandekerckhove, J. (2018). Bayesian inference for psychology, part IV: Parameter estimation and Bayes factors. Psychonomic bulletin & review, 25(1), 102-113. \item van den Bergh, D., Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers, E. J. (2019). A cautionary note on estimating effect size. } } \seealso{ \code{\link[=bayesfactor_inclusion]{bayesfactor_inclusion()}} for Bayesian model averaging. } bayestestR/man/model_to_priors.Rd0000644000176200001440000000205114542333405016642 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/model_to_priors.R \name{model_to_priors} \alias{model_to_priors} \title{Convert model's posteriors to priors (EXPERIMENTAL)} \usage{ model_to_priors(model, scale_multiply = 3, ...) } \arguments{ \item{model}{A Bayesian model.} \item{scale_multiply}{The SD of the posterior will be multiplied by this amount before being set as a prior to avoid overly narrow priors.} \item{...}{Other arguments for \code{insight::get_prior()} or \code{\link{describe_posterior}}.} } \description{ Convert model's posteriors to (normal) priors. } \examples{ \donttest{ # brms models # ----------------------------------------------- if (require("brms")) { formula <- brms::brmsformula(mpg ~ wt + cyl, center = FALSE) model <- brms::brm(formula, data = mtcars, refresh = 0) priors <- model_to_priors(model) priors <- brms::validate_prior(priors, formula, data = mtcars) priors model2 <- brms::brm(formula, data = mtcars, prior = priors, refresh = 0) } } } bayestestR/man/pd_to_p.Rd0000644000176200001440000000424614677702255015112 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_pd_to_p.R \name{pd_to_p} \alias{pd_to_p} \alias{pd_to_p.numeric} \alias{p_to_pd} \alias{convert_p_to_pd} \alias{convert_pd_to_p} \title{Convert between Probability of Direction (pd) and p-value.} \usage{ pd_to_p(pd, ...) \method{pd_to_p}{numeric}(pd, direction = "two-sided", verbose = TRUE, ...) p_to_pd(p, direction = "two-sided", ...) convert_p_to_pd(p, direction = "two-sided", ...) convert_pd_to_p(pd, ...) } \arguments{ \item{pd}{A Probability of Direction (pd) value (between 0 and 1). Can also be a data frame with a column named \code{pd}, \code{p_direction}, or \code{PD}, as returned by \code{\link[=p_direction]{p_direction()}}. In this case, the column is converted to p-values and the new data frame is returned.} \item{...}{Arguments passed to or from other methods.} \item{direction}{What type of p-value is requested or provided. Can be \code{"two-sided"} (default, two tailed) or \code{"one-sided"} (one tailed).} \item{verbose}{Toggle off warnings.} \item{p}{A p-value.} } \value{ A p-value or a data frame with a p-value column. } \description{ Enables a conversion between Probability of Direction (pd) and p-value. } \details{ Conversion is done using the following equation (see \emph{Makowski et al., 2019}): When \code{direction = "two-sided"} \ifelse{html}{\out{p = 2 * (1 - pd)}}{\eqn{p = 2 \times (1 - p_d)}} When \code{direction = "one-sided"} \ifelse{html}{\out{p = 1 - pd}}{\eqn{p = 1 - p_d}} Note that this conversion is only valid when the lowest possible values of pd is 0.5 - i.e., when the posterior represents continuous parameter space (see \code{\link[=p_direction]{p_direction()}}). If any pd < 0.5 are detected, they are converted to a p of 1, and a warning is given. } \examples{ pd_to_p(pd = 0.95) pd_to_p(pd = 0.95, direction = "one-sided") } \references{ Makowski, D., Ben-Shachar, M. S., Chen, S. H. 
A., and Lüdecke, D. (2019). \emph{Indices of Effect Existence and Significance in the Bayesian Framework}. Frontiers in Psychology 2019;10:2767. \doi{10.3389/fpsyg.2019.02767} } bayestestR/man/unupdate.Rd0000644000176200001440000000222114765755712015306 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/unupdate.R \name{unupdate} \alias{unupdate} \alias{unupdate.brmsfit} \alias{unupdate.brmsfit_multiple} \title{Un-update Bayesian models to their prior-to-data state} \usage{ unupdate(model, verbose = TRUE, ...) \method{unupdate}{brmsfit}(model, verbose = TRUE, ...) \method{unupdate}{brmsfit_multiple}(model, verbose = TRUE, newdata = NULL, ...) } \arguments{ \item{model}{A fitted Bayesian model.} \item{verbose}{Toggle warnings.} \item{...}{Not used} \item{newdata}{List of \code{data.frames} to update the model with new data. Required even if the original data should be used.} } \value{ A model un-fitted to the data, representing the prior model. } \description{ As posteriors are priors that have been updated after observing some data, the goal of this function is to un-update the posteriors to obtain models representing the priors. These models can then be used to examine the prior predictive distribution, or to compare priors with posteriors. } \details{ This function in used internally to compute Bayes factors. } \keyword{internal} bayestestR/man/hdi.Rd0000644000176200001440000002547515005147105014220 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/hdi.R \name{hdi} \alias{hdi} \alias{hdi.numeric} \alias{hdi.data.frame} \alias{hdi.brmsfit} \alias{hdi.get_predicted} \title{Highest Density Interval (HDI)} \usage{ hdi(x, ...) \method{hdi}{numeric}(x, ci = 0.95, verbose = TRUE, ...) \method{hdi}{data.frame}(x, ci = 0.95, rvar_col = NULL, verbose = TRUE, ...) \method{hdi}{brmsfit}( x, ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ... ) \method{hdi}{get_predicted}(x, ci = 0.95, use_iterations = FALSE, verbose = TRUE, ...) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model. \strong{bayestestR} supports a wide range of models (see, for example, \code{methods("hdi")}) and not all of those are documented in the 'Usage' section, because methods for other classes mostly resemble the arguments of the \code{.numeric} or \code{.data.frame}methods.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Default to \code{.95} (\verb{95\%}).} \item{verbose}{Toggle off warnings.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. 
\item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{use_iterations}{Logical, if \code{TRUE} and \code{x} is a \code{get_predicted} object, (returned by \code{\link[insight:get_predicted]{insight::get_predicted()}}), the function is applied to the iterations instead of the predictions. This only applies to models that return iterations for predicted values (e.g., \code{brmsfit} models).} } \value{ A data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. } } \description{ Compute the \strong{Highest Density Interval (HDI)} of posterior distributions. All points within this interval have a higher probability density than points outside the interval. The HDI can be used in the context of uncertainty characterisation of posterior distributions as \strong{Credible Interval (CI)}. } \details{ Unlike equal-tailed intervals (see \code{\link[=eti]{eti()}}) that typically exclude \verb{2.5\%} from each tail of the distribution and always include the median, the HDI is \emph{not} equal-tailed and therefore always includes the mode(s) of posterior distributions. While this can be useful to better represent the credibility mass of a distribution, the HDI also has some limitations. See \code{\link[=spi]{spi()}} for details. The \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{\verb{95\%} or \verb{89\%} Credible Intervals (CI)} are two reasonable ranges to characterize the uncertainty related to the estimation (see \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{here} for a discussion about the differences between these two values). The \verb{89\%} intervals (\code{ci = 0.89}) are deemed to be more stable than, for instance, \verb{95\%} intervals (\emph{Kruschke, 2014}). 
An effective sample size of at least 10.000 is recommended if one wants to estimate \verb{95\%} intervals with high precision (\emph{Kruschke, 2014, p. 183ff}). Unfortunately, the default number of posterior samples for most Bayes packages (e.g., \code{rstanarm} or \code{brms}) is only 4.000 (thus, you might want to increase it when fitting your model). Moreover, 89 indicates the arbitrariness of interval limits - its only remarkable property is being the highest prime number that does not exceed the already unstable \verb{95\%} threshold (\emph{McElreath, 2015}). However, \verb{95\%} has some \href{https://easystats.github.io/blog/posts/bayestestr_95/}{advantages too}. For instance, it shares (in the case of a normal posterior distribution) an intuitive relationship with the standard deviation and it conveys a more accurate image of the (artificial) bounds of the distribution. Also, because it is wider, it makes analyses more conservative (i.e., the probability of covering zero is larger for the \verb{95\%} CI than for lower ranges such as \verb{89\%}), which is a good thing in the context of the reproducibility crisis. A \verb{95\%} equal-tailed interval (ETI) has \verb{2.5\%} of the distribution on either side of its limits. It indicates the 2.5th percentile and the 97.5th percentile. In symmetric distributions, the two methods of computing credible intervals, the ETI and the \link[=hdi]{HDI}, return similar results. This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution. On the other hand, the ETI range does change when transformations are applied to the distribution (for instance, for a log odds scale to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. 
} For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (all(insight::check_if_installed(c("rstanarm", "brms", "emmeans", "BayesFactor"), quietly = TRUE))) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) posterior <- rnorm(1000) hdi(posterior, ci = 0.89) hdi(posterior, ci = c(0.80, 0.90, 0.95)) hdi(iris[1:4]) hdi(iris[1:4], ci = c(0.80, 0.90, 0.95)) \donttest{ model <- suppressWarnings( rstanarm::stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) ) hdi(model) hdi(model, ci = c(0.80, 0.90, 0.95)) hdi(emmeans::emtrends(model, ~1, "wt", data = mtcars)) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) hdi(model) hdi(model, ci = c(0.80, 0.90, 0.95)) bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) hdi(bf) hdi(bf, ci = c(0.80, 0.90, 0.95)) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Kruschke, J. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. \item McElreath, R. (2015). Statistical rethinking: A Bayesian course with examples in R and Stan. Chapman and Hall/CRC. } } \seealso{ Other interval functions, such as \code{\link[=hdi]{hdi()}}, \code{\link[=eti]{eti()}}, \code{\link[=bci]{bci()}}, \code{\link[=spi]{spi()}}, \code{\link[=si]{si()}}. Other ci: \code{\link{bci}()}, \code{\link{ci}()}, \code{\link{eti}()}, \code{\link{si}()}, \code{\link{spi}()} } \author{ Credits go to \strong{ggdistribute} and \href{https://github.com/mikemeredith/HDInterval}{\strong{HDInterval}}. } \concept{ci} bayestestR/man/rope.Rd0000644000176200001440000003101015052646230014404 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/rope.R \name{rope} \alias{rope} \alias{rope.numeric} \alias{rope.data.frame} \alias{rope.stanreg} \alias{rope.brmsfit} \title{Region of Practical Equivalence (ROPE)} \usage{ rope(x, ...) \method{rope}{numeric}( x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, verbose = TRUE, ... ) \method{rope}{data.frame}( x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, rvar_col = NULL, verbose = TRUE, ... ) \method{rope}{stanreg}( x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, effects = "fixed", component = "location", parameters = NULL, verbose = TRUE, ... ) \method{rope}{brmsfit}( x, range = "default", ci = 0.95, ci_method = "ETI", complement = FALSE, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} \item{range}{ROPE's lower and higher bounds. Should be \code{"default"} or depending on the number of outcome variables a vector or a list. For models with one response, \code{range} can be: \itemize{ \item a vector of length two (e.g., \code{c(-0.1, 0.1)}), \item a list of numeric vector of the same length as numbers of parameters (see 'Examples'). \item a list of \emph{named} numeric vectors, where names correspond to parameter names. In this case, all parameters that have no matching name in \code{range} will be set to \code{"default"}. 
} In multivariate models, \code{range} should be a list with another list (one for each response variable) of numeric vectors . Vector names should correspond to the name of the response variables. If \code{"default"} and input is a vector, the range is set to \code{c(-0.1, 0.1)}. If \code{"default"} and input is a Bayesian model, \code{\link[=rope_range]{rope_range()}} is used. See 'Examples'.} \item{ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{ci_method}{The type of interval to use to quantify the percentage in ROPE. Can be 'HDI' (default) or 'ETI'. See \code{\link[=ci]{ci()}}.} \item{complement}{Should the probabilities above/below the ROPE (the \emph{complementary} probabilities) be returned as well? See \code{\link[=equivalence_test]{equivalence_test()}} as well.} \item{verbose}{Toggle off warnings.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ Compute the proportion of the HDI (default to the \verb{89\%} HDI) of a posterior distribution that lies within a region of practical equivalence. 
} \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{ROPE}{ Statistically, the probability of a posterior distribution of being different from 0 does not make much sense (the probability of a single value null hypothesis in a continuous distribution is 0). Therefore, the idea underlining ROPE is to let the user define an area around the null value enclosing values that are \emph{equivalent to the null} value for practical purposes (\emph{Kruschke 2010, 2011, 2014}). Kruschke (2018) suggests that such null value could be set, by default, to the -0.1 to 0.1 range of a standardized parameter (negligible effect size according to Cohen, 1988). This could be generalized: For instance, for linear models, the ROPE could be set as \verb{0 +/- .1 * sd(y)}. This ROPE range can be automatically computed for models using the \code{\link[=rope_range]{rope_range()}} function. Kruschke (2010, 2011, 2014) suggests using the proportion of the \verb{95\%} (or \verb{89\%}, considered more stable) \link[=hdi]{HDI} that falls within the ROPE as an index for "null-hypothesis" testing (as understood under the Bayesian framework, see \code{\link[=equivalence_test]{equivalence_test()}}). } \section{Sensitivity to parameter's scale}{ It is important to consider the unit (i.e., the scale) of the predictors when using an index based on the ROPE, as the correct interpretation of the ROPE as representing a region of practical equivalence to zero is dependent on the scale of the predictors. Indeed, the percentage in ROPE depend on the unit of its parameter. In other words, as the ROPE represents a fixed portion of the response's scale, its proximity with a coefficient depends on the scale of the coefficient itself. } \section{Multicollinearity - Non-independent covariates}{ When parameters show strong correlations, i.e. when covariates are not independent, the joint parameter distributions may shift towards or away from the ROPE. Collinearity invalidates ROPE and hypothesis testing based on univariate marginals, as the probabilities are conditional on independence. Most problematic are parameters that only have partial overlap with the ROPE region. In case of collinearity, the (joint) distributions of these parameters may either get an increased or decreased ROPE, which means that inferences based on \code{rope()} are inappropriate (\emph{Kruschke 2014, 340f}). \code{rope()} performs a simple check for pairwise correlations between parameters, but as there can be collinearity between more than two variables, a first step to check the assumptions of this hypothesis testing is to look at different pair plots. An even more sophisticated check is the projection predictive variable selection (\emph{Piironen and Vehtari 2017}). } \section{Strengths and Limitations}{ \strong{Strengths:} Provides information related to the practical relevance of the effects. \strong{Limitations:} A ROPE range needs to be arbitrarily defined. Sensitive to the scale (the unit) of the predictors. Not sensitive to highly significant effects. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. 
\item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (all(insight::check_if_installed(c("rstanarm", "emmeans", "brms", "BayesFactor"), quietly = TRUE))) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) rope(x = rnorm(1000, 0, 0.01), range = c(-0.1, 0.1)) rope(x = rnorm(1000, 0, 1), range = c(-0.1, 0.1)) rope(x = rnorm(1000, 1, 0.01), range = c(-0.1, 0.1)) rope(x = rnorm(1000, 1, 1), ci = c(0.90, 0.95)) \donttest{ model <- suppressWarnings( rstanarm::stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) ) rope(model) rope(model, ci = c(0.90, 0.95)) # multiple ROPE ranges rope(model, range = list(c(-10, 5), c(-0.2, 0.2), "default")) # named ROPE ranges rope(model, range = list(gear = c(-3, 2), wt = c(-0.2, 0.2))) rope(emmeans::emtrends(model, ~1, "wt"), ci = c(0.90, 0.95)) model <- brms::brm(mpg ~ wt + cyl, data = mtcars, refresh = 0) rope(model) rope(model, ci = c(0.90, 0.95)) model <- brms::brm( brms::bf(brms::mvbind(mpg, disp) ~ wt + cyl) + brms::set_rescor(rescor = TRUE), data = mtcars, refresh = 0 ) rope(model) rope(model, ci = c(0.90, 0.95)) # different ROPE ranges for model parameters. For each response, a named # list (with the name of the response variable) is required as list-element # for the `range` argument. rope( model, range = list( mpg = list(b_mpg_wt = c(-1, 1), b_mpg_cyl = c(-2, 2)), disp = list(b_disp_wt = c(-5, 5), b_disp_cyl = c(-4, 4)) ) ) bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) rope(bf) rope(bf, ci = c(0.90, 0.95)) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Cohen, J. (1988). Statistical power analysis for the behavioural sciences. \item Kruschke, J. K. (2010). What to believe: Bayesian methods for data analysis. Trends in cognitive sciences, 14(7), 293-300. \doi{10.1016/j.tics.2010.05.001}. \item Kruschke, J. K. (2011). Bayesian assessment of null values via parameter estimation and model comparison. Perspectives on Psychological Science, 6(3), 299-312. \doi{10.1177/1745691611406925}. \item Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press. \doi{10.1177/2515245918771304}. \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. 
bayestestR/man/figures/ [directory]
bayestestR/man/figures/unnamed-chunk-10-1.png [binary PNG image data omitted]
bayestestR/man/figures/unnamed-chunk-16-1.png [binary PNG image data omitted]
bayestestR/man/figures/unnamed-chunk-8-1.png [binary PNG image data omitted]
bayestestR/man/figures/unnamed-chunk-12-1.png [binary PNG image data omitted]
c!d!ePQRfȎې۶QRde2Z pHYs.#.#x?v IDATx,Was-q<$ؙp,sEIf Y gc"^08edacY${DȯÀ!WoFGtu骮U{~/[]]~urDs2lH`@D"6D$ "  lH`@D"6D$ "  lH`@D"6D#9M|k^DʷYpi<7tt7=|1.3*n_ݱј]y{L}rr'; 쓓ݏO w''7t`w{GػΟK`oS/>41$G>N}]>1][`oK>UvO Np+ˤ+li|ǯ'{΁>!B}1&ݤt 0)2F5 ;p`~8olI9 WdD^=nC{GmՎSN=/&~rcmɻn)rí?qx-}_X7ZZy˷{;{5G9>7:;ßc}Ctk閞z9I2oЛX}zjK]?,6/^U~[~\hob5o»>+TvϱP=@Cw Nuun~;_F9 )"AߪR%L۞\f^ţ[ߞqq{lJq=o{t_CQ֎vϱD>Z}l=yb3[ ]c^Ӎ_/9 nVJ'j*xk|KGBe\eT5PsY@:]nY5rCOxJ=R,/ZQy>ZkTx^|gպXϱ]`g=Z_T7=0ݨsM#yڏlP^(]FX5յzFruLjvͪ+A9_\͸^?HY 4Uci!~9 =:2joz`Xvuٹ5 ʥ*IrVM;o_HZͪ+ڼXT_]ϰ^ݷOʓK~Ky}/um5Ի߸儮rd-j={7|lS<vPnbY+~o0æܶoJgm):r/*wqT{k;N2*P;%Z`vPOt;CW[o0{׊jEmeS75X}MV@[GJd*P;%Z`v#j8#;`XV9Zc5ݺ#3DBAV ?`7RT}cj OcM#F3Ơ;GM(U&3Dn{}n\hֻ [u }c44`Xel5Vm:kQAʢ!w5;)w^ed+'|mz-{OK`9 ^=Rێ=JsD*3DV.okB&}4v =cӨl Cۧ+2G2C]Q=c[h2ݟc8$w P|vVyHeHۢj V;{ 4 ]0қ(֟(. u^he*oMscnh ~:&:?)M\R!f2ogU^bѤ 챯"R>1~`i4]x`XjB?ו"mnLR}+c(kq4u1z`i[i )" {º:Cd6mې-&a`Mʈ{sm~M}`X_xieFWR'*=6:}Wƭjth揬R1z`ill^!SZx׷R(P\_ vjns4 9oXusح~[/0o4\QWO[H.]SDM^ G*Ol{O`o4_ d後[ѭ**?_/_mLjThvո`sL{~/{`X`)j;JR1yRM[M^MO6]`sL4{Oce9 c`>~qsu]쥋/_s5{o+'&]vڥnZnUDvM^feǪ9rh`z붫kg?ڛs "Jw۳ro|k\'Wur=ɭfmX{$ a|bq·&wՐsg:}c"Q?c:4KUմߥ&ZZ-k>?];.Dzt7`i>;Ech:#:,/Sz]Di;O05=~{swMn,Kwm{>;m~;0Cչ 퉦[90.xoT&q`z.tBd=;-~Mw`X[ 3-ƛ^7K*tvx_JogޞI{W\j.%h4stz#;y[sÛ~~bE/F{#K\>="nMw>No\ktػg~ֽ'1;_r}ح\x"0}v1C$Bw0;=v 0k}} `0k}vif[/#؛{YByeՆ[ 뛮x 3K`x哓K}!z'G;  lH`@D"6D$ "  lH`@D"6D$ "  lH`@D"6D$ "  lH`@D"6D$ "  lH`@D"6D$ "  lH`@D"6D$ "  lH`@D"6D$ "  lH`@D"6D$ "  lH`@D"6D$ "  lH`@D"6D$ " '>d|32̌OyVƕ"ʸ23>u^[*DL @"yv <;S`H)H$$gg lɳ36ٙDL @"yv <;S`Hq`g|32̌OyVƕ"ʸ23>u^[*DL @"yv <;S`H)H$$gg lɳ36ٙDL @"yv <;S`Hq`g|32̌OyVƕ"ʸ23>u^[*DL @"yv <;S`H)H$$gg lɳ36ٙDL @"yv <;S`Hq`g|32̌OyVƕ"ʸ23>u^[*DL @"yv <;S`H)H$$gg lɳ36ٙDL @"yv <;S`Hq`g|32̌OyVƕ"ʸ23>u^[*DL @"yv <;S`H)H$$gg lɳ36ٙDL @"yv <;S`Hq`g|32̌OyVƕ"ʸ23>u^[*DL @"yv <;S`H)H$$gg lɳ36ٙDL @"yv t{ܽݞ_nO`$:lcLJ>:@/[_r3_OƧЋ oО~OƧЋ V1. B{>3K2̌O㏭E5_l diϝT&3y՛abH(c?E63yե!3nWl}g>5n^xl{g=]1+vrn۷[Yy[~in^xuu_[l֯_zм=Թ/T7~ƧAc6O;xt2-ݸEW ;Ĵ 쭖nܸKjƍ3tæ"\_?^ݟ5-[[*Z;]aZ+./>%`nޙ.wV8:U8W爼_mد)gD & g6;}`rO5Oɭi[ۊY,U`WbineOo75Ťvz~|<^n?Bh1][sD|ͯ~m?SO} 2pcM1iqO\QoxrS93D"sk]\ h"_.7%:G& s2<&q`Vjz-^;A]W$/Wjz+5ffds`#z.5+;"2GdQث!_^XIߪʢeFo\ܯſ5?oj>oj`ŵOOO/Ÿ\x?=Ɵ?] 
nUz="*sDޱG5WC[7*sD^G9rrbb{.~?z grbRb?ټO/en읹X _"Go9"l~K5/&_2E$w9rV*/kl[E`}?r`zSҫ=nzߧ˻+lwf}z1]M s bH5^kï9(&د>W=Ra??\ϻ|~`0c%jr9Nn9" j`_t5k6K{V`Ōϴ OᏟ0v|/p WV3CsE>[{5E`/WXMd" e#_&R՟/W3K㋽/Z`76r1u90z̗w.fblş\ ^֞U;^)_0;..s&sD:6^串"in}J3& ]r;|eC _(a{9Gց~ui[~+Vbȳہ]]wB570FyvfƁݗM`/}=췔QdŠ{z`|-c۫}{{nbȟ<wc\:]7]`ٙ;M`W YNYvmjL o[EW2}1u1:߽ "#..f ať*m{u5ju`c{Ȫ]=P`XK_v1N鯬Ky"^ ^p_ZU7) gg J׫ c*ffrA=WW_Fk9}glra z!|^/b?9[o0vyvN9"`^6c_}se+M.}1]{ffqה{gV(.Rj cgg JU`p9a{9z5|mcKkl6n{s^b t뿬v9/͋#*ɕkH/K|./W]<W@q; IDAT '@P[{F\Z e/ymmlin1ʳ36ٙDL @"yv <;S`Hq`g|32̌OyVƕ"ʸ23>u^[*DL @"yv <;S`H)H$$gg lɳ36ٙDL @"yv <;S`Hq`g|32̌OyVƕ"ʸ23>u^[*DL @"yv <;S`H)H$$gg lɳ36ٙDL @"yv <;S`Hq`g|32̌OyVƕ9S͈bG^\6D2՘*3_o~f(#mC@yv S)Wk C) <;S`1+ }hGɳ3vs:<;S`n`Kl`L=ƾl LQ).%60=yv\ٙ{p[bӒggfc9.-)Eln,9ќzV#6XNcջTFrul Lb#|ܭ60yv֪!؃sV$ٙ{H` <;S`˝[agg <;S`ˇ-؃ <;S`e+ l Y)'60byvv( <;S`$Z`+l`̌{Seၭʳ3 |D`+l`L=qʳ3V(ٙ(h*#[a#gg l O)GF'ؽk[acgg % l L)qɳ3vRF%= qVٙg[a#gg ~8V`+l`dٙgg ٙS){0t`+l`yvNU' l " l SyvN] l !ɍ"60<;S`''\ٙvO2Sgg  ])k<;S`'6V@̌# l ,؉ l _yvNu W)jݾ<;S`պ}{ l *iN~[a}ʳ3vRW`gg ڗoOٙ;W`+l?yvNC l`~LR-6Л<;S`!{60?yvfƁéw[a}ɳ3vB]V@OLPٙ;.g`+lyvfƁ\ٙ;Nk`+lyvNS C)T<;S`'ӭx{l t ޾[aٙ;n+ɳ3v*{V@ryvNc l`~LJ?6Z)SX˳3v"]cW`ggfiOk H+؉tm] O)ںʳ3vSwV@JyvfƁTٙ;Υ;P`+l <;S`'=t60?yvN{ H'It[aٙ;+ɳ3v dpTL+ɳ3v TDL!;d`+l <;S`'pH l`~L!;h`+l <;3Nw$'Rȳ3v|LA+ɳ33T ܡ[aٙ;V`gg [aٙ;V`gg [aٙ;vLہq+ɳ3vl[aqٙ;V`gg lQʳ3vl8[a1ٙ;CV`ggfiNвI`+l <;S`Gvh l`~L١a;V@s\݇#+V`3\)yg[<+GV[a@g.f{7>/qd l`~&ЙW\̾~"l3^+GF[ag>_nl󔇮ٰwf1R s+/G6[a}gөKC֋z=%plŽ;6pwC[5#? }8`60?c"+W5N?g/ɱ;Vޙ3Dy~9-{ql l`~ޙ'wYqؑcl t7|vg"Z=(pt l`~ޙO{U]-:V@gċj\qXP"v7>+ cN]<-vG>, ċ5ݰE\3vH݃uޙ~*{g"[a݌3_XMj݃U`3l[|=8]' dYͬo~^!B l`~ޙ[\~+[`!] b읹7cy*{gǫ&[ ~E(׉:{g/i_ !\60?!;zZ!BN%6;s{u1T]]#ӋЭwf1GdH1. ; ީZgCo_`l"3r0,ϟbH;H`(·d`Y ))wf: |h@Й_>yD`(FN(6$:ljt߻h{3KpD[aٙxQU`gg EiIZɳ3$'Nj l gg E)V O)X' h!G<;S`-NN-6_)W60?yvfƁ[a{ٙhqruz}L}8*ɳ3"[a{ٙv$bU`gg cE)v˳3"'NJԪ l gg cEJU O)+U %GU<;S`)VN46C)+T60?yv>RPj`+l ,Gթ l ٙ82u}ڰ0*yvfƁԣet{Q73gg DI7+lA)R'ݼ-·A)-R60?yvfƁEHp`ZaKyv>JJn`7 yv>JLٙ(2U`gg T O)/S'؍}-<;S`%^N6`Ayv>JLٙ3U`gg cDT O)1Sٙ+u]/졿`$̌;©GT O)1Rۅf G٨5$GRbrYȳ33 Թ 0&yv>B@ٙ1U`gg # T O)5P60?yv>\@ٙpQU`gg E T O)5P #gg E T O)5Pg gg T Oq`{qu><;S`,n l`~L}:VJ)7P60?yvfƁ}:VR)9Pg (ٙPU`gg CEyL}ȁ*ɳ3"[agn"'s l l:<;S`(v.6 "v l`~L}؁:V@q`wU`gg Dٙ0U`gg D9ٙQ'؇ l ˳3a'؇ l y˳3a'؇3 l Y˳3A's l 9˳3A' l kљ߼'HAM`֏? | s=hq`suƁ [=K@.}yL؇Hsl j[ݵIYL؇HuػJ=[a@w'߸IO؇HN1[`"A;6kg>~c C$T Yqӱ![a@rc_T}*93f>SQ` E=6|{Z)U`sPgx_"}d)  u̗>K\*2Hl ؙbӣM)/ }*ҙʹG-w?TRj ;;|[\]l|t.t8?*i[oӚ#""Psl i[+w 1 lElrkы]LO=If 2%56rSK*>X@|ͮR% <[a@^ZvI% T O[M.If gg 2}׮]k~遏bP/vwI5V96,r}iW"K#{jw)؝ l[a@F6 T }D`#M ~`Ssfss&P60?|䤍 D`w&P3 l wfH^X8=4*[[4gjN b\Z`w&P60?bŏgEozϖ?@'A`w(P l 8b 8(P60?6j^ 6MgtڵNn$J,ٙD*ɳ3vW5V<;S`w(P60?<]}=[jv ,-R2EvGU`#i!U O`B@lZH vf2?CySO<;S`w*Ps l sgg f(%S Pq`$Qf f.ݤr l vPg<H$I ֝yoXط? 
c&I Ykٙ'qݤhӂUgQGJqla nRi!V0g:j7-7}7J]%H% OμW-\//󩻊?H}|inR`Zt釋xlG&<-;kBjMOl.ƲǗ$~ Eg>r"wl)%#;ޥ60?-:ꦩ֫CWMr1lI.q`+lYDuE9 ;>cggl;Ii"1`|{vLGإE4 6P2sq\7s7Jl "[Y/QJ`v;.ZYvQ*V0K-:z^HuH'F`wHl`~ZtrWi'ڞE"ݒ{`+l6zR^TgN"6 OμZ "WMt M ݖ}`+lVs/I$^졿\ }$ף5[axO_'}z#';V ɳ3vjV0?yv V6'8zm"S0;yv R6 l<;S`wE 440<;S`w'E 440u{8va]gxIw왋!0w Dժ3_k={QB4D`/ %1}-g/F 쥡d 6y5{bth^["jљKyoCQƁUt C@:EZt C@m:k~p[Su$f "cbh{c4m*]5 %V9-C@]c3m/8fxH`o "Ǡ%?v[ʱgg "c]37-mE,&n8V)ۊY v_9p<;S`3Č۩G IoCKv+qn`vo]}]Rw|;hqFjl"k֝yz5ot*V[3V__cmĮ;"gtե3]o,5ӣ榹Ex(8-[_5; eY~2/*m &N=~5 F3y-r{؃j{Ȏ9"2EV0)-bn2s=K`+'wbNv`>Bh!V0%b-wIx/G b{¹#X'C6Lמz]My]$v?wr|+^)~4 쐭V0!;P^/>~ַ|lQ`?vٙWK9}˭9wK}Gw@`?yemď: cWgmC^Իz,솥@vrb}-ۈu;D`t뫼GN_RZ{`#a Gcw7߹bدwl?OlG`?v w#; a0Ed@[ [`?vH=6LE3O>}*"b~񛱁il ..h_D*9"6jF` G P/Sy9[qM~񛱁i l ̫{+)!Unޘc{@`lPg./b9>}^BWyǥkC 7c &ag`a]N}c}N& Zzfl C6LW(-;Hi7۫mHS߁` vznKW9CM\WLx9/~36!k=z1Cq vceh2} vL }`<6M3D;JhלD`lr] `vv1ey[`?P`+lrd}rJȕ+o;w~JmN+~26!kRRLe} {D`[aM WB#[+lv{D`l)+) JZ-;o.-{D`[aM"]4󽋙%~=^񓱉0] ^񓱉 Fn?Vi'c"`vv {c#+6Ibl$C6LIbl$CvQؙiy񋱑0]Vn"c*6Y)_vʳ3>񋱑 F,8[_vȾV0^yv#~06!+{f;do`+l<;S`?  ӕgg =c3?6U)vHV0Ryv#~06!+{f;M`+l<;S`?  ӕgg =c3*6R)wߋ;D`tٙ{ C1ʳ33VvHV0Byv-~/ ӕgg bi 'ػ"`̌ CZɳ3n{1@`l cӢ3y=O?^ b0]m''~w]lzt;~=!w_A;V0]:r޻vHG%~- C[a8KmruUӹQ`?vjٙoӚ#"wa;V0 m:|Yc}eN=c1L`lYʭwF/v1Edb9,6A._ذMQ,)CX !}`/kv7xG!~, [a u&wa;D`t ( b946 /8zX !2}׮]k~遏bP/c1L` "džEO?;D`t?w9=[Mw5!GL߄ 찘0]966 G`S[!MgtڵNn$"6b ;V0<;3w r|`+lJ)b+;D`tٙ;(Z!"C"btO\r$o5#bb;;D`tbtHg4W8`H}ؒ !m  %)S 8`,]fg4o@#Ca{;D`tٙ;$J'C"ٙ;$Fv C6LW)Cba;$V`+l_)C"a;$Z`+l])CNvʳ3v=׉ ~_{ÍD`D.vyhfbzB`D l =ؙ}1r 36K`gF`$:D[a@9~="9s  ӕgg 89מ gg H9ך zgg X9ז0]yvsm ؁?yvs-  ӕgg h9ג zs|;WLߜīvHVЗ7YEf=ghfN"\;;D`t l6"\;;$A`+lf#f͵"C6wo͒\e3HDsT!YT@4(ČʊS2jr$L!9/I J]?4q/#=|=U;b-sy8P/ʐ5׋ .#߿ꗑw/j85ׇlW) Zs}("LmؚA`G l fg na;"^mvfÁ5w .؝vdV0:;?Rvc PO>wjt1w F׷3{a38vcP~qݑm<1w ֫39/wc؝FvdV0^uQf79~ [`w# ә76d7<~gبv1bn?36Gg.|닟iF&;s{ ż)(Jot؝FvdV0zb?}qv-vqbn76pg..UwןӰ=>R##u3Wjos]Fj}vdV07'r{z ! s vDb;2z`+lMμ5C`Xɭ љس^_-.kvܝu['#l љ.V?\O_Gr!N`G. r3ת_zZ}+-w\0z El//[6 vt 5; 3CUl/d<⾍G`dȭzQ=_/R l 82-I[a zxE v!wM`G6ԫg`_^_"!wM`Gr[a~vDzyl 쉸p- zeT]`OąCnI`G[azt,~1"weKwܒdyIOgwszBM;nA`G6ԫOg.R֭'v}9?43 I l gՙկ\-w\vD`@uo|:/` 9 l ٙ!^0vcuؑVp6 ˼[#2:;&^'u/~O;s1{[F^`2[ay̆;81 j3رVp6;S`o8P6;S`o8 l 83ko.q; п3c?qF'ded̛^=d[*N`G6ԫ_g>v^Ϟ3ݑm<{KV Hz`+l8Y|͎yvzؑVp^uQf79~ [`oj8P>pycOV_zkq'~6ڎJ`oj8) 6Gg.|닟iF&d% Nӣ3v+Q`oJ+8Pzb?}qv-ؑ"[aIwRuGHߛ}*? [`oJ 8)#6pgίTw-S(7 N3::FCO7P/=\.2{H {3A{SZ zR+կH7)'6Og/` -i&##΂K_xޒn;"^:s~ 35֬tؑ[aq|X,Io=wg{7{}ސWn;RT`+l8Jԙ;{9n;"^}V[a16d#l6+?U~ސm;RZ`+lM`G6ԫM`G l ٙ vǡ'F j3f j3fؑ[a@Ou'ig.F`KL6)16ӿ3㯟xwjt vb ۙg:ꎍJ`K,6)26ҳ3Coi]^l;"^:asҫ-l.1vVGμ~_Ï~_,~y^k;Rh`+lOg鋋%!/+X$'~6⮍H`5P>yoOn7_H52&3vVpX\\~W_}͊/a 5&#գ3w8 \-d֚ љrS^&3vVp\RkHVؑ[a;s{wrHT ]~| g ؍Cj46eHcv=5vV#$]=:^wH/.vVG^\i;Rz`+lťU؋9ֹ[`I4)>6tZy_H52i;"^}:sq ֭O+oJ8 u&#HΜRٟ?> {%vVיiu^h;RC`+lֳ3{a?qwm݇ozc~w6~[_J`-4PޝӛyėܭM΍}φ|on'oT :љ3u_]_GwцFn H˱ycH?͟`Gwl)olWμ| wglo/Q{e^ͶľM>ؑJ[a@9Ͼ;t_rbHw6X@X\n8=7vVGgK}Gԟq;RM`+lq3q|鏯lfs?vOϋ{9g;"^;sOuް+8ؑz[a{tp> vVoٳF㫛H`f;"^=:,G?5t䎸}Xr HM`S|}֣}W:^߻>q|He&#U }:gAɔ M IDAT^r(~Zf;"^:fIz?mȟ}w*t݌u.+a&#uu}3<}+0NjDd/k}-vVp?zj󩍟vke{5{!vV7^]kDV#2ݽd^}-cL` l {O_ [O&y~_/`{)vH`@μ\ww/_pזykWg?:xu{!vVv}]^,XY{̃Eu^N186 ;%KԖ6 ^ (%&C5uag۾`F꣟Qg_ 콲CL`6+wrzOK]>|Y.:[f-eU E; Bf-^0;6m;"ơggU E9_q`/J`eG lne ne nXv z e' v eM 6 0B`+lZ%_;6V4J`7,;vlhnWv|^`6K`+;~/c l @v;6Vho`vW݁M&6 =yE[`dvl:hnWvx^`& Ƭzvw^`6K`7F`I` gz'-fg "Z` ٙ Jvs[aД6;S`_ !쾲pImvCؽeL}% ~ٙJ`A`! 
fg +=}7<\L)1p)mvCG~ٙJ`A`'=fg 2Z`6ԫ{Hz6;S` AcĕгKkN`G6ԫ{h6;S` A%6;S` A$_p`ά9P6;S` ASd`lmv.vhʁ6;S` Ad`dmv؃ا~L-!OQٙ[`B`*cj3>Yٙ[`B`.{xL-!O=`FMmvfÁ]GX؅}L]GZl5#hce.Xi3v)vD`-{t&;S`B`G\k3v)vD` {|R)K!# L] 'a0dg R>M&;S`B`G\ d.اe 2ˏ١''f [2=$]q-v.e3h3v1vD`!{м;.sd4Zbg b>KPq-v.d5bg b>S`i-vf<vD`+{в;J`C`Gٲ k3v1vD`/{Ю;J`C`GGj3v1vD`!{Ъ;J`C`G j3v1vD`#{Ц;J`C`G@GMj3١:Cvμj4!$+=hP{9# !#{8#י3;"=hN{9p`E`Gi3gv!vD`*{И:sF`B`GGmi3gv!vD`,{Д:sF`B`Gв-i3gZ $#{p#4יs;"=hGs9' !#{cf4יs;"ǐ=hEs9' !#{٣F4יs;"G=hBs9' !#{,4יs 3fdD\g5u!$k=:sA`B`G#Zg.BSh`Z̅,;"Ǖ=:sA`B`GȲ$Zg.B[`Z]أLXk !#{|cj3 Q d5֙K;"/"{\0Qu.LSc$ !#BG&Xg. B 5֙K;"/&{l0Au.LOcp`=/e 4֙K*%DdP&μ& !#G(Vg^؅}aCIi35e}ic)i3 BA ՙv!vD`'LF[yM`B`Gvq Tՙv!vD`LD[yΖCih3oBN=V:.,ك)h3oBN=Z:.<5ՙ7v!vD`'Tμ! !#;SvMu捆{vYٲK`Gv @z9! ԭ\؅-{P:sE`B`Gva @Z̕,;"e[RgB.AV-u."d]*RgB.CN-u.Bd^Rgىbd_*Pg؅] @}5;" =NCF`B`GvI0i3B.K . u]؅TW|կžo B.M0&UtG?s^Ȅ١JT+woD`/e>;"K=Ayw;{%;H] @JO{y0>r#.Xxw kDkcd/%q`?/Jv%#lޙv,3Ey-e^;" =(Z`'xM`d^;"K=(Yyw_%;d/ʮvD`W {lP;#sMv!vD`W!{tP;S`kfQ"vD`!{xP;S`i;v8E*3{@`I`#UzgL5 $Q*3c?|4Wv{'#&٣ҔޙҿUFvz'#*”ޙ|Ν~[vy'#2ޙ׫g״7{ MH$w-=ײܾq~IqdvQ|gn/9[`nvD`W({P;sFd9" ؇VqewJQ~g/Y|>8@`Gv<e(3痰<]]}l{%7]!@ *5kϝTG}-6]1@j6 _#9]Q@*:󣟯~=$Mq@J:7_k~=$M@6:s.e}Rљ^ QM`B`GvG?ym;" {& !#{'љv!vD`OD@6:s[+ I Mt]ؓ= ! !#{B.! !#{J'.! !#{Z.! !#{b.! !#{j'.p`i==Dgh$N{H#dO\L K`B`G$eO \H K`B`G4e \F .ʞ:s.ʞ:s. ˞] ΈvD`OY;"'-{n`d tf]=;0:.˞QA`B`GvgF@gv؅݂)4ЙxC`Gvg 1" !#c~gv؅ݎM34سCh=%#!S~gv؅ݔM3BnKlߙ]ZD.$J&ߙnN&ߙv!vD`7({`0N;"[=i0wf']mʞ6;.Fe b٩~vD`*{`Sn;"ە=wpwf7]-˞=8;.egM`B`Gv'26΃vD`7/{ ̀.l$6@&ޙ{v+CvD`{ Pwf@`B`G63 xgv!vD`=pwf@`B`G6ײ'7 L>/جd'kڝ؅͚ #M3#;"ِ=piwfD`B`G6['0ΌBfG@ö.lve+5ΌL=;7tɞZgҝ؅M>&ݙ!]Dtgv!vD`˞`8dҝ؅>S M3C;"/{`Iwfh⁽<; @;"9 {`)wfL`B`G6eO4ܙ1]=rgv!vD`Kd@)wf.lzʞn0Ό B Sش;t!{`˄;s]%{`Ä;s]){`̈́;s]-{Ƅ;s]؜ {`iǤ; ؜${`n.lN=p%'(~ ؜,{ @`OO;"9]t;s]؜#{ht;s{v9"#dCmng3>pCؔ+{*hd;s/]؜/{2hd;s/] !{:hd;s,;"FЦv^;"JТv^;"PМv^3qvD`3 1Sv!vD`3 )Sv!vD`3 !Sv!vD`3 Sv!vD`3 Sv!vD`39 S# f<M3؅͘'*hg !#eU6<vD`3 `&ڙB+ɚhg0>|FC.{iv!;"Y `ٙ4ed[39;")BP vf;")EP vf;")GP vf;")IP vf S)vD`S *>v!vD`S9>v!vD`SY>v!vD`S >v!vD`S>'ݓd@&יBp3@&יB|ٓ@&יB @&יL0ϳM%>"M3؅M=g)D(IDAT?L3؅MU'@L3i8g>iH;"MPuf?;"P4Puf? g@=$ gBBL3{؅MgCL3{j8"#eO&֙= Brs"@ufO svD`Sy ϴ:/]LBdZٗ.l"{v0K`B`G6=A\ܴ:/]LK pY̾Γ fjIKTg& !#)ʞ*.eRٛ.l&*{Iufo;" `|&٧ fҲ'MM3k8g>);"`DSv!vD`ӂ`,Sv!vD`ӊ` Sv!vD`Ӑ)`pS'31(&ԙG؅M{RL3 !#&eϦPgA`B`G6ʞP0<¤;txش,{V8t:;"i\ pt1v!vD`5<.lɞ_N3<Ɣ;wp-{8d:(;"aM< ptQv!vD`Ọ̈L(y [ҙi8g>yD;"!=0<.l#{8`*y%=$ ig_=&ҙG؅ =dt~'a.#M&Om eۦљ؅ Gɞ6L3% !#=#\Fgk*}6;l8M 03<.l8] 0p h:x;"aٳ4Ъ)tv!vD`PgjESM#Z ʞL3O !#=g @g@`B`G6"{1<$=]E`G6'{<.lYLZy ]p80Yw)'O6/{2Ū3O& !#*}>TugP dTݙ'<9 ! l`C(D͝y:]P<]݁}ִ/$XSqg.ls~7Ȗ2UEg~;7^$' 8 $ {Й|Κquvu;"N} :?|g-vD` %|N̫V~n7_ goYC`gG!#LVyhVʟ_^ƹ[Vseȕԭμ6zW|[ʥ(Vv$P;c|eB2s?8o;{UN QrgݺYq~۲Ξ.I`G6e#+3敼q`G-Y;"NSjg^^e=/M`G6GN6ٙ+nYl`g ddؙvKM遽{͌n )=w]_h[XԳu o c)>+?V2,ݽ0{q!sX/,KI=0}繯7g@u؋k֞&3qY{ ==w{ϖp5G?_Uk-nv%\D=[S꬙_^p6A`6 H`uN_7p٧z.S׼k{ӔnX|o/;MG[۔ Kz RGl>V$g25xÃ7hA`b>q=7[}s-Yz5{uIS5d-|qu7ySD݈G맹j!w7-um,Cs?H#*gn=*I'=OփqFzX6GNzv2 6߁kaȥ{.M;M?.=K{smOmm,@$taoyvg zqƣ󷏩~'ؤ}Õ(㫛'GM?ENJR75q#?I룠 oI9NASa6N$?nf2F`7aҀ9͢7ez] &^ kXFVi6\_+OO׼oZ!o,6_xlzY,[\<.dozͿf5݄]%k@]H6lzY{wR¦/a?WlϽ3~Wy`:_򻫫~;-h6YV:FGzPM/|eko^/zXnrpqsemv6 e#6,ﶾ[}}Q`7M遾ͤ2_"VyPSŇ].f|yT`빱^_KVn)vvq4CK/pk^oծqs<*enBށG~N@eϼ;8,k??kvVU[GܖM6}sMؽ^N"/:lNlNv ݄@E_{Г4+g~ fɶo4K['r#잕dݛy6lኹ˖9ۥTǭQIj?X+۰uR,>V_s:MOOn_r7oT`]}͙6}+\hJb7=]]}+s%j lZٝ׏ŧ=av<|7˩놕m|2 P Mo{>=힩 -_`7ネ~sN`s4M˧^{ nG?/GɂO4A`s~gakW~\-WgoWg+ lo:<_ ={ zl l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l l~k3gVߜ'~۟:cǛ=a `@`D=OsΏ{$ lEgfzG/Ư N`([޷{u^^szQ=! 
l [/g\?[˼~w@6^ |k}bC>8{'FWr+Y=d`_ ׈Nz9(Qw`/ H4zlt`DA`﹒[A; ^6P: 0(q~G O` ?udQ`w\}<=_dW;_0ۧg{zc'?K[|'/x|`[ l3cٳXn>}]7'j{\݃/Ӭ~a뻏ɭ'I7^mm{z6{#\{;?{{ 0 pe`oZ [[VWhz׫+`ֿ.YFa;|c=xd`[ql3,[ (7?McgnK̛fE:}"77n~ܤY>f+㭃ҵdg̾<*-86 ^79>~oO~<'onmګM~vwϖGoݽۺngu}m{` 7.nq eK˷_?|멍:_]=~f>97濽7;\}s {Y~o^!t;wvb7uo31o!pVmnwOl[kxo' z؛׫5^O9ڪ[}F!ΰ[7|/m|_O]*H [݇?:xD`:6Ql3>|s9n`˿_^Q^kz< vi%9% t#m tc8â] osC=vKh߿U;/ ͏,٭W[o]ݸ ؇~ ` #o[`#9:E[}^%o{>/'A>xlg؉پåxz`duÛkD:`>|lg8>o?gzw ?wb`jxn/7/W"I`W`\7zzG{.p^[`>|lg${3 {}Î{(8Ձ?Ov>E#^pZDd}?cm_}zo`yh[9zN[O_sרo}[}F!0M˫dS+Qz}n?ٶo٨COzm??{wo;"!uKXRӎx 0EiSOAlQ{zD1/i+ Ur_d#}:{k]::L£ϽuϏ:k_ 6 8`(# 6 8`(# 6 8`(# 6 8`(# 6 8`(# 6 8-طwqUVw6؟~+xw}J~}?{$*+J*+L̬{-ؗ, ^}qq_,`_\eU١`w~eKW/.a_Exw{Ug,ؗ/ ^‚}uJ w{ҧrr '^;3O]^{uլV53ܬٿ(دp{T`5y x {6' ŸqFp`oڦ{p`o<욏Fp`K{`uu/lYd+/sA`_y ` lOo^>(x]~A??C˟tC `'6]̀| $ (O?{fY2Ⓩ+( +ǿ}7>x~OSMsoǗoݗպ.{𗺿{}I:2n,XƨXd+/Ϟr|x~wtq? p}SD-o\eU'iH=ᄈ[fM|O/c\^*ө7D'{qͧo?yK<]^;μɣ:z-~ O;lIPۢOCOeK}xKGi]пF{qS?w[̻kp{+,zC~ܯ`S'|;/J>|K</+\kygU'3y~}l}7>wջ?_p__~ G<}o y'>`l] $-+w/~MǾ)>ѱxIT4N[ש?: v^ |6-z]XXɣػwa|jO5nwf,yw9W.؛?\OuD5|@>m]$/o[7%1geKݽ Rɭ~d%4_@5H秬?~~k`޼} #(ܿ`w{?yRS|q %[O}[VyoZN \_%'oV[NzY} $-؝ouӗ?+t{Q/?w\!r._x7;շ/9#%>`_$+j?`q& %a5g~qwm al˾7g~/f? d\"]/{kNj`F{ x/ݖ3oOztoǻ?w^7P:ݓx^<`ywկ~nd}ORs-d6">ΔX$*K뻮v敖(;{G}~WZOGc %Oҹ p@$LK}t:^o[~;^źiKLb~< wgJ^샛cl^ѶWe=4;|𠋝x33_`Oc>qq %]AcymZdL׎5} }t ^;$swgJ^7U\׮w쇟Y}`dYW\~3W67N;l?r9~ و X$-؝>.n ˗_ïWyzKw/ͷ)ث=InlGk+ɋjvm7{Wj,V{xw/'d!`=InlĽǙ+|~=7\#b_^_:m]?)O8c>n}9 eOW>qq& %]^׫UMZ[KV%V~\klwy}|}?o'ɫ`8b`ԫD߶P?uP?/czک?$<6໇CePI}+d{sSMD:%qw޷-K?޵~)O|`5O|{Â}힐^ d{ p%"=I?~b,2 %S~﷫͋}dY{w$Ue^}N俷ɯ<';}P~cˊwwt~컭 P/{~?r<2F E`g;O?[蟲?}1{XwKT<lm'Ǐ/g#>XXd,wfbBqÝ3+f(؛!Z K> )ؗm?I?~B>2F %]~o|瞼8W'8~ॏ?OsGzw;+O?䠂m?I?~J>2F %Sw}>V?qgW~U}{?]L瓇}ǟ`/~~?-αz~}.+ق3{) u(ؗ]{u3pR`~G[};{6bT.ػ;$6T.7l\wU=r^5ݼ6.O?~ \# 6 8`(# 6 8`(# 6 8`(# 6 8`(# 6 8`(# 6 8,/VupտN(2+}F.Q"o^*KEUxeV0H\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼UbPxeV0H\$10E߼U-ˬaHb`D$y1,;[T=Y 5r"I~bTY v( {.҇Aj"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨP W;{.҇Aj"E,ŨPlQ\fE#X$͋Qe)ء(آ J$FHRCQEs> R#I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`X's> R#I (b7/F``*2+}F.Q"o^*KEUxeV0H\$10E߼U-ˬaHb`D$y1,;TeF,U"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;U8ˬaHb`D$y1,;[T=Y 5r"I~bTY v( {.҇Aj"E,ŨPlQ\fE#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ء.w \fE#X$͋Qe)ء(آ J$FHRCQEs> R#I (b7/F``*2+}F.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KupտN(2+}F.Q"o^*KEUxeV0H\$10E߼U-ˬaHb`D$y1,;[T=Y 5r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vpB=Y 5r"I~bTY v( {.҇Aj"E,ŨsW?7o|'ͽ;%"[T=Y 5r"I~b gWoR!ˬaHb`D$y1ڰ6KTzq$EHb`D$y1{^տU?g?~_8C|,2#I̋*r"I~bTYlWz%O}gɂJ/.3bļ"I (b7/F.؟W͛?vxUϒ^\f"yQE.Q"o^*]?-=nӟ>',YPeF,U"E,ŨW2oҋˌX$1/E#X$͋Qe x@_;?gɂJ/.3bļ"I (b7/Fu._{{m>]_0>׏P?ߺ]_o[}~_~O}޿xޥG7xϷ=?|ngoO ,ݹ(q|?oy̓?`..}o_^>*؇_pҺ`tQ>- 6"`߆Ϋ=x/u y.>S_w`h؛.O>N./R@KDw9Ȇe.|mzq7:≯'u"~,ߺi\[qVxeV0H\$10E߼-7Ox5% 
r*ؽ2}c.]o?ت`?c]?g>vm^[&}PuX\fE#X$͋Qr`۟Y -t>E~[L/_X;(/[e>(M)V?s`P 5r"I~b`~1ͪ9e"?,qwU}.ؿ5"[҇Aj"E,(3`ڼz=uq^^~{{S/+KNXC8`?kGJ$FH:mB_Y[?]#l_7#GC {׈|a٭ ?{n׽sU_|ý¢[ V_7l>|m~~~#p[/[s> R#I (b7/F.׋X׬},hV5"o_G`o^׵G{׈q݂{w E!o/ gU^`?ѿ9p\fE#X$͋Q BK@z{dAS.kD{{w{/uOD5"˒݂+AbWy`>m&>lz?X߀ى ˬhe IDATaHb`D$y1 w^^i}xHe'?Y[5"+Dۛ# b"*ſN^h}7GO/~- V ת_Go/+o>vjݻ WxeV0H\$10E߼}ؽ`/k#W? 뗰o*3OcϒM]ܺ`wY]!/ػ"ݯGob"˯XY^qϛBk`+|`K؛{?Ǫ`w; J$FHʺ!;yƯ>qH8uqFdٚ_h _iN5"ˎMN^7v^^+ˏ|ú.s޷n՛ WW_Om_~維zU[s> R#I (b7/F/w>Џw'n~ϒM]ܦ`ya]Ӫ{xҿ!l >v{ՂߴK`/7{"n盷B/{?ev;?-njv{.҇Aj"E,Ũ3;~gwns!gɂ.nS׈|qju`{ۡOzM_#us H^wXW]xݣ]_Wo{*2+}F.Q"o^*;Ggɂ.n[w׈mWw/vD>wF}~kB6>q[^ 撏]>; 7w`R0H\$10E߼Uj6Sog`Fo>,ׇ<`y[7[}kp\oKnؼݹS|J$FHRCM]ܶ`oq{H`ۯSߦo]oJm~㉂C -/).]^ 5r"I~bTY v)߿-ثZ}~ą[L+ثߴ{j|=}}> R#I (b7/F`r+뻇&/Wlqb^wD[\Z_Y:(ێ!~}?҇Aj"E,ŨPn{}Hoݘo?sX׈t9n#ջj[W 7J?Ѥ)ؕ> R#I (b7/F`r+kD޾-ϻݽm =`yn}ۂvp7է_`owPRHh]U 5r"I~bTY veqo^Kٽ7Y݋S /Lh{ES/׫y97]^ 5r"I~bTY vB֗l "״{Uwmz׫{ {/aS{6W, }G뗰7wHaHb`D$y1,; 7w^!^=`ony]zWW/?oM/M?{o`ӭK_g)B 5r"I~bTY v?>Bde\=y+CV/OE:_U{ ꚑ{¢ ~!+ث#[~J$FHRCM]\`M~aiS osz7].om/^m޼n{[J$FHRCM]\`/ٴM~{ӽY=`/boms6}Mo L =ws~]U 5r"I~bTY v:^lBmħ7e E:/bӛ|[ 7ֺ`wޭKk^3xsm6҇Aj"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨPHb^T$FHRC],\ J$FHRCQEs> R#I (b7/F``*2+}F.Q"o^*KEUxeV0H\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼UbPxeV0H\$10E߼U-ˬaHb`D$y1,;[T=Y 5r"I~bTY v( {.҇Aj"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨPHb^T$FHRC^\f"yQE.Q"o^*KUzq$EHb`D$y1,;TeF,U"E,ŨP W;{.҇Aj"E,ŨPlQ\fE#X$͋Qe)ء(آ J$FHRCQEs> R#I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vҋˌX$1/E#X$͋Qe)ءJ/.3bļ"I (b7/F`*̈E\$10E߼U2#I̋*r"I~bTY vŽukˬaHb`D$y1,;T5UQxeV0H\$10E߼U*`qo}+ ]=Y 5r"I~bTY vC>\\fE#X$͋Qe)ء_{{.҇Aj"E,ŨP/_Ӱ)2+}F.Q"o^*Ku0?ѯ֯Es> R#I (b7/F`2WxeV0H\$10E߼U 62\fE#X$͋Qe)ء`Ӱm\fE#X$͋Qe)ءf^~/4lS=Y 5r"I~bTY vw`Ӱ-\fE#X$͋Qe)ء^ܹM6s> R#I (b7/F`{qg 6 {.҇Aj"E,ŨPs/|aTxeV0H\$10E߼Ujŝ4 J$FHRCͿ}J=Y 5r"I~bTY v7`Ӱs> R#I (b7/F`}q_~M>VxeV0H\$10E߼Uj}a4#\fE#X$͋Qe)ء^ܺ>4#\fE#X$͋Qe)ءf^W\fE#X$͋Qe)ء._^i}\fE#X$͋Qe)ءf-ػ<`Ӱ{ J$FHRC lvW=Y 5r"I~bTY v(M(2+}F.Q"o^*K5y`as> R#I (b7/F`sq J$FHRC͹M)2+}F.Q"o^*K5:ypaos> R#I (b7/F`qq) J$FHRC͸QQxeV0H\$10E߼Uju[sCas> R#I (b7/F`oqI J$FHRCͷRxeV0H\$10E߼Uj:3Y=Y 5r"I~bTY v7`Ӱ J$FHRC͵~en+49D> R#I (b7/F`XR'*2+}F.Q"o^*K%Yi؜J$FHRCT skasi*}F.Q"o^*KEUxeV0H\$10E߼UjM-4{.҇Aj"E,ŨP,.Ss> R#I (b7/F`gq o؅\fE#X$͋Qe)ءYC ˬaHb`D$y1,;,;*f\fE#X$͋Qe)ءfYK ˬaHb`D$y1,;;f\fE#X$͋Qe)ءXS ˬaHb`D$y1,;`;(2+}F.Q"o^*K5 n؅\fE#X$͋Qe)ءfX܉LnVxeV0H\$10E߼Ujܰ J$FHRC],8H {.҇Aj"E,ŨP7{.҇Aj"E,ŨPTO`7+2+}F.Q"o^*K%^o.2+}F.Q"o^*K8 {.҇Aj"E,ŨP-׶as> R#I (b7/F`r_ɖLnVxeV0H\$10E߼U}q6{.҇Aj"E,ŨP`(2+}F.Q"o^*K8}Mv=Y 5r"I~bTY v(ŝf\fE#X$͋Qe)ء7C ˬaHb`D$y1,;(N J$FHRCy/n}-v=Y 5r"I~bTY͂a<`_R`gFY}^t){.҇Aj"E,ŨPlQ\fE#X$͋Qe)ء| Ukذ J$FHRCQEs> R#I (b7/F`]M=Y 5r"I~bTY v(V_.2+}F.Q"o^*K8Ss> R#I (b7/F`r]܌5{.҇Aj"E,ŨP`)2+}F.Q"o^*K8 {.҇Aj"E,ŨP۱Cn ˬaHb`D$y1,;(؎ J$FHRCy.nނ}v=Y 5r"I~bTY v(Qs> R#I (b7/F`r\%ݘݬˬaHb`D$y1,;.׫as> R#I (b7/F`XpQlO\fE#X$͋Qe)ء eا`_]xeV0H\$10E߼U-ˬaHb`D$y1,;T}v=Y 5r"I~bTY v(Q]s> R#I (b7/F`[\DF ˬaHb`D$y1,;(خ J$FHRC-^LnVxeV0H\$10E߼Umq14{.҇Aj"E,ŨPn`*2+}F.Q"o^*K嶸}mv=Y 5r"I~bTY v(Q}s> R#I (b7/F`r[\T. 
ˬaHb`D$y1,;./f\fE#X$͋Qe)ءGvVxeV0H\$10E߼Ukqq4{.҇Aj"E,ŨP ?ˬaHb`D$y1,;T‚}=v=Y 5r"I~bTY v(}Ss> R#I (b7/F`XE.2+}F.Q"o^*K8 {.҇Aj"E,ŨPN`{+2+}F.Q"o^*K崸؂}v=Y 5r"I~bTY v(Qs> R#I (b7/F`YܹB]A.2+}F.Q"o^*K8 {.҇Aj"E,ŨP>`+2+}F.Q"o^*K峸]as> R#I (b7/F`Y]=Y 5r"I~bTY v(ŝ|.2+}F.Q"o^*K8 {.҇Aj"E,ŨP.`+2+}F.Q"o^*Ku0\Eް J$FHRCQEs> R#I (b7/F``*2+}F.Q"o^*KQϗ9 v]xeV0H\$10E߼Ucqs> R#I (b7/F`X ˬaHb`D$y1,;(3(2+}F.Q"o^*K8 J$FHRCy, v]xeV0H\$10E߼Uaq0Y=Y 5r"I~bTY v(]]ܰ J$FHRC9,={.҇Aj"E,ŨP`ϡˬaHb`D$y1,;⮰`n؅\fE#X$͋Qe)ء/nH`7+2+}F.Q"o^*K5}qYs> R#I (b7/F`X`m؅\fE#X$͋Qe)ء(آ J$FHRCe/ev=Y 5r"I~bTY v( {.҇Aj"E,ŨP7Ss> R#I (b7/F`.Uv=Y 5r"I~bTY vɋ`ϣˬaHb`D$y1,;]y.ڰ J$FHRCM^{\fE#X$͋Qe)ء.nX`7+2+}F.Q"o^*K5uqf.2+}F.Q"o^*K5uqs> R#I (b7/F`8 L J$FHRCM]B.ٰ J$FHRCM]{&\fE#X$͋Qe)ء.N`Wl؅\fE#X$͋Qe)ء.|?{&\fE#X$͋Qe)ء&ݬˬaHb`D$y1K+A IDAT,;T]as> R#I (b7/F``*2+}F.Q"o^*K5qq*^.2+}F.Q"o^*K5qqs> R#I (b7/F`8 \ J$FHRCM\L.װ J$FHRCM\{.\fE#X$͋Qe)ء-nh`7+2+}F.Q"o^*K5mqBZ.2+}F.Q"o^*K5mqs> R#I (b7/F`8]as> R#I (b7/F`ݗݬˬaHb`D$y1,;Դ f\fE#X$͋Qe)ء-nh )صv=Y 5r"I~bTY v=Rs> R#I (b7/F`TW_ v{.҇Aj"E,ŨP v]xeV0H\$10E߼U-ˬaHb`D$y1,;Ԥ nf\fE#X$͋Qe)ء&-np *ؕv=Y 5r"I~bTY vI\|) J$FHRCMZKnVxeV0H\$10E߼Ujߨ]as> R#I (b7/F`ὗݬˬaHb`D$y1,;Ԕ aN.2+}F.Q"o^*K5eqk/Y=Y 5r"I~bTY v)^{) J$FHRCMYW4{.҇Aj"E,ŨPS7Rs> R#I (b7/F`᭗ݬˬaHb`D$y1,;omhJ.2+}F.Q"o^*KEUxeV0H\$10E߼U-ˬaHb`D$y1,;T]as> R#I (b7/F`KnVxeV0H\$10E߼Uj:/Y=Y 5r"I~bTY v k輡F.2+}F.Q"o^*K5aq ݬˬaHb`D$y1,;ŵT^ v{.҇Aj"E,ŨPRyc v]xeV0H\$10E߼UjZ/Y=Y 5r"I~bTY vkif\fE#X$͋Qe)ء/ ˬaHb`D$y1,;ŵ^ v{.҇Aj"E,ŨPRx) J$FHRC^\S.v=Y 5r"I~bTY vQw) J$FHRCQEs> R#I (b7/F`*Z7{.҇Aj"E,ŨsW?7o|/͛o~gɂ(آ J$FHP?͵Gkџ,أTw) J$FHI>zݢ ˬaHb`D$y1{˗?{Տ-f|ժSkjf\fE#X$͋Qe GWWZVmC vKnVxeV0H\$10E߼Uֻ`/Z/awoR+(v=Y 5r"I~bTYN^?`7h+f\fE#X$͋Qe GKUnsk>K4vqmeݬˬaHb`D$y1sh՛7f˯~|,Yŵݫ(v=Y 5r"I~bTY炽|/vS[u] v{.҇Aj"E,ŨkO|X _ hWR)^q//"`hWRi+sQȢLn˦`7hl]7\ׂ}0.Z/^ bosA˂=l'+R=Y 5r"I~bTkwܸݠ^MNݰ J$FH R#I (b7/FC?y3ދ#Xt) J$FH; ӇRS-Rs> R#I (b7/F!. 
]`'n؅\fE#X$͋Q v2=OZ{.Y=Y 5r"I~bTY,ΛVv`s) J$FHʺe~=Ǟl v֞{e;o.2+}F.Q"o^*^^ay p5ݬˬaHb`D$y1'6n>B֚KnVxeV0H\$10E߼Uv}Տ^*5{kZk ˬaHb`D$y1{:% ֖KnVxeV0H\$10E߼UjZ[.Y=Y 5r"I~bTY vjmWX6{.҇Aj"E,ŨP vsɥ`7+2+}F.Q"o^*KEUxeV0H\$10E߼U~Nڰ J$FHRCZ\sǥ`7+2+}F.Q"o^*K5jqݬˬaHb`D$y1,;Ԩ5w+-9v=Y 5r"I~bTY vQkf\fE#X$͋Qe)ءF-Rs> R#I (b7/F`{;e.2+}F.Q"o^*K5jq ݬˬaHb`D$y1,;Ԩ57\ v{.҇Aj"E,ŨPcp`gl؅\fE#X$͋Qe)ء,Rs> R#I (b7/F`KnVxeV0H\$10E߼Uj U ˬaHb`D$y1,;B[ v{.҇Aj"E,ŨPפ`k؅\fE#X$͋Qe)ء(آ J$FHRCQEs> R#I (b7/F`z{;].2+}F.Q"o^*K5fq햂ݬˬaHb`D$y1,;Ԙŵ[ v{.҇Aj"E,ŨP#7 l ˬaHb`D$y1,;Ԉō(f\fE#X$͋Qe)ءF,nD`7+2+}F.Q"o^*K5bq#ʭBNְ J$FHRCX܈nKnVxeV0H\$10E߼UjFt[ v{.҇Aj"E,ŨP#7J\ ˬaHb`D$y1,;Ԉōf\fE#X$͋Qe)ءF,nD`7+2+}F.Q"o^*Ku-#FNհ J$FHRC]},O{.҇Aj"E,ŨP{L)v|)2+}F.Q"o^*Ku &j؅\fE#X$͋Qe)ء7J{:)2+}F.Q"o^*Kվ1ݖݬˬaHb`D$y1,;Tt[}ԯv=Y 5r"I~bTY vō)Dn؅\fE#X$͋Qe)ء7Rs> R#I (b7/F`j_ܘvKnVxeV0H\$10E߼U}qc-Y=Y 5r"I~bTY vōiW_Ok 6> R#I (b7/F`j^ܨz{WaHb`D$y1,;TF[ v{.҇Aj"E,ŨP͋n) J$FHRC5/nT`7+2+}F.Q"o^*Ku ڭ@aHb`D$y1,;) {G=Y 5r"I~bTY v( {.҇Aj"E,ŨPר`6왞Q'\fE#X$͋Qe)ء7j$לJ$FHRC5/nT)؝=ˬaHb`D$y1,;TFZ=ӳˬaHb`D$y1,;TZ v{.҇Aj"E,ŨPk) J$FHRC.n\`7+2+}F.Q"o^*KպqݬˬaHb`D$y1,;TZ v{.҇Aj"E,ŨPk) J$FHRC.n\`7+2+}F.Q"o^*KպqݬˬaHb`D$y1,;TZ v{.҇Aj"E,ŨP -_?|)2+}F.Q"o^*KEUxeV0H\$10E߼U`f\fE#X$͋Qe)ءko؅\fE#X$͋Qe)ء7Rs> R#I (b7/F`j\^KnVxeV0H\$10E߼Uqq#{`Vo؅\fE#X$͋Qe)ء7Rs> R#I (b7/F`j\^KnVxeV0H\$10E߼Uqq#{bo؅\fE#X$͋Qe)ء7Rs> R#I (b7/F`j\^KnVxeV0H\$10E߼Uqq#{dn؅\fE#X$͋Qe)ء7Rs> R#I (b7/F`j[^KnVxeV0H\$10E߼Umqc{fn؅\fE#X$͋Qe)ء.^KnVxeV0H\$10E߼U-ˬaHb`D$y1,;[T=Y 5r"I~bTY vk]v=Y 5r"I~bTY vōf\fE#X$͋Qe)ء7Rs> R#I (b7/F`j[^Zv=Y 5r"I~bTY vōf\fE#X$͋Qe)ء7Rs> R#I (b7/F`jZ^+[uv=Y 5r"I~bTY vōf\fE#X$͋Qe)ء7Rs> R#I (b7/F`jZ^KnVxeV0H\$10E߼Uiq{nm؅\fE#X$͋Qe)ء7Rs> R#I (b7/F`jZ^KnVxeV0H\$10E߼Uba[as> R#I (b7/F``NQxeV0H\$10E߼U=;E=Y 5r"I~bTY v=*lц]xeV0H\$10E߼Ueq{-Y=Y 5r"I~bTY vōf\fE#X$͋Qe)ءZ7Rs> R#I (b7/F`jY^+]5v=Y 5r"I~bTY vōf\fE#X$͋Qe)ءZ7jlɆ]xeV0H\$10E߼Ueq{-Y=Y 5r"I~bTY vōf\fE#X$͋Qe)ءZ7׊lņ]xeV0H\$10E߼Ueq{-Y=Y 5r"I~bTY vōf\fE#X$͋Qe)ء7תl]xeV0H\$10E߼UbaNf\fE#X$͋Qe)ء(+>ǣs> R#I (b7/F``$QxeV0H\$10E߼U6 R#I (b7/F`)ݬˬaHb`D$y1,;M) V.2+}F.Q"o^*YqgJQ;°k)WG`h/hY}^`K]#RxeV0H\$10E߼U1SݦˬaHb`D$y1,;cާM=Y 5r"I~bTY vዛkl]xeV0H\$10E߼UjZ v{.҇Aj"E,ŨP7f)B ˬaHb`D$y1,;Mf\fE#X$͋Qe)ء/nR`7+2+}F.Q"o^*K5xqzmӰ J$FHRC ^ܤ^KnVxeV0H\$10E߼Uj&Z v{.҇Aj"E,ŨP7)2 ˬaHb`D$y1,;Mf\fE#X$͋Qe)ء/nR`7+2+}F.Q"o^*K5xqzmҰ J$FHRC], IݬˬaHb`D$y1,;OP\fE#X$͋Qe)ء(Gf~*2+}F.Q"o^*K5`Of\fE#X$͋Qe)ء.nZ`7+2+}F.Q"o^*K5tqzmѰ J$FHRC ]ܴ^KnVxeV0H\$10E߼UjZ v{.҇Aj"E,ŨPC7* ˬaHb`D$y1,;Mf\fE#X$͋Qe)ء.nZ`7+2+}F.Q"o^*K5tqzmа J$FHRC ]ܴ^KnVxeV0H\$10E߼UjZ v{.҇Aj"E,ŨPC7f+ ˬaHb`D$y1,;Mf\fE#X$͋Qe)ء.|^KnVxeV0H\$10E߼U}O9\fE#X$͋Qe)ء(اğSxeV0H\$10E߼Um?ˬaHb`D$y'F IDAT1ق}ßr̓,YMf\fE#X$͋Qe쯿8|,h&Z v{.҇Aj"E,ŨC ōݩ:dA7&,Wݰ J$FH+ w=xϒ \^KnVxeV0H\$10E߼U5zN,% ݬˬaHb`D$y1쐻K4lqS{-Y=Y 5r"I~bTJzױ{% ݬˬaHb`D$y1l{]þ{,YаM9 6{.҇Aj"E,Ũs˾g{G% Z>ejj`7+2+}F.Q"o^*TW`3vR6,Y?{.҇Aj"E,Ũ v"뷛YIU4% `_fTxeV0H\$10E߼Uv`~{Goo},}S=Y 5r"I~bT!37ݜOs2JdA7Rs> R#I (b7/F=_?_4ݗ`76k†]xeV0H\$10E߼Ul^v5!Of,YаMf\fE#X$͋Qel}p vAk) J$FH*Z'~d7LYAkk؅\fE#X$͋Qe,%s)% ɽݬˬaHb`D$y1숷JdA7Rs> R#I (b7/FYo?sxCW>K4hq{mނ}e ˬaHb`D$y1KD}7Cv|,h&Z v{.҇Aj"E,ŨVszgɂ-nr`7+2+}F.Q"o^*K5hq{m}U ˬaHb`D$y1쨂{Ak) J$FHʚGXYQ]t:kX>sghr`7+2+}F.Q"o^*kť]|,}V@ 's> R#I (b7/F =-^!Rw/R H{.҇Aj"E,Ũ\%_v}>K4`Of\fE#X$͋Qe/#WY?Lo>dAC7צ.WӰ J$FHʎ|,hZ v{.҇Aj"E,ŨPC7Rs> R#I (b7/F=[Omgɂ,nz]as> R#I (b7/F=_ϒ Y^KnVxeV0H\$10E߼UjZ v{.҇Aj"E,ŨV_jS>;90dq{m} ˬaHb`D$y1Uwh̬dAC7Rs> R#I (b7/F`齖ݬˬaHb`D$y1,;ԐMf\fE#X$͋Qe͂<+ {!k+h؅\fE#X$͋QeHsf\fE#X$͋Qe)ءל^KnVxeV0H\$10E߼U=LH]\fE#X$͋Qe)ء(ÄUxeV0H\$10E߼U=LH]\fE#X$͋Qe 폿C<<dAk]xeV0H\$10E߼UvH;o߷ sO{qz-Y=Y 5r"I~bTF_OfKe|,hz-Y=Y 5r"I~bT]~Fo&% 8^[`G7{.҇Aj"E,Ũg _&dw>wNu9Z v{.҇Aj"E,Ũg e7M{չҧ,YЀ9Z v{.҇Aj"E,Ũg ׯ R#I 
(b7/F=W[u^s֋},hz-Y=Y 5r"I~bT!{ݥw`v~qDm؅\fE#X$͋Qe%/ΣRs> R#I (b7/F\WcoZ5`u~qݬˬaHb`D$y1?rݤl +_kkІ]xeV0H\$10E߼Um{j?w}, &{.҇Aj"E,Ũh;+D^~o43AL$k\fE#X$͋Qe5"Wob+DE vLV J$FHʞ-닯W/`/ofsk,YyZ v{.҇Aj"E,Ũ aoJb^ Rx/_G`7+2+}F.Q"o^*;`߹7_\梐.. % :8^[`6{.҇Aj"E,ŨC v|˯W|, R#I (b7/F`:8^[`5{.҇Aj"E,ŨPk) J$FHRC]KSv=Y 5r"I~bTفOҁ>Ktvq.N~vk`~P0H\$10E߼UvH~:3% :8^[`?5w0|R0H\$10E߼Uv@zM\zmq ˬaHb`D$y1~M\zmyc҇Aj"E,Ũg zA g/pf\fE#X$͋Qe w'ny% `vԯgm؅\fE#X$͋QerS|,=ԉ~=g.2+}F.Q"o^*{`J_ugɂ(CQA_E.Q"o^*;`9<ϒ]K`fJ$FH)W݋gk)#gc҇Aj"E,ŨC`98^[`|?> R#I (b7/Ftrvq.@lH_E.Q"o^*;>دu̓,YŹZ `aHb`D$y1wr~ gɂ-ΧRG3hE#X$͋Qe;O^\xKogɂ-ΧRG3hE#X$͋Qe9~Rχ?Ѥ >Ktnq>BA_E.Q"o^*{`8a[,Yйa̜?#> R#I (b7/F=_q^S98^K̜?#> R#I (b7/F`:8^[`6왃c҇Aj"E,Ũ.9A kk?ME#X$͋Qec<% Z}zmݩsSxeV0H\$10E߼U&_si*}F.Q"o^*KEn-_;{.҇Aj"E,ŨP6aHb`D$y1Տ͛7==矸y// >Ktnq>="#> R#I (b7/F\o~oSg?#@saHb`D$;.>lזw^kX޹݋?{wlU} DeBWv hQ줍Jh7M 'ݢH[v{rԯ,Ȇyvmas~ Rc.XSt!O},}kԷQ 2Ԙ%cۗr F?EO=Ɖo?ЙÉ^0=)\,0%ݾ){671?gD~ݿ8!g?;}4G6tpM ˬcs˜b,tWkOx7oV6{ ؿum= D`|. /9`RعM`ע SeV1HXbaL1K}Y+|ρN/K?JȆyaas~ Rc.XSŒn_=؛l?hfN`}#:}8M؋bԘ%cۗ r՛Oa?NTkl4U[8v;Y 5b1X,%ܳ}%?C` 773>w$Qh0~k,W1HXbaL1K} "|`oJO=ӟ ?dWH{`Xcs˜b,tD}n}EEX~Rǟ =a`7+w.ҏAj cX_}~/_ >O 胯Pokڛ =nԘ%cwb;'>UԷ LJs{4G6tp  +~ Rc.XSŒn_07~woco|}/` '5G6tp(ح ˬcs˜b,tЇ/=|z ~~͑ > iiԘ%cۗ e[o<|dyƿ  ͑ > iiԘ%cۗ eG~>}}z3Ǧ'CȆNN[Qas~ Rc.XSŒn_ _o^xK~ȆNG`/LW)\,0%ݾ){.oߞk#0={w.ҏAj cXK7+_jؚ#"6]eV1HXbaL1K} Rd`G8Қ9}u>"g+|2+\,0%ݾ){*J_7_)_hlg*v;Y 5b1X,%HׯO~G}dr⇥+hl']qҏAj cXKq`s/{WG)#:yks `0-~W\cs˜b,tlmB6W&i#:yksInQ؅\fԘ%cۗ e~{ {hlᮬg4MqҏAj cXKa`zE~ [sdC'we># ok~ Rc.XSŒn_ 3"#:y+kYnP؅\fԘ%cۗ e~$n|o ͑ {w.ҏAj cXKvW~>KsdC7u ߹J?1K,)bI/A^]D~w|Ȇ6;7t=[;Y 5b1X,%H˾M~1_Q͑ V1HXbaL1K} R[=6ۺ5G6D`sQ`+ ˬcs˜b,t4?5g~>͑ V1HXbaL1K} RvT IDATOrן>*;il(>@`/7Z 5b1X,%H?*Ohl(>v;Y 5b1X,%H}OO}joܧ9pt`0+\,0%ݾ);7we4#A' sҏAj cXKҏ'Ȇ-N) ˬcs˜b,t偽=D7!Px :^0 ~ Rc.XSŒn_0]~} [sdC'{`-W1HXbaL1K} R3|i_|{C:9pt.|2+\,0%ݾ){>𩧿x}~9{pt `0hcs˜b,t4koh}D4G6nqN!  ~ Rc.XSŒn_0۸!< ߹J?1K,)bI/A^؟~̯;M~`Ȇ-N)\ҏAj cXKR%sȆ6bqN!g+|2+\,0%ݾ)Gw9!{.|2+\,0%ݾ)0_Hil^ҏAj cXK?"_wkChl^ҏAj cXK%ou_?Px :e}]as~ Rc.XSŒn_% 8A s8(\,0%ݾ)(߻! 8A3*w.ҏAj cXKa` omP_q ${`F#Ԙ%cۗ e`wsgIOsdC${`F#Ԙ%cۗ e鑦{-.I+ k ˬcs˜b,t`_4il(:D`/hcs˜b,t쩿/9~1=EsdC褵]eV1HXbaL1K} RvwiNsdC${`ϢҏAj cXKvW${`ϢҏAj cXKg{Ahr#@'6v;Y 5b1X,%H=Ȇ-.I,E;Ԙ%cۗ e 6߁e${w.ҏAj cXKQ`wOaʷI  Ņ]eV1HXbaL1K} R6 짿QsdC<~ Rc.XSŒn_%"K ˬcs˜b,t,Ut:^0+VJ?1K,)bI/Aʆ_aʧ -.I\1UR1HXbaL1K} R"Up:ḿ ߹J?1K,)bI/A][jhcs˜b,t,Up:mՁ ߹J?1K,)bI/A][nhcs˜b,t,Up:^0&J?1K,)bI/AʞI'8@;v;Y 5b1X,%HS$4{`,Z(\,0%ݾ)K`wnqN# ɢҏAj cXKvW贕.|2+\,0%ݾ){'9Or\"8F`/̵EԘ%cۗ e."]m?ˋt{Aas~ Rc.XSŒn_%"!׭cs˜b,t,=nԘ%cۗ e y.|2+\,0%ݾ)K`wnqN# Bcs˜b,t쩟aяt'9.1} gv;Y 5b1X,%HS{o^bp3@`5~ Rc.XSŒn_%> v Ԙ%cۗ e 쮦?.|2+\,0%ݾ){'9nXc#IKLnqlҏAj cXK|?6]C 5b1X,%HY-.|2+\,0%ݾ)K`w5} ? 
K(\,0%ݾ)K`w5} 7fF4a(~ Rc.XSŒn_%> 3#0J?1K,)bI/A^//6 4G64} 7fF4a(~ Rc.XSŒn_,w/34G6.5g0\fԘ%cۗ e/ _}|W5G6D`C`[ 5b1X,%H ׎~-mH`sU`)w.ҏAj cXK}wOȆ&{q~F{3SeV1HXbaL1K} Rl`K}n?0+͑ Mnq~F{2dJ?1K,)bI/Aʞ 9mr4͑ Mnq~Fs#2TJ?1K,)bI/Aʞ 췏?g s4G64y!pP)\,0%ݾ){._/Sؚ#< F8e~ Rc.XSŒn_=؛V}d?hlhp3B`?1!R1HXbaL1K} R>j_Tg~4c01Ccs˜b,t%!e&8?##34J?1K,)bI/A^?Hӧ9-H ҏAj cXK}Øn)͑ Mnq=F'F:gh~ Rc.XSŒn_=>|Sͭ9-h ҏAj cXK{7k&4$J?1K,)bI/Aʆ?+'_~/v #ᮍc d޸N 5b1X,%Hٳ697߼?_7?򧢒hlhp18R1HXbaL1K} Rl` MOoWOu8u}` W)\,0%ݾ){67]?lJ~(˂ilhZ9}` ߹J?1K,)bI/Aʞ _{gE_D`_ݎcgSeV1HXbaL1K} R\`o\7[Kؚ#"!׭cs˜b,t%}՛e?iEsdC<>S؅\fԘ%cۗ e/׭ᅱ3"#8 ̱\ 5b1X,%Hy_lCsdC!{¹49+\,0%ݾ){_r|ӷ!8S бX 5b1X,%HًM}V?z{)=` ~ Rc.XSŒn_=؛O^?Z##8S бX 5b1X,%H~T߽oY#8ؓNԱT 5b1X,%Hٳ雛Cدo<}l͑ MNҎIpXcs˜b,td퇯_|lDsdC>{;*\,0%ݾ){>?${ρ u4vr0-ǎJ?1K,)bI/A^ط7~O_׽Ӵ{;*\,0%ݾ){I`޾q77u|Ȇ&i=v`˔~ Rc.XSŒn_,wlChlptC`N H 5b1X,%H݃Ȇ6_Q:!` ߹J?1K,)bI/A];.w.ҏAj cXKvW<~ Rc.XSŒn_4_/~GE{u`]eV1HXbaL1K} RͫHx|8Y: C`Z ~ Rc.XSŒn_`fgZ[sdCLJ;@ 5b1X,%H n=IG4G6t|8Y: c`~ Rc.XSŒn_=ؿW7?}װ5G6t|8]:>#c`~ Rc.XSŒn_=/w~|چ>IsdCLJӥ3;1_ 5b1X,%Hٳ6w`MWq]B9}B4lԘ%cۗ e~9#:>0'Di?|V1HXbaL1K} R\`o\_|5G6t|8a:>!O acs˜b,t| HDsdCLJS#`0J?1K,)bI/A^G-=J#:>2'M1S 5b1X,%HK>"B`NZ쓦ecs˜b,tE3؛4>}#::اM,Ԙ%cۗ e/6}1=R#|ý_Жs~ Rc.XSŒn_=>|Sͭ9!{{J?1K,)bI/Aʞ7W;ߔҗ"i]eV1HXbaL1K} R6 _я]~K?~ᠷ[=nԘ%cۗ e່,qt8q9#ϙL+J?1K,)bI/A =GS}`z]P1HXbaL1K} Rpr":L;˕~ Rc.XSŒn_%::7쳎rԘ%cۗ e4G6tt8y9ngJ?1K,)bI/A]N^yGw pҏAj cXKvWGӗ%bԘ%cۗ e 쮎/-J?1K,)bI/A]A99L[K~ Rc.XSŒn_877;WE4G6tx}tL 5b1X,%HW_}ol͑ E8ؗ9L{˔~ Rc.XSŒn_$_~{`槊>AsdCke2Ԙ%cۗ e/_;)3)3yE8D;Y 5b1X,%Hן߼o~?|װ5G6D`C`[ 5b1X,%H& _Ǣ~6YAF4G6D`4w ˬcs˜b,tc_mD4G6D`C`[ 5b1X,%Hٳ_n4͑ M8L變 ~ Rc.XSŒn_=o'/5)͑ M7L變 ~ Rc.XSŒn_=؛x/a7k#:<\n$/3w~ Rc.XSŒn_=؛`}z4G6tx6H`_jg0+\,0%ݾ)K`wux6H`_jg0+\,0%ݾ)K`wux6H`_y0/*\,0%ݾ){gcԌȆצ =eYԘ%cۗ e."]Q5ؗ{̈3J?1K,)bI/A^}=Kb#:8\h$/4g~ Rc.XSŒn_=ψ;ZB]<8\h$gx̐J?1K,)bI/Aʞ W| 7Ϳ].Q33<f}iԘ%cۗ ez禟.|ר 3>ҏAj cXK7oOy;Q3sl'QeV1HXbaL1K} ROd79!{{J?1K,)bI/A^ط/𩧼6[#".|2+\,0%ݾ){Y`o_=nԘ%cۗ e7'nȆר y~gcs˜b,tE?hoilpJ?1K,)bI/A^?+ilpJ?1K,)bI/Aʞ ~oR#:8\`$g3U1HXbaL1K} RT|)͑ Q/3*\,0%ݾ){._~` Q/sy*\,0%ݾ){_rd|OsdCk\J?1K,)bI/Aʞ6}':͑ U.سs~ Rc.XSŒn_lP+v˯jklhpjˬcs˜b,t^fBӿ9õE{w.ҏAj cXKsSyM`/VH`WeV1HXbaL1K} Rjpbˬcs˜b,t%i{?E{qҏAj cXK%4G6D`C`[ 5b1X,%HY+{Ma*\,0%ݾ)K`wE`C`[ 5b1X,%HYõJE{qҏAj cXKvWkU"Ԙ%cۗ e7|6NZ"ȸk@ 5b1X,%Hٓ?~|WilhpJ^fܽҏAj cXK'w~+Z"̸{iԘ%cۗ e~YmC95 E{7SJ?1K,)bI/AƁCX?|Yѧhlhp:^hҏAj cXKa`sϟ؟W5G6wfH`/5j`B 5b1X,%H0X˷7L IDATe" Ԙ%cۗ e$'>95D{WJ?1K,)bI/AF+;~o[e"cԘ%cۗ eOgB!l͑ Y%؋ 8V1HXbaL1K} R6 gB>s[#|?4D{GJ?1K,)bI/A^؇-͑ V1HXbaL1K} R`SNJ?1K,)bI/A]V1HXbaL1K} RjpہCԘ%cۗ e ׬ k 8P1HXbaL1K} RjpԘ%cۗ e v. 
쫌 S1HXbaL1K} Rjp =Ԙ%cۗ e v׮ 댼!U1HXbaL1K} Rjp]Ԙ%cۗ eOIMk[sdCkuF*\,0%ݾ)K`w{vH`_iҏAj cXKvWkׇF^(\,0%ݾ)K`w{v}H`_kҏAj cXKQ`wNOUCsdCkF<+\,0%ݾ)X#|!}OJ?1K,)bI/A]V1HXbaL1K} R^ҏAj cXKvW<¶P1HXbaL1K} Rjp[GԘ%cۗ e[G/^W>ozKsdCkkԘ%cۗ e?oy㍿߾x߅Ȇvװ lJ?1K,)bI/AѿFȆvװ l[Ԙ%cۗ e6_G߿?/vSWGۯs=͑ a C/ J?1K,)bI/Aݻ?mROGſ~|e"hlhp ӐzSQ1HXbaL1K} zX؛~G}oO:Ȇvװ l7Ԙ%cۗ Ձ|/_O4G6seH`k *XbaL1K} X~,ݣψ"[!5^^\,0%ݾ+h͑ aC ~A/b.XSŒn_ᄇa͑ a"C _ cXKP[i/a"Hi\`}5͝l؅W8wm l[eeI p<6B` - Dd-6 z[P1HXbaL1K} Rvg']|>\*$uޗ+\,0%ݾ);$7T_׽χkvԘ%cۗ eG|>uZF!-4¬\ 5b1X,%H?96~Oy9õlB[hYҏAj cXK}w[{𝮷ᮯO.#z>\&$^+\,0%ݾ)/Zo>/rIM_O|Y{Ȇײ l7fJ?1K,)bI/Aʪ{?|ȟwp-V{c֭cs˜b,t:7 y꣟~m ]|>\$$^U+\,0%ݾ)+MV՟~χχkY+jԘ%cۗ e偽"lȶ@Pۚ#z>\"$ޙ5+\,0%ݾ9os_=c`o@:2 lwfJ?1K,)bI/A 7ۏgwG_}|BAH`4+V1HXbaL1K} bE`_OsdC7webc/͊~ Rc.XSŒn_%"1 l {ҏAj cXKvW<~ Rc.XSŒn_%z 9H` 7kU1HXbaL1K} RpMk|o֪cs˜b,t, 7T 5b1X,%HY5A[oYҏAj cXKvWOkvo:~ Rc.XSŒn_%z:\$|s֩cs˜b,t,ᚶ ૳JԘ%cۗ e 쮞4 _U*\,0%ݾ)K`wt)H`71Q 5b1X,%HY5-AwgJ?1K,)bI/A]=i m <+T1HXbaL1K} RpMCncYҏAj cXKvWOkv#o~ Rc.XSŒn_%H`72O 5b1X,%HY+{;+\,0%ݾ)K`wm3neYҏAj cXKvWJ?1K,)bI/A]=q͍H+Q1HXbaL1K} RpnoMZҏAj cXKvWkv{o:~ Rc.XSŒn_%z8\#;}Vcs˜b,t,諴 Ԙ%cۗ e ׸Fߥ5(\,0%ݾ)K`wpG`0.A 5b1X,%HY5N?їiJ?1K,)bI/A]=q]L+P1HXbaL1K} Rpˏcmcs˜b,t,T_ 5b1X,%HYõ?שҏAj cXKvW7wV1HXbaL1K} R<)J?1K,)bI/A]V1HXbaL1K} R^ҏAj cXKvWk}v7/TqԘ%cۗ e ׺~FߨJ?1K,)bI/A]uQ~ Rc.XSŒn_%?\#;}J+\,0%ݾ)K`wuG`w4JV1HXbaL1K} Rpics˜b,t,Z';UY 5b1X,%HYõN>їҏAj cXKvWk]|vW/UaԘ%cۗ e 쮶k^|v_oU]Ԙ%cۗ e 쮶k|v_oU]Ԙ%cۗ e 쮶k|vgUYԘ%cۗ e nؗ#׭cs˜b,t,=O)\,0%ݾ)K`wE`C`[ 5b1X,%HY+{,MaQ1HXbaL1K} Rj{G`w7bU1HXbaL1K} Rj{G`7fT1HXbaL1K} Rj{G`7fT1HXbaL1K} Rj{G`0jT1HXbaL1K} Rj{G`0nUT1HXbaL1K} Rj{G`0nUT1HXbaL1K} Rj{G`1rT1HXbaL1K} Rj{G`1rT1HXbaL1K} RjsG`1vS1HXbaL1K} RjsG`1vS1HXbaL1K} RjsG`2zS1HXbaL1K} RjsG`2zS1HXbaL1K} RN#G})\,0%ݾ)K`wE`ϓ-)lҏAj cXKvW<~ Rc.XSŒn_%"IT 5b1X,%HYG=VK 5b1X,%HYG=+VJ 5b1X,%HY7=+VJ 5b1X,%HY7=;VI 5b1X,%HY'=;VI 5b1X,%HY'=KVH 5b1X,%HY=[VG 5b1X,%HY}M`6Q1HXbaL1K} R%g`S*Ԙ%cۗ e n Yu+\,0%ݾ)K`wuK`ϒ4)lҏAj cXKvW,~ Rc.XSŒn_%>F_"J?1K,)bI/A]V1HXbaL1K} R'm`SԘ%cۗ e y6P1HXbaL1K} Rꖿ8 nԘ%cۗ e n Y6-P1HXbaL1K} R^ҏAj cXKvW,¾^ 5b1X,%HY[{{J?1K,)bI/A]سl jԘ%cۗ e 쮺5mbm˯cs˜b,t,=OU1HXbaL1K} R'y`SW*\,0%ݾ)K`wE`C`[ 5b1X,%HY+{Ma_cs˜b,t,=nԘ%cۗ e n:[ 5b1X,%HY+{Ma_cs˜b,t,=nԘ%cۗ e y 6}ҏAj cXKvW%yu+\,0%ݾ)K`wE`S!)J?1K,)bI/A]l {ҏAj cXKvW<~ Rc.XSŒn_%"RԘ%cۗ e 쮺5fKcs˜b,t,=O*\,0%ݾ)K`wE`C`[ 5b1X,%HY+{*Ma/S1HXbaL1K} RL`S؋~ Rc.XSŒn_%"!׭cs˜b,t,=O(\,0%ݾ)K`wus}؆F_J?1K,)bI/AzvY<wB`wE`C`|<[E{C<W1HXbaL1K} R8Og+\,0%ݾ)K`wE`C`[ 5b1X,%HY+{ZMaU1HXbaL1K} R^ҏAj cXKvW}:v5eS1HXbaL1K} ROخF߿lJ?1K,)bI/A]8 L 5b1X,%HY>G`})\,0%ݾ)K`wէl_o`.Ԙ%cۗ e DmlLcs˜b,t,U#~ Rc.XSŒn_%pw0ҏAj cXKvW7w'mm%Lcs˜b,t,=nԘ%cۗ e yJ6}ҏAj cXKvW<~ Rc.XSŒn_%p0ҏAj cXKvW}v7fQ1HXbaL1K} ROF,J?1K,)bI/A=u7蛘D 7b1X,%HYNF`}s\,0%ݾ)K`)lbܘ%cۗ e :軘B 7b1X,%HYNF`'0.P1ȍXbaL1K} RST~ rc.XSŒn_%{nv ocܘ%cۗ e :ۘ@ 7b1X,%HYNF`0:\,0%ݾ)K`͝F`'1>ګ\,0%ݾ)K`D`D`[ 7b1X,%HY'{MaQ1ȍXbaL1K} R^ʏAn cXKvONc4W1ȍXbaL1K} RSy*?1K,)bI/A6;w[ 9b1X,%HY^F`'2RZ+$\,0%ݾ)K`wԫLFJg䘋%cۗ e z%s˜b,t,Qd#S}-~ c.XSŒn_%;Ulv*Ar cXKvGeU1HXbaL1K} RWɌ ?1K,)bI/A5;V 9b1X,%HY^F`g3f*$\,0%ݾ)K`w7wics˜b,t,=n䘋%cۗ e VAr cXKvG\ l {J 9b1X,%HY^F`'4rZ*$\,0%ݾ)K`ӭF_OGu옋%cۗ e ~ics˜b,t,OR#S}? 
[binary PNG image data omitted: bayestestR/man/figures/unnamed-chunk-12-1.png, bayestestR/man/figures/logo.png]
bayestestR/man/describe_prior.Rd0000644000176200001440000000254414765755711016450 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/describe_prior.R
\name{describe_prior}
\alias{describe_prior}
\alias{describe_prior.brmsfit}
\title{Describe Priors}
\usage{
describe_prior(model, ...)

\method{describe_prior}{brmsfit}(model, parameters = NULL, ...)
}
\arguments{
\item{model}{A Bayesian model.}

\item{...}{Currently not used.}

\item{parameters}{Regular expression pattern that describes the parameters
that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are
filtered by default, so only parameters that typically appear in the
\code{summary()} are returned. Use \code{parameters} to select specific parameters
for the output.}
}
\description{
Returns a summary of the priors used in the model.
}
\examples{
\donttest{
library(bayestestR)

# rstanarm models
# -----------------------------------------------
if (require("rstanarm")) {
  model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars)
  describe_prior(model)
}

# brms models
# -----------------------------------------------
if (require("brms")) {
  model <- brms::brm(mpg ~ wt + cyl, data = mtcars)
  describe_prior(model)
}

# BayesFactor objects
# -----------------------------------------------
if (require("BayesFactor")) {
  bf <- ttestBF(x = rnorm(100, 1, 1))
  describe_prior(bf)
}
}
}
bayestestR/man/bayesfactor_models.Rd0000644000176200001440000001743414765755711017316 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bayesfactor_models.R
\name{bayesfactor_models}
\alias{bayesfactor_models}
\alias{bf_models}
\alias{bayesfactor_models.default}
\alias{update.bayesfactor_models}
\alias{as.matrix.bayesfactor_models}
\title{Bayes Factors (BF) for model comparison}
\usage{
bayesfactor_models(..., denominator = 1, verbose = TRUE)

bf_models(..., denominator = 1, verbose = TRUE)

\method{bayesfactor_models}{default}(..., denominator = 1, verbose = TRUE)

\method{update}{bayesfactor_models}(object, subset = NULL, reference = NULL, ...)

\method{as.matrix}{bayesfactor_models}(x, ...)
}
\arguments{
\item{...}{Fitted models (see details), all fit on the same data, or a single
\code{BFBayesFactor} object (see 'Details'). Ignored in \code{as.matrix()}, \code{update()}.
If the following named arguments are present, they are passed to
\code{\link[insight:get_loglikelihood]{insight::get_loglikelihood()}} (see details):
\itemize{
\item \code{estimator} (defaults to \code{"ML"})
\item \code{check_response} (defaults to \code{FALSE})
}}

\item{denominator}{Either an integer indicating which of the models to use as
the denominator, or a model to be used as a denominator.
Ignored for \code{BFBayesFactor}.} \item{verbose}{Toggle off warnings.} \item{object, x}{A \code{\link[=bayesfactor_models]{bayesfactor_models()}} object.} \item{subset}{Vector of model indices to keep or remove.} \item{reference}{Index of model to reference to, or \code{"top"} to reference to the best model, or \code{"bottom"} to reference to the worst model.} } \value{ A data frame containing the models' formulas (reconstructed fixed and random effects) and their \code{log(BF)}s (Use \code{as.numeric()} to extract the non-log Bayes factors; see examples), that prints nicely. } \description{ This function computes or extracts Bayes factors from fitted models. The \verb{bf_*} function is an alias of the main function. } \details{ If the passed models are supported by \strong{insight} the DV of all models will be tested for equality (else this is assumed to be true), and the models' terms will be extracted (allowing for follow-up analysis with \code{bayesfactor_inclusion}). \itemize{ \item For \code{brmsfit} or \code{stanreg} models, Bayes factors are computed using the \CRANpkg{bridgesampling} package. \itemize{ \item \code{brmsfit} models must have been fitted with \code{save_pars = save_pars(all = TRUE)}. \item \code{stanreg} models must have been fitted with a defined \code{diagnostic_file}. } \item For \code{BFBayesFactor}, \code{bayesfactor_models()} is mostly a wraparound \code{BayesFactor::extractBF()}. \item For all other model types, Bayes factors are computed using the BIC approximation. Note that BICs are extracted from using \link[insight:get_loglikelihood]{insight::get_loglikelihood}, see documentation there for options for dealing with transformed responses and REML estimation. } In order to correctly and precisely estimate Bayes factors, a rule of thumb are the 4 P's: \strong{P}roper \strong{P}riors and \strong{P}lentiful \strong{P}osteriors. How many? The number of posterior samples needed for testing is substantially larger than for estimation (the default of 4000 samples may not be enough in many cases). A conservative rule of thumb is to obtain 10 times more samples than would be required for estimation (\emph{Gronau, Singmann, & Wagenmakers, 2017}). If less than 40,000 samples are detected, \code{bayesfactor_models()} gives a warning. See also \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Interpreting Bayes Factors}{ A Bayes factor greater than 1 can be interpreted as evidence against the null, at which one convention is that a Bayes factor greater than 3 can be considered as "substantial" evidence against the null (and vice versa, a Bayes factor smaller than 1/3 indicates substantial evidence in favor of the null-model) (\cite{Wetzels et al. 2011}). 
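
For orientation, the following is a minimal, self-contained sketch (it mirrors the
\code{lm()} examples below; the object names and the choice of predictors are
illustrative assumptions, and the BIC approximation applies because these are
plain \code{lm()} models):

\preformatted{
library(bayestestR)

# two nested linear models fitted to the same data
m0 <- lm(mpg ~ 1, data = mtcars)
m1 <- lm(mpg ~ hp, data = mtcars)

# denominator = 1 uses the first (intercept-only) model as the reference
BFM <- bayesfactor_models(m0, m1, denominator = 1)

# the printed values are log(BF); as.numeric() returns the Bayes factors
# themselves, which can be read against the 3 (or 1/3) rule of thumb above
as.numeric(BFM)
}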
} \examples{ \dontshow{if (require("lme4") && require("BayesFactor") && require("rstanarm") && require("brms")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} # With lm objects: # ---------------- lm1 <- lm(mpg ~ 1, data = mtcars) lm2 <- lm(mpg ~ hp, data = mtcars) lm3 <- lm(mpg ~ hp + drat, data = mtcars) lm4 <- lm(mpg ~ hp * drat, data = mtcars) (BFM <- bayesfactor_models(lm1, lm2, lm3, lm4, denominator = 1)) # bayesfactor_models(lm2, lm3, lm4, denominator = lm1) # same result # bayesfactor_models(lm1, lm2, lm3, lm4, denominator = lm1) # same result update(BFM, reference = "bottom") as.matrix(BFM) as.numeric(BFM) lm2b <- lm(sqrt(mpg) ~ hp, data = mtcars) # Set check_response = TRUE for transformed responses bayesfactor_models(lm2b, denominator = lm2, check_response = TRUE) \donttest{ # With lmerMod objects: # --------------------- lmer1 <- lme4::lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) lmer2 <- lme4::lmer(Sepal.Length ~ Petal.Length + (Petal.Length | Species), data = iris) lmer3 <- lme4::lmer( Sepal.Length ~ Petal.Length + (Petal.Length | Species) + (1 | Petal.Width), data = iris ) bayesfactor_models(lmer1, lmer2, lmer3, denominator = 1, estimator = "REML" ) # rstanarm models # --------------------- # (note that a unique diagnostic_file MUST be specified in order to work) stan_m0 <- suppressWarnings(rstanarm::stan_glm(Sepal.Length ~ 1, data = iris, family = gaussian(), diagnostic_file = file.path(tempdir(), "df0.csv") )) stan_m1 <- suppressWarnings(rstanarm::stan_glm(Sepal.Length ~ Species, data = iris, family = gaussian(), diagnostic_file = file.path(tempdir(), "df1.csv") )) stan_m2 <- suppressWarnings(rstanarm::stan_glm(Sepal.Length ~ Species + Petal.Length, data = iris, family = gaussian(), diagnostic_file = file.path(tempdir(), "df2.csv") )) bayesfactor_models(stan_m1, stan_m2, denominator = stan_m0, verbose = FALSE) # brms models # -------------------- # (note the save_pars MUST be set to save_pars(all = TRUE) in order to work) brm1 <- brms::brm(Sepal.Length ~ 1, data = iris, save_pars = save_pars(all = TRUE)) brm2 <- brms::brm(Sepal.Length ~ Species, data = iris, save_pars = save_pars(all = TRUE)) brm3 <- brms::brm( Sepal.Length ~ Species + Petal.Length, data = iris, save_pars = save_pars(all = TRUE) ) bayesfactor_models(brm1, brm2, brm3, denominator = 1, verbose = FALSE) # BayesFactor # --------------------------- data(puzzles) BF <- BayesFactor::anovaBF(RT ~ shape * color + ID, data = puzzles, whichRandom = "ID", progress = FALSE ) BF bayesfactor_models(BF) # basically the same } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Gronau, Q. F., Singmann, H., & Wagenmakers, E. J. (2017). Bridgesampling: An R package for estimating normalizing constants. arXiv preprint arXiv:1710.08162. \item Kass, R. E., and Raftery, A. E. (1995). Bayes Factors. Journal of the American Statistical Association, 90(430), 773-795. \item Robert, C. P. (2016). The expected demise of the Bayes factor. Journal of Mathematical Psychology, 72, 33–37. \item Wagenmakers, E. J. (2007). A practical solution to the pervasive problems of p values. Psychonomic bulletin & review, 14(5), 779-804. \item Wetzels, R., Matzke, D., Lee, M. D., Rouder, J. N., Iverson, G. J., and Wagenmakers, E.-J. (2011). Statistical Evidence in Experimental Psychology: An Empirical Comparison Using 855 t Tests. Perspectives on Psychological Science, 6(3), 291–298. \doi{10.1177/1745691611406923} } } \author{ Mattan S. 
Ben-Shachar } bayestestR/man/dot-select_nums.Rd0000644000176200001440000000040014542333405016543 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.select_nums} \alias{.select_nums} \title{select numerics columns} \usage{ .select_nums(x) } \description{ select numerics columns } \keyword{internal} bayestestR/man/simulate_simpson.Rd0000644000176200001440000000256714677026462017064 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulate_simpson.R \name{simulate_simpson} \alias{simulate_simpson} \title{Simpson's paradox dataset simulation} \usage{ simulate_simpson( n = 100, r = 0.5, groups = 3, difference = 1, group_prefix = "G_" ) } \arguments{ \item{n}{The number of observations for each group to be generated (minimum 4).} \item{r}{A value or vector corresponding to the desired correlation coefficients.} \item{groups}{Number of groups (groups can be participants, clusters, anything).} \item{difference}{Difference between groups.} \item{group_prefix}{The prefix of the group name (e.g., "G_1", "G_2", "G_3", ...).} } \value{ A dataset. } \description{ Simpson's paradox, or the Yule-Simpson effect, is a phenomenon in probability and statistics, in which a trend appears in several different groups of data but disappears or reverses when these groups are combined. } \examples{ \dontshow{if (requireNamespace("MASS", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} data <- simulate_simpson(n = 10, groups = 5, r = 0.5) if (require("ggplot2")) { ggplot(data, aes(x = V1, y = V2)) + geom_point(aes(color = Group)) + geom_smooth(aes(color = Group), method = "lm") + geom_smooth(method = "lm") } \dontshow{\}) # examplesIf} } bayestestR/man/p_direction.Rd0000644000176200001440000003245215005147105015744 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_direction.R \name{p_direction} \alias{p_direction} \alias{pd} \alias{p_direction.numeric} \alias{p_direction.data.frame} \alias{p_direction.brmsfit} \alias{p_direction.get_predicted} \title{Probability of Direction (pd)} \usage{ p_direction(x, ...) pd(x, ...) \method{p_direction}{numeric}( x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ... ) \method{p_direction}{data.frame}( x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, rvar_col = NULL, ... ) \method{p_direction}{brmsfit}( x, effects = "fixed", component = "conditional", parameters = NULL, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, ... ) \method{p_direction}{get_predicted}( x, method = "direct", null = 0, as_p = FALSE, remove_na = TRUE, use_iterations = FALSE, verbose = TRUE, ... ) } \arguments{ \item{x}{A vector representing a posterior distribution, a data frame of posterior draws (samples be parameter). Can also be a Bayesian model.} \item{...}{Currently not used.} \item{method}{Can be \code{"direct"} or one of methods of \code{\link[=estimate_density]{estimate_density()}}, such as \code{"kernel"}, \code{"logspline"} or \code{"KernSmooth"}. See details.} \item{null}{The value considered as a "null" effect. Traditionally 0, but could also be 1 in the case of ratios of change (OR, IRR, ...).} \item{as_p}{If \code{TRUE}, the p-direction (pd) values are converted to a frequentist p-value using \code{\link[=pd_to_p]{pd_to_p()}}.} \item{remove_na}{Should missing values be removed before computation? 
Note that \code{Inf} (infinity) are \emph{not} removed.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{use_iterations}{Logical, if \code{TRUE} and \code{x} is a \code{get_predicted} object, (returned by \code{\link[insight:get_predicted]{insight::get_predicted()}}), the function is applied to the iterations instead of the predictions. This only applies to models that return iterations for predicted values (e.g., \code{brmsfit} models).} \item{verbose}{Toggle off warnings.} } \value{ Values between 0.5 and 1 \emph{or} between 0 and 1 (see above) corresponding to the probability of direction (pd). } \description{ Compute the \strong{Probability of Direction} (\emph{\strong{pd}}, also known as the Maximum Probability of Effect - \emph{MPE}). This can be interpreted as the probability that a parameter (described by its posterior distribution) is strictly positive or negative (whichever is the most probable). Although differently expressed, this index is fairly similar (\emph{i.e.}, is strongly correlated) to the frequentist \strong{p-value} (see details). 
} \note{
There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}.
}
\section{What is the \emph{pd}?}{
The Probability of Direction (pd) is an index of effect existence, representing
the certainty with which an effect goes in a particular direction (i.e., is
positive or negative / has a sign), typically ranging from 0.5 to 1 (but see
next section for cases where it can range between 0 and 1). Beyond its
simplicity of interpretation, understanding and computation, this index also
presents other interesting properties:
\itemize{
\item Like other posterior-based indices, \emph{pd} is solely based on the
posterior distributions and does not require any additional information from
the data or the model (e.g., such as priors, as in the case of Bayes factors).
\item It is robust to the scale of both the response variable and the predictors.
\item It is strongly correlated with the frequentist p-value, and can thus be
used to draw parallels and give some reference to readers non-familiar with
Bayesian statistics (Makowski et al., 2019).
}
}
\section{Relationship with the p-value}{
In most cases, it seems that the \emph{pd} has a direct correspondence with the
frequentist one-sided \emph{p}-value through the formula (for two-sided \emph{p}):
\ifelse{html}{\out{p = 2 * (1 - pd)}}{\eqn{p = 2 \times (1 - p_d)}}
Thus, a two-sided p-value of respectively \code{.1}, \code{.05}, \code{.01} and \code{.001}
would correspond approximately to a \emph{pd} of \verb{95\%}, \verb{97.5\%},
\verb{99.5\%} and \verb{99.95\%}. See \code{\link[=pd_to_p]{pd_to_p()}} for details.
}
\section{Possible Range of Values}{
The largest value \emph{pd} can take is 1 - the posterior is strictly directional.
However, the smallest value \emph{pd} can take depends on the parameter space
represented by the posterior.

\strong{For a continuous parameter space}, exact values of 0 (or any point null
value) are not possible, and so 100\% of the posterior has \emph{some} sign, some
positive, some negative. Therefore, the smallest the \emph{pd} can be is 0.5 -
with an equal posterior mass of positive and negative values. Values close to
0.5 \emph{cannot} be used to support the null hypothesis (that the parameter does
\emph{not} have a direction) in a similar way to how large p-values cannot be used
to support the null hypothesis (see \code{\link[=pd_to_p]{pd_to_p()}}; Makowski et al., 2019).

\strong{For a discrete parameter space or a parameter space that is a mixture
between discrete and continuous spaces}, exact values of 0 (or any point null
value) \emph{are} possible! Therefore, the smallest the \emph{pd} can be is 0 - with
100\% of the posterior mass on 0. Thus values close to 0 can be used to support
the null hypothesis (see van den Bergh et al., 2021).

Examples of posteriors representing discrete parameter space:
\itemize{
\item When a parameter can only take discrete values.
\item When a mixture prior/posterior is used (such as the spike-and-slab prior;
see van den Bergh et al., 2021).
\item When conducting Bayesian model averaging (e.g.,
\code{\link[=weighted_posteriors]{weighted_posteriors()}} or \code{brms::posterior_average}).
} }
\section{Methods of computation}{
The \emph{pd} is defined as:
\deqn{p_d = max({Pr(\hat{\theta} < \theta_{null}), Pr(\hat{\theta} > \theta_{null})})}{pd = max(mean(x < null), mean(x > null))}

The simplest and most direct way to compute the \emph{pd} is to compute the
proportion of positive (or larger than \code{null}) posterior samples, the
proportion of negative (or smaller than \code{null}) posterior samples, and take
the larger of the two. This "simple" method is the most straightforward, but
its precision is directly tied to the number of posterior draws.

The second approach relies on \link[=estimate_density]{density estimation}: It
starts by estimating the continuous-smooth density function (for which many
methods are available), and then computing the
\link[=area_under_curve]{area under the curve} (AUC) of the density curve on
either side of \code{null} and taking the maximum between them. Note that this
approach assumes a continuous density function, and so \strong{when the posterior
represents a (partially) discrete parameter space, only the direct method
\emph{must} be used} (see above).
}
\section{Model components}{
Possible values for the \code{component} argument depend on the model class.
Following are valid options:
\itemize{
\item \code{"all"}: returns all model components, applies to all models, but will
only have an effect for models with more than just the conditional model component.
\item \code{"conditional"}: only returns the conditional component, i.e. "fixed
effects" terms from the model. Will only have an effect for models with more
than just the conditional model component.
\item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or
similar models that may contain smooth terms).
\item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component.
\item \code{"location"}: returns location parameters such as \code{conditional},
\code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random
effects - depending on the \code{effects} argument - but no auxiliary parameters).
\item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma},
\code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are
returned.
}

For models of class \code{brmsfit} (package \strong{brms}), even more options are
possible for the \code{component} argument, which are not all documented in detail
here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}.
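
As a complement to the \emph{Methods of computation} section above, the following
is a minimal, self-contained sketch of the direct method (the simulated vector,
seed, and object names are illustrative assumptions, not part of the package's
own examples):

\preformatted{
library(bayestestR)

set.seed(123)
x <- rnorm(1000, mean = 0.4, sd = 1)  # stand-in for a posterior sample

# direct method: proportion of draws on the dominant side of the null (0)
max(mean(x > 0), mean(x < 0))

# p_direction() with the default direct method returns the same proportion
p_direction(x, method = "direct", null = 0)
}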
} \examples{ \dontshow{if (requireNamespace("rstanarm", quietly = TRUE) && requireNamespace("emmeans", quietly = TRUE) && requireNamespace("brms", quietly = TRUE) && requireNamespace("BayesFactor", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) # Simulate a posterior distribution of mean 1 and SD 1 # ---------------------------------------------------- posterior <- rnorm(1000, mean = 1, sd = 1) p_direction(posterior) p_direction(posterior, method = "kernel") # Simulate a dataframe of posterior distributions # ----------------------------------------------- df <- data.frame(replicate(4, rnorm(100))) p_direction(df) p_direction(df, method = "kernel") \donttest{ # rstanarm models # ----------------------------------------------- model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars, chains = 2, refresh = 0 ) p_direction(model) p_direction(model, method = "kernel") # emmeans # ----------------------------------------------- p_direction(emmeans::emtrends(model, ~1, "wt", data = mtcars)) # brms models # ----------------------------------------------- model <- brms::brm(mpg ~ wt + cyl, data = mtcars) p_direction(model) p_direction(model, method = "kernel") # BayesFactor objects # ----------------------------------------------- bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) p_direction(bf) p_direction(bf, method = "kernel") } \dontshow{\}) # examplesIf} \dontshow{if (requireNamespace("posterior", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} # Using "rvar_col" x <- data.frame(mu = c(0, 0.5, 1), sigma = c(1, 0.5, 0.25)) x$my_rvar <- posterior::rvar_rng(rnorm, 3, mean = x$mu, sd = x$sigma) x p_direction(x, rvar_col = "my_rvar") \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Makowski, D., Ben-Shachar, M. S., Chen, S. A., & Lüdecke, D. (2019). Indices of effect existence and significance in the Bayesian framework. Frontiers in psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767} \item van den Bergh, D., Haaf, J. M., Ly, A., Rouder, J. N., & Wagenmakers, E. J. (2021). A cautionary note on estimating effect size. Advances in Methods and Practices in Psychological Science, 4(1). \doi{10.1177/2515245921992035} } } \seealso{ \code{\link[=pd_to_p]{pd_to_p()}} to convert between Probability of Direction (pd) and p-value. } bayestestR/man/dot-prior_new_location.Rd0000644000176200001440000000051414542333405020124 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sensitivity_to_prior.R \name{.prior_new_location} \alias{.prior_new_location} \title{Set a new location for a prior} \usage{ .prior_new_location(prior, sign, magnitude = 10) } \description{ Set a new location for a prior } \keyword{internal} bayestestR/man/describe_posterior.Rd0000644000176200001440000002574515005147105017342 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/describe_posterior.R \name{describe_posterior} \alias{describe_posterior} \alias{describe_posterior.numeric} \alias{describe_posterior.data.frame} \alias{describe_posterior.stanreg} \title{Describe Posterior Distributions} \usage{ describe_posterior(posterior, ...) \method{describe_posterior}{numeric}( posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, verbose = TRUE, ... 
) \method{describe_posterior}{data.frame}( posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, BF = 1, rvar_col = NULL, verbose = TRUE, ... ) \method{describe_posterior}{stanreg}( posterior, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = c("p_direction", "rope"), rope_range = "default", rope_ci = 0.95, keep_iterations = FALSE, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, effects = "fixed", component = "location", parameters = NULL, BF = 1, verbose = TRUE, ... ) } \arguments{ \item{posterior}{A vector, data frame or model of posterior draws. \strong{bayestestR} supports a wide range of models (see \code{methods("describe_posterior")}) and not all of those are documented in the 'Usage' section, because methods for other classes mostly resemble the arguments of the \code{.numeric} method.} \item{...}{Additional arguments to be passed to or from methods.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} (see \code{\link[=map_estimate]{map_estimate()}}), \code{"trimmed"} (which is just \code{mean(x, trim = threshold)}), \code{"mode"} or \code{"all"}.} \item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively). Dispersion is not available for \code{"MAP"} or \code{"mode"} centrality indices.} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{0.95} (\verb{95\%}).} \item{ci_method}{The type of index used for Credible Interval. Can be \code{"ETI"} (default, see \code{\link[=eti]{eti()}}), \code{"HDI"} (see \code{\link[=hdi]{hdi()}}), \code{"BCI"} (see \code{\link[=bci]{bci()}}), \code{"SPI"} (see \code{\link[=spi]{spi()}}), or \code{"SI"} (see \code{\link[=si]{si()}}).} \item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"p_significance"} (or \code{"ps"}), \code{"p_rope"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \strong{bayestestR} function is called (e.g. \code{\link[=rope]{rope()}} or \code{\link[=p_direction]{p_direction()}}) and its results included in the summary output.} \item{rope_range}{ROPE's lower and higher bounds. Should be a vector of two values (e.g., \code{c(-0.1, 0.1)}), \code{"default"} or a list of numeric vectors of the same length as numbers of parameters. If \code{"default"}, the bounds are set to \code{x +- 0.1*SD(response)}.} \item{rope_ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.} \item{keep_iterations}{If \code{TRUE}, will keep all iterations (draws) of bootstrapped or Bayesian models. They will be added as additional columns named \verb{iter_1, iter_2, ...}. You can reshape them to a long format by running \code{\link[=reshape_iterations]{reshape_iterations()}}.} \item{bf_prior}{Distribution representing a prior for the computation of Bayes factors / SI. 
Used if the input is a posterior, otherwise (in the case of models) ignored.} \item{BF}{The amount of support required to be included in the support interval.} \item{verbose}{Toggle off warnings.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{diagnostic}{Diagnostic metrics to compute. Character (vector) or list with one or more of these options: \code{"ESS"}, \code{"Rhat"}, \code{"MCSE"} or \code{"all"}.} \item{priors}{Add the prior used for each parameter.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ Compute indices relevant to describe and characterize the posterior distributions. } \details{ One or more components of point estimates (like posterior mean or median), intervals and tests can be omitted from the summary output by setting the related argument to \code{NULL}. For example, \code{test = NULL} and \code{centrality = NULL} would only return the HDI (or CI). } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. 
"fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (all(insight::check_if_installed(c("logspline", "rstanarm", "emmeans", "BayesFactor"), quietly = TRUE))) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) x <- rnorm(1000) describe_posterior(x, verbose = FALSE) describe_posterior(x, centrality = "all", dispersion = TRUE, test = "all", verbose = FALSE ) describe_posterior(x, ci = c(0.80, 0.90), verbose = FALSE) df <- data.frame(replicate(4, rnorm(100))) describe_posterior(df, verbose = FALSE) describe_posterior( df, centrality = "all", dispersion = TRUE, test = "all", verbose = FALSE ) describe_posterior(df, ci = c(0.80, 0.90), verbose = FALSE) df <- data.frame(replicate(4, rnorm(20))) head(reshape_iterations( describe_posterior(df, keep_iterations = TRUE, verbose = FALSE) )) \donttest{ # rstanarm models # ----------------------------------------------- model <- suppressWarnings( rstanarm::stan_glm( mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0 ) ) describe_posterior(model) describe_posterior(model, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(model, ci = c(0.80, 0.90)) describe_posterior(model, rope_range = list(c(-10, 5), c(-0.2, 0.2), "default")) # emmeans estimates # ----------------------------------------------- describe_posterior(emmeans::emtrends(model, ~1, "wt")) # BayesFactor objects # ----------------------------------------------- bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) describe_posterior(bf) describe_posterior(bf, centrality = "all", dispersion = TRUE, test = "all") describe_posterior(bf, ci = c(0.80, 0.90)) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019). \emph{Indices of Effect Existence and Significance in the Bayesian Framework}. Frontiers in Psychology 2019;10:2767. 
\doi{10.3389/fpsyg.2019.02767} \item \href{https://easystats.github.io/bayestestR/articles/region_of_practical_equivalence.html}{Region of Practical Equivalence (ROPE)} \item \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{Bayes factors} } } bayestestR/man/bayestestR-package.Rd0000644000176200001440000000471015001512506017154 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bayestestR-package.R \docType{package} \name{bayestestR-package} \alias{bayestestR-package} \alias{bayestestR} \title{bayestestR: Describing Effects and their Uncertainty, Existence and Significance within the Bayesian Framework} \description{ Existing R packages allow users to easily fit a large variety of models and extract and visualize the posterior draws. However, most of these packages only return a limited set of indices (e.g., point-estimates and CIs). \strong{bayestestR} provides a comprehensive and consistent set of functions to analyze and describe posterior distributions generated by a variety of models objects, including popular modeling packages such as \strong{rstanarm}, \strong{brms} or \strong{BayesFactor}. References: \itemize{ \item Makowski et al. (2019) \doi{10.21105/joss.01541} \item Makowski et al. (2019) \doi{10.3389/fpsyg.2019.02767} } } \details{ \code{bayestestR} } \seealso{ Useful links: \itemize{ \item \url{https://easystats.github.io/bayestestR/} \item Report bugs at \url{https://github.com/easystats/bayestestR/issues} } } \author{ \strong{Maintainer}: Dominique Makowski \email{officialeasystats@gmail.com} (\href{https://orcid.org/0000-0001-5375-9967}{ORCID}) Authors: \itemize{ \item Daniel Lüdecke \email{d.luedecke@uke.de} (\href{https://orcid.org/0000-0002-8895-3206}{ORCID}) \item Mattan S. Ben-Shachar \email{matanshm@post.bgu.ac.il} (\href{https://orcid.org/0000-0002-4287-4801}{ORCID}) \item Indrajeet Patil \email{patilindrajeet.science@gmail.com} (\href{https://orcid.org/0000-0003-1995-6531}{ORCID}) \item Micah K. Wilson \email{micah.k.wilson@curtin.edu.au} (\href{https://orcid.org/0000-0003-4143-7308}{ORCID}) \item Brenton M. Wiernik \email{brenton@wiernik.org} (\href{https://orcid.org/0000-0001-9560-6336}{ORCID}) } Other contributors: \itemize{ \item Paul-Christian Bürkner \email{paul.buerkner@gmail.com} [reviewer] \item Tristan Mahr \email{tristan.mahr@wisc.edu} (\href{https://orcid.org/0000-0002-8890-5116}{ORCID}) [reviewer] \item Henrik Singmann \email{singmann@gmail.com} (\href{https://orcid.org/0000-0002-4842-3657}{ORCID}) [contributor] \item Quentin F. Gronau (\href{https://orcid.org/0000-0001-5510-6943}{ORCID}) [contributor] \item Sam Crawley \email{sam@crawley.nz} (\href{https://orcid.org/0000-0002-7847-0411}{ORCID}) [contributor] } } \keyword{internal} bayestestR/man/convert_bayesian_as_frequentist.Rd0000644000176200001440000000315614542333405022120 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/convert_bayesian_to_frequentist.R \name{convert_bayesian_as_frequentist} \alias{convert_bayesian_as_frequentist} \alias{bayesian_as_frequentist} \title{Convert (refit) a Bayesian model to frequentist} \usage{ convert_bayesian_as_frequentist(model, data = NULL, REML = TRUE) bayesian_as_frequentist(model, data = NULL, REML = TRUE) } \arguments{ \item{model}{A Bayesian model.} \item{data}{Data used by the model. 
If \code{NULL}, will try to extract it from the model.} \item{REML}{For mixed effects, should models be estimated using restricted maximum likelihood (REML) (\code{TRUE}, default) or maximum likelihood (\code{FALSE})?} } \description{ Refit Bayesian model as frequentist. Can be useful for comparisons. } \examples{ \dontshow{if (require("rstanarm")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ # Rstanarm ---------------------- # Simple regressions model <- rstanarm::stan_glm(Sepal.Length ~ Species, data = iris, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) model <- rstanarm::stan_glm(vs ~ mpg, family = "binomial", data = mtcars, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) # Mixed models model <- rstanarm::stan_glmer( Sepal.Length ~ Petal.Length + (1 | Species), data = iris, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) model <- rstanarm::stan_glmer(vs ~ mpg + (1 | cyl), family = "binomial", data = mtcars, chains = 2, refresh = 0 ) bayesian_as_frequentist(model) } \dontshow{\}) # examplesIf} } bayestestR/man/sexit_thresholds.Rd0000644000176200001440000000300414542333405017034 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sexit_thresholds.R \name{sexit_thresholds} \alias{sexit_thresholds} \title{Find Effect Size Thresholds} \usage{ sexit_thresholds(x, ...) } \arguments{ \item{x}{Vector representing a posterior distribution. Can also be a \code{stanreg} or \code{brmsfit} model.} \item{...}{Currently not used.} } \description{ This function attempts at automatically finding suitable default values for a "significant" (i.e., non-negligible) and "large" effect. This is to be used with care, and the chosen threshold should always be explicitly reported and justified. See the detail section in \code{\link[=sexit]{sexit()}} for more information. } \examples{ sexit_thresholds(rnorm(1000)) \donttest{ if (require("rstanarm")) { model <- suppressWarnings(stan_glm( mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0 )) sexit_thresholds(model) model <- suppressWarnings( stan_glm(vs ~ mpg, data = mtcars, family = "binomial", refresh = 0) ) sexit_thresholds(model) } if (require("brms")) { model <- brm(mpg ~ wt + cyl, data = mtcars) sexit_thresholds(model) } if (require("BayesFactor")) { bf <- ttestBF(x = rnorm(100, 1, 1)) sexit_thresholds(bf) } } } \references{ Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. \doi{10.1177/2515245918771304}. } bayestestR/man/mediation.Rd0000644000176200001440000001411114765755712015433 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/mediation.R \name{mediation} \alias{mediation} \alias{mediation.brmsfit} \title{Summary of Bayesian multivariate-response mediation-models} \usage{ mediation(model, ...) \method{mediation}{brmsfit}( model, treatment, mediator, response = NULL, centrality = "median", ci = 0.95, method = "ETI", ... ) } \arguments{ \item{model}{A \code{brmsfit} or \code{stanmvreg} object.} \item{...}{Not used.} \item{treatment}{Character, name of the treatment variable (or direct effect) in a (multivariate response) mediator-model. If missing, \code{mediation()} tries to find the treatment variable automatically, however, this may fail.} \item{mediator}{Character, name of the mediator variable in a (multivariate response) mediator-model. 
If missing, \code{mediation()} tries to find the mediator variable automatically, however, this may fail.} \item{response}{A named character vector, indicating the names of the response variables to be used for the mediation analysis. Usually can be \code{NULL}, in which case these variables are retrieved automatically. If not \code{NULL}, names should match the names of the model formulas, \code{names(insight::find_response(model, combine = TRUE))}. This can be useful if, for instance, the mediator variable used as predictor has a different name from the mediator variable used as response. This might occur when the mediator is transformed in one model, but used "as is" as response variable in the other model. Example: The mediator \code{m} is used as response variable, but the centered version \code{m_center} is used as mediator variable. The second response variable (for the treatment model, with the mediator as additional predictor), \code{y}, is not transformed. Then we could use \code{response} like this: \code{mediation(model, response = c(m = "m_center", y = "y"))}.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} (see \code{\link[=map_estimate]{map_estimate()}}), \code{"trimmed"} (which is just \code{mean(x, trim = threshold)}), \code{"mode"} or \code{"all"}.} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{0.95} (\verb{95\%}).} \item{method}{Can be \link[=eti]{"ETI"} (default), \link[=hdi]{"HDI"}, \link[=bci]{"BCI"}, \link[=spi]{"SPI"} or \link[=si]{"SI"}.} } \value{ A data frame with direct, indirect, mediator and total effect of a multivariate-response mediation-model, as well as the proportion mediated. The effect sizes are median values of the posterior samples (use \code{centrality} for other centrality indices). } \description{ \code{mediation()} is a short summary for multivariate-response mediation-models, i.e. this function computes average direct and average causal mediation effects of multivariate response models. } \details{ \code{mediation()} returns a data frame with information on the \emph{direct effect} (mean value of posterior samples from \code{treatment} of the outcome model), \emph{mediator effect} (mean value of posterior samples from \code{mediator} of the outcome model), \emph{indirect effect} (mean value of the multiplication of the posterior samples from \code{mediator} of the outcome model and the posterior samples from \code{treatment} of the mediation model) and the total effect (mean value of sums of posterior samples used for the direct and indirect effect). The \emph{proportion mediated} is the indirect effect divided by the total effect. For all values, the \verb{95\%} credible intervals are calculated by default. Use \code{ci} to calculate a different interval. The arguments \code{treatment} and \code{mediator} do not necessarily need to be specified. If missing, \code{mediation()} tries to find the treatment and mediator variable automatically. If this does not work, specify these variables. The direct effect is also called \emph{average direct effect} (ADE), the indirect effect is also called \emph{average causal mediation effects} (ACME). See also \emph{Tingley et al. 2014} and \emph{Imai et al. 2010}. 
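To make the arithmetic explicit, the following schematic sketch shows how these quantities relate to one another at the level of the posterior draws (the draw vectors \code{a}, \code{b} and \code{direct} are hypothetical placeholders used for illustration, not objects created by \code{mediation()}):

\preformatted{# a:      posterior draws of 'treatment' from the mediation (mediator) model
# b:      posterior draws of 'mediator' from the outcome model
# direct: posterior draws of 'treatment' from the outcome model (ADE)

indirect <- a * b                 # indirect effect (ACME), draw by draw
total <- direct + indirect        # total effect
prop_mediated <- indirect / total # proportion mediated
}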
} \note{ There is an \code{as.data.frame()} method that returns the posterior samples of the effects, which can be used for further processing in the \strong{bayestestR} package. } \examples{ \dontshow{if (require("mediation") && require("brms") && require("rstanarm")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ library(mediation) library(brms) library(rstanarm) # load sample data data(jobs) set.seed(123) # linear models, for mediation analysis b1 <- lm(job_seek ~ treat + econ_hard + sex + age, data = jobs) b2 <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data = jobs) # mediation analysis, for comparison with Stan models m1 <- mediate(b1, b2, sims = 1000, treat = "treat", mediator = "job_seek") # Fit Bayesian mediation model in brms f1 <- bf(job_seek ~ treat + econ_hard + sex + age) f2 <- bf(depress2 ~ treat + job_seek + econ_hard + sex + age) m2 <- brm(f1 + f2 + set_rescor(FALSE), data = jobs, refresh = 0) # Fit Bayesian mediation model in rstanarm m3 <- suppressWarnings(stan_mvmer( list( job_seek ~ treat + econ_hard + sex + age + (1 | occp), depress2 ~ treat + job_seek + econ_hard + sex + age + (1 | occp) ), data = jobs, refresh = 0 )) summary(m1) mediation(m2, centrality = "mean", ci = 0.95) mediation(m3, centrality = "mean", ci = 0.95) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Imai, K., Keele, L. and Tingley, D. (2010) A General Approach to Causal Mediation Analysis, Psychological Methods, Vol. 15, No. 4 (December), pp. 309-334. \item Tingley, D., Yamamoto, T., Hirose, K., Imai, K. and Keele, L. (2014). mediation: R package for Causal Mediation Analysis, Journal of Statistical Software, Vol. 59, No. 5, pp. 1-38. } } \seealso{ The \pkg{mediation} package for a causal mediation analysis in the frequentist framework. } bayestestR/man/as.data.frame.density.Rd0000644000176200001440000000061614542333405017531 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate_density.R \name{as.data.frame.density} \alias{as.data.frame.density} \title{Coerce to a Data Frame} \usage{ \method{as.data.frame}{density}(x, ...) } \arguments{ \item{x}{any \R object.} \item{...}{additional arguments to be passed to or from methods.} } \description{ Coerce to a Data Frame } bayestestR/man/simulate_prior.Rd0000644000176200001440000000644115005147105016502 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulate_priors.R \name{simulate_prior} \alias{simulate_prior} \alias{simulate_prior.brmsfit} \title{Returns Priors of a Model as Empirical Distributions} \usage{ simulate_prior(model, n = 1000, ...) \method{simulate_prior}{brmsfit}( model, n = 1000, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ... ) } \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, \code{brmsfit}, \code{blavaan}, or \code{MCMCglmm} object.} \item{n}{Size of the simulated prior distributions.} \item{...}{Currently not used.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). 
\item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{verbose}{Toggle off warnings.} } \description{ Transforms priors information to actual distributions. } \examples{ \donttest{ library(bayestestR) if (require("rstanarm")) { model <- suppressWarnings( stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) ) simulate_prior(model) } } } \seealso{ \code{\link[=unupdate]{unupdate()}} for directly sampling from the prior distribution (useful for complex priors and designs). } bayestestR/man/check_prior.Rd0000644000176200001440000001136415005147105015734 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/check_prior.R \name{check_prior} \alias{check_prior} \alias{check_prior.brmsfit} \title{Check if Prior is Informative} \usage{ check_prior(model, method = "gelman", simulate_priors = TRUE, ...) \method{check_prior}{brmsfit}( model, method = "gelman", simulate_priors = TRUE, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ... ) } \arguments{ \item{model}{A \code{stanreg}, \code{stanfit}, \code{brmsfit}, \code{blavaan}, or \code{MCMCglmm} object.} \item{method}{Can be \code{"gelman"} or \code{"lakeland"}. For the \code{"gelman"} method, if the SD of the posterior is more than 0.1 times the SD of the prior, then the prior is considered as informative. For the \code{"lakeland"} method, the prior is considered as informative if the posterior falls within the \verb{95\%} HDI of the prior.} \item{simulate_priors}{Should prior distributions be simulated using \code{\link[=simulate_prior]{simulate_prior()}} (default; faster) or sampled via \code{\link[=unupdate]{unupdate()}} (slower, more accurate).} \item{...}{Currently not used.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. 
For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{verbose}{Toggle off warnings.} } \value{ A data frame with two columns: The parameter names and the quality of the prior (which might be \code{"informative"}, \code{"uninformative"}) or \code{"not determinable"} if the prior distribution could not be determined). } \description{ Performs a simple test to check whether the prior is informative to the posterior. This idea, and the accompanying heuristics, were discussed in \emph{Gelman et al. 2017}. } \examples{ \dontshow{if (require("rstanarm") && require("see")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ library(bayestestR) model <- rstanarm::stan_glm(mpg ~ wt + am, data = mtcars, chains = 1, refresh = 0) check_prior(model, method = "gelman") check_prior(model, method = "lakeland") # An extreme example where both methods diverge: model <- rstanarm::stan_glm(mpg ~ wt, data = mtcars[1:3, ], prior = normal(-3.3, 1, FALSE), prior_intercept = normal(0, 1000, FALSE), refresh = 0 ) check_prior(model, method = "gelman") check_prior(model, method = "lakeland") # can provide visual confirmation to the Lakeland method plot(si(model, verbose = FALSE)) } \dontshow{\}) # examplesIf} } \references{ Gelman, A., Simpson, D., and Betancourt, M. (2017). The Prior Can Often Only Be Understood in the Context of the Likelihood. Entropy, 19(10), 555. 
\doi{10.3390/e19100555} } bayestestR/man/contr.equalprior.Rd0000644000176200001440000001504714542333405016762 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/contr.equalprior.R \name{contr.equalprior} \alias{contr.equalprior} \alias{contr.bayes} \alias{contr.orthonorm} \alias{contr.equalprior_pairs} \alias{contr.equalprior_deviations} \title{Contrast Matrices for Equal Marginal Priors in Bayesian Estimation} \usage{ contr.equalprior(n, contrasts = TRUE, sparse = FALSE) contr.equalprior_pairs(n, contrasts = TRUE, sparse = FALSE) contr.equalprior_deviations(n, contrasts = TRUE, sparse = FALSE) } \arguments{ \item{n}{a vector of levels for a factor, or the number of levels.} \item{contrasts}{a logical indicating whether contrasts should be computed.} \item{sparse}{logical indicating if the result should be sparse (of class \code{\link[Matrix:dgCMatrix-class]{dgCMatrix}}), using package \href{https://CRAN.R-project.org/package=Matrix}{\pkg{Matrix}}.} } \value{ A \code{matrix} with n rows and k columns, with k=n-1 if contrasts is \code{TRUE} and k=n if contrasts is \code{FALSE}. } \description{ Build contrasts for factors with equal marginal priors on all levels. The 3 functions give the same orthogonal contrasts, but are scaled differently to allow different prior specifications (see 'Details'). Implementation from Singmann & Gronau's \href{https://github.com/bayesstuff/bfrms/}{\code{bfrms}}, following the description in Rouder, Morey, Speckman, & Province (2012, p. 363). } \details{ When using \code{\link[stats:contrast]{stats::contr.treatment}}, each dummy variable is the difference between each level and the reference level. While this is useful if setting different priors for each coefficient, it should not be used if one is trying to set a general prior for differences between means, as it (as well as \code{\link[stats:contrast]{stats::contr.sum}} and others) results in unequal marginal priors on the means and the differences between them. \if{html}{\out{

}}\preformatted{library(brms)

data <- data.frame(
  group = factor(rep(LETTERS[1:4], each = 3)),
  y = rnorm(12)
)

contrasts(data$group) # R's default contr.treatment
#>   B C D
#> A 0 0 0
#> B 1 0 0
#> C 0 1 0
#> D 0 0 1

model_prior <- brm(
  y ~ group, data = data,
  sample_prior = "only",
  # Set the same priors on the 3 dummy variables
  # (Using an arbitrary scale)
  prior = set_prior("normal(0, 10)", coef = c("groupB", "groupC", "groupD"))
)

est <- emmeans::emmeans(model_prior, pairwise ~ group)
point_estimate(est, centr = "mean", disp = TRUE)
#> Point Estimate
#>
#> Parameter |  Mean |    SD
#> -------------------------
#> A         | -0.01 |  6.35
#> B         | -0.10 |  9.59
#> C         |  0.11 |  9.55
#> D         | -0.16 |  9.52
#> A - B     |  0.10 |  9.94
#> A - C     | -0.12 |  9.96
#> A - D     |  0.15 |  9.87
#> B - C     | -0.22 | 14.38
#> B - D     |  0.05 | 14.14
#> C - D     |  0.27 | 14.00
}\if{html}{\out{
}} We can see that the priors for means aren't all the same (\code{A} having a narrower prior), and likewise for the pairwise differences (priors for differences from \code{A} are narrower). The solution is to use one of the methods provided here, which \emph{do} result in marginally equal priors on the means and on the differences between them. Though this will obscure the interpretation of parameters, setting equal priors on all means in a factor and on their differences is important for the correct estimation of Bayes factors for contrasts and order restrictions of multi-level factors (where \code{k>2}). See info on specifying correct priors for factors with more than 2 levels in \href{https://easystats.github.io/bayestestR/articles/bayes_factors.html}{the Bayes factors vignette}. \emph{\strong{NOTE:}} When setting priors on these dummy variables, always: \enumerate{ \item Use priors that are \strong{centered on 0}! Other location/centered priors are meaningless! \item Use \strong{identically-scaled priors} on all the dummy variables of a single factor! } \code{contr.equalprior} returns the original orthogonal-normal contrasts as described in Rouder, Morey, Speckman, & Province (2012, p. 363). Setting \code{contrasts = FALSE} returns the \eqn{I_{n} - \frac{1}{n}} matrix. \subsection{\code{contr.equalprior_pairs}}{ Useful for setting priors in terms of pairwise differences between means - the scales of the priors define the prior distribution of the differences between all pairs of means (e.g., \code{A - B}, \code{B - C}, etc.). \if{html}{\out{
}}\preformatted{contrasts(data$group) <- contr.equalprior_pairs
contrasts(data$group)
#>         [,1]       [,2]       [,3]
#> A  0.0000000  0.6123724  0.0000000
#> B -0.1893048 -0.2041241  0.5454329
#> C -0.3777063 -0.2041241 -0.4366592
#> D  0.5670111 -0.2041241 -0.1087736

model_prior <- brm(
  y ~ group, data = data,
  sample_prior = "only",
  # Set the same priors on the 3 dummy variables
  # (Using an arbitrary scale)
  prior = set_prior("normal(0, 10)", coef = c("group1", "group2", "group3"))
)

est <- emmeans(model_prior, pairwise ~ group)
point_estimate(est, centr = "mean", disp = TRUE)
#> Point Estimate
#>
#> Parameter |  Mean |    SD
#> -------------------------
#> A         | -0.31 |  7.46
#> B         | -0.24 |  7.47
#> C         | -0.34 |  7.50
#> D         | -0.30 |  7.25
#> A - B     | -0.08 | 10.00
#> A - C     |  0.03 | 10.03
#> A - D     | -0.01 |  9.85
#> B - C     |  0.10 | 10.28
#> B - D     |  0.06 |  9.94
#> C - D     | -0.04 | 10.18
}\if{html}{\out{
}} All means have the same prior distribution, and the distribution of the differences matches the prior we set of \code{"normal(0, 10)"}. Success! } \subsection{\code{contr.equalprior_deviations}}{ Useful for setting priors in terms of the deviations of each mean from the grand mean - the scales of the priors defines the prior distribution of the distance (above, below) the mean of one of the levels might have from the overall mean. (See examples.) } } \examples{ contr.equalprior(2) # Q_2 in Rouder et al. (2012, p. 363) contr.equalprior(5) # equivalent to Q_5 in Rouder et al. (2012, p. 363) ## check decomposition Q3 <- contr.equalprior(3) Q3 \%*\% t(Q3) ## 2/3 on diagonal and -1/3 on off-diagonal elements } \references{ Rouder, J. N., Morey, R. D., Speckman, P. L., & Province, J. M. (2012). Default Bayes factors for ANOVA designs. \emph{Journal of Mathematical Psychology}, 56(5), 356-374. https://doi.org/10.1016/j.jmp.2012.08.001 } bayestestR/man/estimate_density.Rd0000644000176200001440000002176615005147105017025 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimate_density.R \name{estimate_density} \alias{estimate_density} \alias{estimate_density.data.frame} \alias{estimate_density.brmsfit} \title{Density Estimation} \usage{ estimate_density(x, ...) \method{estimate_density}{data.frame}( x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", ci = NULL, select = NULL, by = NULL, rvar_col = NULL, ... ) \method{estimate_density}{brmsfit}( x, method = "kernel", precision = 2^10, extend = FALSE, extend_scale = 0.1, bw = "SJ", effects = "fixed", component = "conditional", parameters = NULL, ... ) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model. \strong{bayestestR} supports a wide range of models (see, for example, \code{methods("hdi")}) and not all of those are documented in the 'Usage' section, because methods for other classes mostly resemble the arguments of the \code{.numeric} or \code{.data.frame}methods.} \item{...}{Currently not used.} \item{method}{Density estimation method. Can be \code{"kernel"} (default), \code{"logspline"} or \code{"KernSmooth"}.} \item{precision}{Number of points of density data. See the \code{n} parameter in \code{density}.} \item{extend}{Extend the range of the x axis by a factor of \code{extend_scale}.} \item{extend_scale}{Ratio of range by which to extend the x axis. A value of \code{0.1} means that the x axis will be extended by \code{1/10} of the range of the data.} \item{bw}{See the eponymous argument in \code{density}. Here, the default has been changed for \code{"SJ"}, which is recommended.} \item{ci}{The confidence interval threshold. Only used when \code{method = "kernel"}. This feature is experimental, use with caution.} \item{select}{Character vector of column names. If \code{NULL} (the default), all numeric variables will be selected. Other arguments from \code{datawizard::extract_column_names()} (such as \code{exclude}) can also be used.} \item{by}{Optional character vector. If not \code{NULL} and input is a data frame, density estimation is performed for each group (subsets) indicated by \code{by}. See examples.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. 
See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \description{ This function is a wrapper over different methods of density estimation. By default, it uses the base R \code{density} with by default uses a different smoothing bandwidth (\code{"SJ"}) from the legacy default implemented the base R \code{density} function (\code{"nrd0"}). However, Deng and Wickham suggest that \code{method = "KernSmooth"} is the fastest and the most accurate. } \note{ There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. 
\item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (require("logspline") && require("KernSmooth") && require("mclust") && require("emmeans") && require("rstanarm") && require("brms")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) set.seed(1) x <- rnorm(250, mean = 1) # Basic usage density_kernel <- estimate_density(x) # default method is "kernel" hist(x, prob = TRUE) lines(density_kernel$x, density_kernel$y, col = "black", lwd = 2) lines(density_kernel$x, density_kernel$CI_low, col = "gray", lty = 2) lines(density_kernel$x, density_kernel$CI_high, col = "gray", lty = 2) legend("topright", legend = c("Estimate", "95\% CI"), col = c("black", "gray"), lwd = 2, lty = c(1, 2) ) # Other Methods density_logspline <- estimate_density(x, method = "logspline") density_KernSmooth <- estimate_density(x, method = "KernSmooth") density_mixture <- estimate_density(x, method = "mixture") hist(x, prob = TRUE) lines(density_kernel$x, density_kernel$y, col = "black", lwd = 2) lines(density_logspline$x, density_logspline$y, col = "red", lwd = 2) lines(density_KernSmooth$x, density_KernSmooth$y, col = "blue", lwd = 2) lines(density_mixture$x, density_mixture$y, col = "green", lwd = 2) # Extension density_extended <- estimate_density(x, extend = TRUE) density_default <- estimate_density(x, extend = FALSE) hist(x, prob = TRUE) lines(density_extended$x, density_extended$y, col = "red", lwd = 3) lines(density_default$x, density_default$y, col = "black", lwd = 3) # Multiple columns head(estimate_density(iris)) head(estimate_density(iris, select = "Sepal.Width")) # Grouped data head(estimate_density(iris, by = "Species")) head(estimate_density(iris$Petal.Width, by = iris$Species)) \donttest{ # rstanarm models # ----------------------------------------------- library(rstanarm) model <- suppressWarnings( stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) ) head(estimate_density(model)) library(emmeans) head(estimate_density(emtrends(model, ~1, "wt", data = mtcars))) # brms models # ----------------------------------------------- library(brms) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) estimate_density(model) } \dontshow{\}) # examplesIf} } \references{ Deng, H., & Wickham, H. (2011). Density estimation in R. Electronic publication. } bayestestR/man/eti.Rd0000644000176200001440000002414115005147105014222 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/eti.R \name{eti} \alias{eti} \alias{eti.numeric} \alias{eti.data.frame} \alias{eti.brmsfit} \alias{eti.get_predicted} \title{Equal-Tailed Interval (ETI)} \usage{ eti(x, ...) \method{eti}{numeric}(x, ci = 0.95, verbose = TRUE, ...) \method{eti}{data.frame}(x, ci = 0.95, rvar_col = NULL, verbose = TRUE, ...) 
\method{eti}{brmsfit}( x, ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ... ) \method{eti}{get_predicted}(x, ci = 0.95, use_iterations = FALSE, verbose = TRUE, ...) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model. \strong{bayestestR} supports a wide range of models (see, for example, \code{methods("hdi")}) and not all of those are documented in the 'Usage' section, because methods for other classes mostly resemble the arguments of the \code{.numeric} or \code{.data.frame}methods.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Default to \code{.95} (\verb{95\%}).} \item{verbose}{Toggle off warnings.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{use_iterations}{Logical, if \code{TRUE} and \code{x} is a \code{get_predicted} object, (returned by \code{\link[insight:get_predicted]{insight::get_predicted()}}), the function is applied to the iterations instead of the predictions. 
This only applies to models that return iterations for predicted values (e.g., \code{brmsfit} models).} } \value{ A data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. } } \description{ Compute the \strong{Equal-Tailed Interval (ETI)} of posterior distributions using the quantiles method. The probability of being below this interval is equal to the probability of being above it. The ETI can be used in the context of uncertainty characterisation of posterior distributions as \strong{Credible Interval (CI)}. } \details{ Unlike equal-tailed intervals (see \code{\link[=eti]{eti()}}) that typically exclude \verb{2.5\%} from each tail of the distribution and always include the median, the HDI is \emph{not} equal-tailed and therefore always includes the mode(s) of posterior distributions. While this can be useful to better represent the credibility mass of a distribution, the HDI also has some limitations. See \code{\link[=spi]{spi()}} for details. The \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{\verb{95\%} or \verb{89\%} Credible Intervals (CI)} are two reasonable ranges to characterize the uncertainty related to the estimation (see \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{here} for a discussion about the differences between these two values). The \verb{89\%} intervals (\code{ci = 0.89}) are deemed to be more stable than, for instance, \verb{95\%} intervals (\emph{Kruschke, 2014}). An effective sample size of at least 10.000 is recommended if one wants to estimate \verb{95\%} intervals with high precision (\emph{Kruschke, 2014, p. 183ff}). Unfortunately, the default number of posterior samples for most Bayes packages (e.g., \code{rstanarm} or \code{brms}) is only 4.000 (thus, you might want to increase it when fitting your model). Moreover, 89 indicates the arbitrariness of interval limits - its only remarkable property is being the highest prime number that does not exceed the already unstable \verb{95\%} threshold (\emph{McElreath, 2015}). However, \verb{95\%} has some \href{https://easystats.github.io/blog/posts/bayestestr_95/}{advantages too}. For instance, it shares (in the case of a normal posterior distribution) an intuitive relationship with the standard deviation and it conveys a more accurate image of the (artificial) bounds of the distribution. Also, because it is wider, it makes analyses more conservative (i.e., the probability of covering zero is larger for the \verb{95\%} CI than for lower ranges such as \verb{89\%}), which is a good thing in the context of the reproducibility crisis. A \verb{95\%} equal-tailed interval (ETI) has \verb{2.5\%} of the distribution on either side of its limits. It indicates the 2.5th percentile and the 97.5th percentile. In symmetric distributions, the two methods of computing credible intervals, the ETI and the \link[=hdi]{HDI}, return similar results. This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution. 
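As a minimal illustration of this difference (a sketch, not part of the package's original examples), compare both intervals on a right-skewed posterior:

\preformatted{library(bayestestR)
set.seed(123)
skewed <- rgamma(10000, shape = 2, rate = 1) # a right-skewed "posterior"

eti(skewed) # equal-tailed: removes an equal share from each tail
hdi(skewed) # highest-density: shifted towards the mode of the distribution
}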
On the other hand, the ETI range does change when transformations are applied to the distribution (for instance, for a log odds scale to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ \dontshow{if (require("rstanarm") && require("emmeans") && require("brms") && require("BayesFactor")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) posterior <- rnorm(1000) eti(posterior) eti(posterior, ci = c(0.80, 0.89, 0.95)) df <- data.frame(replicate(4, rnorm(100))) eti(df) eti(df, ci = c(0.80, 0.89, 0.95)) \donttest{ model <- suppressWarnings( rstanarm::stan_glm(mpg ~ wt + gear, data = mtcars, chains = 2, iter = 200, refresh = 0) ) eti(model) eti(model, ci = c(0.80, 0.89, 0.95)) eti(emmeans::emtrends(model, ~1, "wt", data = mtcars)) model <- brms::brm(mpg ~ wt + cyl, data = mtcars) eti(model) eti(model, ci = c(0.80, 0.89, 0.95)) bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) eti(bf) eti(bf, ci = c(0.80, 0.89, 0.95)) } \dontshow{\}) # examplesIf} } \seealso{ Other ci: \code{\link{bci}()}, \code{\link{ci}()}, \code{\link{hdi}()}, \code{\link{si}()}, \code{\link{spi}()} } \concept{ci} bayestestR/man/area_under_curve.Rd0000644000176200001440000000305114542333405016754 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/area_under_curve.R \name{area_under_curve} \alias{area_under_curve} \alias{auc} \title{Area under the Curve (AUC)} \usage{ area_under_curve(x, y, method = c("trapezoid", "step", "spline"), ...) auc(x, y, method = c("trapezoid", "step", "spline"), ...) } \arguments{ \item{x}{Vector of x values.} \item{y}{Vector of y values.} \item{method}{Method to compute the Area Under the Curve (AUC). Can be \code{"trapezoid"} (default), \code{"step"} or \code{"spline"}. If "trapezoid", the curve is formed by connecting all points by a direct line (composite trapezoid rule). 
If "step" is chosen then a stepwise connection of two points is used. For calculating the area under a spline interpolation the splinefun function is used in combination with integrate.} \item{...}{Arguments passed to or from other methods.} } \description{ Based on the DescTools \code{AUC} function. It can calculate the area under the curve with a naive algorithm or a more elaborated spline approach. The curve must be given by vectors of xy-coordinates. This function can handle unsorted x values (by sorting x) and ties for the x values (by ignoring duplicates). } \examples{ library(bayestestR) posterior <- distribution_normal(1000) dens <- estimate_density(posterior) dens <- dens[dens$x > 0, ] x <- dens$x y <- dens$y area_under_curve(x, y, method = "trapezoid") area_under_curve(x, y, method = "step") area_under_curve(x, y, method = "spline") } \seealso{ DescTools } bayestestR/man/reshape_iterations.Rd0000644000176200001440000000262214542333405017336 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reshape_iterations.R \name{reshape_iterations} \alias{reshape_iterations} \alias{reshape_draws} \title{Reshape estimations with multiple iterations (draws) to long format} \usage{ reshape_iterations(x, prefix = c("draw", "iter", "iteration", "sim")) reshape_draws(x, prefix = c("draw", "iter", "iteration", "sim")) } \arguments{ \item{x}{A data.frame containing posterior draws obtained from \code{estimate_response} or \code{estimate_link}.} \item{prefix}{The prefix of the draws (for instance, \code{"iter_"} for columns named as \verb{iter_1, iter_2, iter_3}). If more than one are provided, will search for the first one that matches.} } \value{ Data frame of reshaped draws in long format. } \description{ Reshape a wide data.frame of iterations (such as posterior draws or bootsrapped samples) as columns to long format. Instead of having all iterations as columns (e.g., \verb{iter_1, iter_2, ...}), will return 3 columns with the \verb{\\*_index} (the previous index of the row), the \verb{\\*_group} (the iteration number) and the \verb{\\*_value} (the value of said iteration). } \examples{ \donttest{ if (require("rstanarm")) { model <- stan_glm(mpg ~ am, data = mtcars, refresh = 0) draws <- insight::get_predicted(model) long_format <- reshape_iterations(draws) head(long_format) } } } bayestestR/man/bci.Rd0000644000176200001440000002250415005147105014177 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bci.R \name{bci} \alias{bci} \alias{bcai} \alias{bci.numeric} \alias{bci.data.frame} \alias{bci.brmsfit} \alias{bci.get_predicted} \title{Bias Corrected and Accelerated Interval (BCa)} \usage{ bci(x, ...) bcai(x, ...) \method{bci}{numeric}(x, ci = 0.95, verbose = TRUE, ...) \method{bci}{data.frame}(x, ci = 0.95, rvar_col = NULL, verbose = TRUE, ...) \method{bci}{brmsfit}( x, ci = 0.95, effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, ... ) \method{bci}{get_predicted}(x, ci = 0.95, use_iterations = FALSE, verbose = TRUE, ...) } \arguments{ \item{x}{Vector representing a posterior distribution, or a data frame of such vectors. Can also be a Bayesian model. 
\strong{bayestestR} supports a wide range of models (see, for example, \code{methods("hdi")}) and not all of those are documented in the 'Usage' section, because methods for other classes mostly resemble the arguments of the \code{.numeric} or \code{.data.frame}methods.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the (credible) interval - CI (between 0 and 1) to be estimated. Default to \code{.95} (\verb{95\%}).} \item{verbose}{Toggle off warnings.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models of from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} return random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} \item{use_iterations}{Logical, if \code{TRUE} and \code{x} is a \code{get_predicted} object, (returned by \code{\link[insight:get_predicted]{insight::get_predicted()}}), the function is applied to the iterations instead of the predictions. This only applies to models that return iterations for predicted values (e.g., \code{brmsfit} models).} } \value{ A data frame with following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. 
} } \description{ Compute the \strong{Bias Corrected and Accelerated Interval (BCa)} of posterior distributions. } \details{ Unlike equal-tailed intervals (see \code{\link[=eti]{eti()}}) that typically exclude \verb{2.5\%} from each tail of the distribution and always include the median, the HDI is \emph{not} equal-tailed and therefore always includes the mode(s) of posterior distributions. While this can be useful to better represent the credibility mass of a distribution, the HDI also has some limitations. See \code{\link[=spi]{spi()}} for details. The \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{\verb{95\%} or \verb{89\%} Credible Intervals (CI)} are two reasonable ranges to characterize the uncertainty related to the estimation (see \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{here} for a discussion about the differences between these two values). The \verb{89\%} intervals (\code{ci = 0.89}) are deemed to be more stable than, for instance, \verb{95\%} intervals (\emph{Kruschke, 2014}). An effective sample size of at least 10.000 is recommended if one wants to estimate \verb{95\%} intervals with high precision (\emph{Kruschke, 2014, p. 183ff}). Unfortunately, the default number of posterior samples for most Bayes packages (e.g., \code{rstanarm} or \code{brms}) is only 4.000 (thus, you might want to increase it when fitting your model). Moreover, 89 indicates the arbitrariness of interval limits - its only remarkable property is being the highest prime number that does not exceed the already unstable \verb{95\%} threshold (\emph{McElreath, 2015}). However, \verb{95\%} has some \href{https://easystats.github.io/blog/posts/bayestestr_95/}{advantages too}. For instance, it shares (in the case of a normal posterior distribution) an intuitive relationship with the standard deviation and it conveys a more accurate image of the (artificial) bounds of the distribution. Also, because it is wider, it makes analyses more conservative (i.e., the probability of covering zero is larger for the \verb{95\%} CI than for lower ranges such as \verb{89\%}), which is a good thing in the context of the reproducibility crisis. A \verb{95\%} equal-tailed interval (ETI) has \verb{2.5\%} of the distribution on either side of its limits. It indicates the 2.5th percentile and the 97.5th percentile. In symmetric distributions, the two methods of computing credible intervals, the ETI and the \link[=hdi]{HDI}, return similar results. This is not the case for skewed distributions. Indeed, it is possible that parameter values in the ETI have lower credibility (are less probable) than parameter values outside the ETI. This property seems undesirable as a summary of the credible values in a distribution. On the other hand, the ETI range does change when transformations are applied to the distribution (for instance, for a log odds scale to probabilities): the lower and higher bounds of the transformed distribution will correspond to the transformed lower and higher bounds of the original distribution. On the contrary, applying transformations to the distribution will change the resulting HDI. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. 
\item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}. } \examples{ posterior <- rnorm(1000) bci(posterior) bci(posterior, ci = c(0.80, 0.89, 0.95)) } \references{ DiCiccio, T. J. and B. Efron. (1996). Bootstrap Confidence Intervals. Statistical Science. 11(3): 189–212. 10.1214/ss/1032280214 } \seealso{ Other ci: \code{\link{ci}()}, \code{\link{eti}()}, \code{\link{hdi}()}, \code{\link{si}()}, \code{\link{spi}()} } \concept{ci} bayestestR/man/sensitivity_to_prior.Rd0000644000176200001440000000330715052646230017756 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sensitivity_to_prior.R \name{sensitivity_to_prior} \alias{sensitivity_to_prior} \alias{sensitivity_to_prior.stanreg} \title{Sensitivity to Prior} \usage{ sensitivity_to_prior(model, ...) \method{sensitivity_to_prior}{stanreg}(model, index = "Median", magnitude = 10, ...) } \arguments{ \item{model}{A Bayesian model (\code{stanreg} or \code{brmsfit}).} \item{...}{Arguments passed to or from other methods.} \item{index}{The indices from which to compute the sensitivity. Can be one or multiple names of the columns returned by \code{describe_posterior}. The case is important here (e.g., write 'Median' instead of 'median').} \item{magnitude}{This represent the magnitude by which to shift the antagonistic prior (to test the sensitivity). For instance, a magnitude of 10 (default) means that the mode will be updated with a prior located at 10 standard deviations from its original location.} } \description{ Computes the sensitivity to priors specification. This represents the proportion of change in some indices when the model is fitted with an antagonistic prior (a prior of same shape located on the opposite of the effect). 
} \examples{ \dontshow{if (require("rstanarm")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} \donttest{ library(bayestestR) # rstanarm models # ----------------------------------------------- model <- rstanarm::stan_glm(mpg ~ wt, data = mtcars) sensitivity_to_prior(model) model <- rstanarm::stan_glm(mpg ~ wt + cyl, data = mtcars) sensitivity_to_prior(model, index = c("Median", "MAP")) } \dontshow{\}) # examplesIf} } \seealso{ DescTools } bayestestR/man/ci.Rd0000644000176200001440000001713515005147105014041 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci.R \name{ci} \alias{ci} \alias{ci.numeric} \alias{ci.data.frame} \alias{ci.brmsfit} \title{Confidence/Credible/Compatibility Interval (CI)} \usage{ ci(x, ...) \method{ci}{numeric}(x, ci = 0.95, method = "ETI", verbose = TRUE, BF = 1, ...) \method{ci}{data.frame}(x, ci = 0.95, method = "ETI", BF = 1, rvar_col = NULL, verbose = TRUE, ...) \method{ci}{brmsfit}( x, ci = 0.95, method = "ETI", effects = "fixed", component = "conditional", parameters = NULL, verbose = TRUE, BF = 1, ... ) } \arguments{ \item{x}{A \code{stanreg} or \code{brmsfit} model, or a vector representing a posterior distribution.} \item{...}{Currently not used.} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Defaults to \code{0.95} (\verb{95\%}).} \item{method}{Can be \link[=eti]{"ETI"} (default), \link[=hdi]{"HDI"}, \link[=bci]{"BCI"}, \link[=spi]{"SPI"} or \link[=si]{"SI"}.} \item{verbose}{Toggle off warnings.} \item{BF}{The amount of support required to be included in the support interval.} \item{rvar_col}{A single character - the name of an \code{rvar} column in the data frame to be processed. See example in \code{\link[=p_direction]{p_direction()}}.} \item{effects}{Should variables for fixed effects (\code{"fixed"}), random effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. For models from packages \strong{brms} or \strong{rstanarm} there are additional options: \itemize{ \item \code{"fixed"} returns fixed effects. \item \code{"random_variance"} returns random effects parameters (variance and correlation components, e.g. those parameters that start with \code{sd_} or \code{cor_}). \item \code{"grouplevel"} returns random effects group-level estimates, i.e. those parameters that start with \code{r_}. \item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflated part of the model, the dispersion term, etc. See details in section \emph{Model Components}. May be abbreviated. Note that the \emph{conditional} component also refers to the \emph{count} or \emph{mean} component - names may differ, depending on the modeling package. There are three convenient shortcuts (not applicable to \emph{all} model classes): \itemize{ \item \code{component = "all"} returns all possible parameters. \item If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, \code{smooth_terms}, or \code{instruments} are returned (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters).
\item For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. }} \item{parameters}{Regular expression pattern that describes the parameters that should be returned. Meta-parameters (like \code{lp__} or \code{prior_}) are filtered by default, so only parameters that typically appear in the \code{summary()} are returned. Use \code{parameters} to select specific parameters for the output.} } \value{ A data frame with the following columns: \itemize{ \item \code{Parameter} The model parameter(s), if \code{x} is a model-object. If \code{x} is a vector, this column is missing. \item \code{CI} The probability of the credible interval. \item \code{CI_low}, \code{CI_high} The lower and upper credible interval limits for the parameters. } } \description{ Compute Confidence/Credible/Compatibility Intervals (CI) or Support Intervals (SI) for Bayesian and frequentist models. The documentation is accessible for: } \details{ \itemize{ \item \href{https://easystats.github.io/bayestestR/articles/credible_interval.html}{Bayesian models} \item \href{https://easystats.github.io/parameters/reference/ci.default.html}{Frequentist models} } } \note{ When it comes to interpretation, we recommend thinking of the CI in terms of an "uncertainty" or "compatibility" interval, the latter being defined as "Given any value in the interval and the background assumptions, the data should not seem very surprising" (\emph{Gelman & Greenland 2019}). There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\pkg{see}-package}. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. The following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"location"}: returns location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters). \item \code{"distributional"} (or \code{"auxiliary"}): components like \code{sigma}, \code{dispersion}, \code{beta} or \code{precision} (and other auxiliary parameters) are returned. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. See also \href{https://easystats.github.io/insight/reference/find_parameters.BGGM.html}{\code{?insight::find_parameters}}.
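As a brief, hypothetical illustration of how \code{effects} and \code{component} interact (assuming \code{model} is a fitted zero-inflated \code{brmsfit} model; not run):

\preformatted{
# credible intervals for the zero-inflation part only
ci(model, method = "HDI", component = "zero_inflated")

# credible intervals for all components, fixed and random effects
ci(model, method = "ETI", effects = "all", component = "all")
}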
} \examples{ \dontshow{if (require("rstanarm", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} library(bayestestR) posterior <- rnorm(1000) ci(posterior, method = "ETI") ci(posterior, method = "HDI") df <- data.frame(replicate(4, rnorm(100))) ci(df, method = "ETI", ci = c(0.80, 0.89, 0.95)) ci(df, method = "HDI", ci = c(0.80, 0.89, 0.95)) model <- suppressWarnings(rstanarm::stan_glm( mpg ~ wt, data = mtcars, chains = 2, iter = 200, refresh = 0 )) ci(model, method = "ETI", ci = c(0.80, 0.89)) ci(model, method = "HDI", ci = c(0.80, 0.89)) \dontshow{\}) # examplesIf} \dontshow{if (require("BayesFactor", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} bf <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) ci(bf, method = "ETI") ci(bf, method = "HDI") \dontshow{\}) # examplesIf} \dontshow{if (require("emmeans", quietly = TRUE) && require("rstanarm", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf} model <- emmeans::emtrends(model, ~1, "wt", data = mtcars) ci(model, method = "ETI") ci(model, method = "HDI") \dontshow{\}) # examplesIf} } \references{ Gelman A, Greenland S. Are confidence intervals better termed "uncertainty intervals"? BMJ 2019;l5381. \doi{10.1136/bmj.l5381} } \seealso{ Other ci: \code{\link{bci}()}, \code{\link{eti}()}, \code{\link{hdi}()}, \code{\link{si}()}, \code{\link{spi}()} } \concept{ci} bayestestR/man/diagnostic_draws.Rd0000644000176200001440000000201315005147105016761 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/diagnostic_draws.R \name{diagnostic_draws} \alias{diagnostic_draws} \title{Diagnostic values for each iteration} \usage{ diagnostic_draws(posterior, ...) } \arguments{ \item{posterior}{A \code{stanreg}, \code{stanfit}, \code{brmsfit}, or \code{blavaan} object.} \item{...}{Currently only used for models of class \code{brmsfit}, where a \code{variable} argument can be used, which is directly passed to the \code{as.data.frame()} method (i.e., \code{as.data.frame(x, variable = variable)}).} } \description{ Returns sampler diagnostics for each iteration, such as the accumulated log-posterior, the average Metropolis acceptance rate, the number of divergent transitions, and whether a transition hit the maximum tree depth rather than terminating its evolution normally.
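For instance (a minimal, hypothetical sketch assuming the \strong{rstanarm} package is installed; not run):

\preformatted{
# fit a small model and inspect its per-iteration sampler diagnostics
model <- rstanarm::stan_glm(mpg ~ wt, data = mtcars, refresh = 0)
diagnostic_draws(model)
}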
} \examples{ \donttest{ set.seed(333) if (require("brms", quietly = TRUE)) { model <- suppressWarnings(brm(mpg ~ wt * cyl * vs, data = mtcars, iter = 100, control = list(adapt_delta = 0.80), refresh = 0 )) diagnostic_draws(model) } } } bayestestR/DESCRIPTION0000644000176200001440000001110415054341320014077 0ustar liggesusersType: Package Package: bayestestR Title: Understand and Describe Bayesian Models and Posterior Distributions Version: 0.17.0 Authors@R: c(person(given = "Dominique", family = "Makowski", role = c("aut", "cre"), email = "officialeasystats@gmail.com", comment = c(ORCID = "0000-0001-5375-9967")), person(given = "Daniel", family = "Lüdecke", role = "aut", email = "d.luedecke@uke.de", comment = c(ORCID = "0000-0002-8895-3206")), person(given = "Mattan S.", family = "Ben-Shachar", role = "aut", email = "matanshm@post.bgu.ac.il", comment = c(ORCID = "0000-0002-4287-4801")), person(given = "Indrajeet", family = "Patil", role = "aut", email = "patilindrajeet.science@gmail.com", comment = c(ORCID = "0000-0003-1995-6531")), person(given = "Micah K.", family = "Wilson", role = "aut", email = "micah.k.wilson@curtin.edu.au", comment = c(ORCID = "0000-0003-4143-7308")), person(given = "Brenton M.", family = "Wiernik", role = "aut", email = "brenton@wiernik.org", comment = c(ORCID = "0000-0001-9560-6336")), person(given = "Paul-Christian", family = "Bürkner", role = "rev", email = "paul.buerkner@gmail.com"), person(given = "Tristan", family = "Mahr", role = "rev", email = "tristan.mahr@wisc.edu", comment = c(ORCID = "0000-0002-8890-5116")), person(given = "Henrik", family = "Singmann", role = "ctb", email = "singmann@gmail.com", comment = c(ORCID = "0000-0002-4842-3657")), person(given = "Quentin F.", family = "Gronau", role = "ctb", comment = c(ORCID = "0000-0001-5510-6943")), person(given = "Sam", family = "Crawley", role = "ctb", email = "sam@crawley.nz", comment = c(ORCID = "0000-0002-7847-0411"))) Maintainer: Dominique Makowski Description: Provides utilities to describe posterior distributions and Bayesian models. It includes point-estimates such as Maximum A Posteriori (MAP), measures of dispersion (Highest Density Interval - HDI; Kruschke, 2015 ) and indices used for null-hypothesis testing (such as ROPE percentage, pd and Bayes factors). References: Makowski et al. (2021) . Depends: R (>= 3.6) Imports: insight (>= 1.4.1), datawizard (>= 1.2.0), graphics, methods, stats, utils Suggests: BayesFactor (>= 0.9.12-4.4), bayesQR, bayesplot, betareg, BH, blavaan, bridgesampling, brms, collapse, curl, effectsize, emmeans, gamm4, ggdist, ggplot2, glmmTMB, httr2, KernSmooth, knitr, lavaan, lme4, logspline (>= 2.1.21), marginaleffects (>= 0.29.0), MASS, mclust, mediation, modelbased, ordbetareg, parameters, patchwork, performance, posterior, quadprog, RcppEigen, rmarkdown, rstan, rstanarm, see (>= 0.8.5), testthat, tinytable, tweedie, withr License: GPL-3 URL: https://easystats.github.io/bayestestR/ BugReports: https://github.com/easystats/bayestestR/issues VignetteBuilder: knitr Encoding: UTF-8 Language: en-US RoxygenNote: 7.3.2 Config/testthat/edition: 3 Config/testthat/parallel: true Config/rcmdcheck/ignore-inconsequential-notes: true Config/Needs/website: easystats/easystatstemplate Config/Needs/check: stan-dev/cmdstanr NeedsCompilation: no Packaged: 2025-08-29 11:10:27 UTC; Daniel Author: Dominique Makowski [aut, cre] (ORCID: ), Daniel Lüdecke [aut] (ORCID: ), Mattan S. Ben-Shachar [aut] (ORCID: ), Indrajeet Patil [aut] (ORCID: ), Micah K. Wilson [aut] (ORCID: ), Brenton M. 
Wiernik [aut] (ORCID: ), Paul-Christian Bürkner [rev], Tristan Mahr [rev] (ORCID: ), Henrik Singmann [ctb] (ORCID: ), Quentin F. Gronau [ctb] (ORCID: ), Sam Crawley [ctb] (ORCID: ) Repository: CRAN Date/Publication: 2025-08-29 15:10:07 UTC