# parameters/tests/testthat/test-p_direction.R --------------------------------
skip_on_cran()
skip_if_not_installed("bayestestR")
skip_if_not_installed("distributional")

test_that("p_direction", {
  data(mtcars)
  m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars)
  set.seed(123)
  x <- p_direction(m)
  expect_identical(c(nrow(x), ncol(x)), c(5L, 5L))
  expect_named(x, c("Parameter", "CI", "CI_low", "CI_high", "pd"))
  expect_snapshot(print(x))
  set.seed(123)
  x <- p_direction(m, ci = 0.8)
  expect_equal(x$pd, c(1, 0.6359, 0.9992, 0.882, 0.9117), tolerance = 1e-3)
  set.seed(123)
  x <- p_direction(m, null = 0.2)
  expect_equal(x$pd, c(1, 0.5567, 0.9997, 0.9309, 1), tolerance = 1e-3)
})

test_that("p_direction", {
  skip_if_not_installed("sandwich")
  data(mtcars)
  m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars)
  set.seed(123)
  x <- p_direction(m, ci = 0.8, vcov = "HC3")
  expect_equal(x$pd, c(1, 0.6162, 0.9984, 0.8323, 0.8962), tolerance = 1e-3)
  set.seed(123)
  x <- p_direction(m, null = 0.2, vcov = "HC3")
  expect_equal(x$pd, c(1, 0.5464, 0.9989, 0.88, 1), tolerance = 1e-3)
})

test_that("p_direction, glmmTMB", {
  skip_if_not_installed("glmmTMB")
  data(Salamanders, package = "glmmTMB")
  m1 <- glmmTMB::glmmTMB(count ~ mined + cover + (1 | site),
    zi = ~mined,
    family = poisson,
    data = Salamanders
  )
  out <- p_direction(m1)
  expect_identical(c(nrow(out), ncol(out)), c(5L, 6L))
  expect_named(out, c("Parameter", "CI", "CI_low", "CI_high", "pd", "Component"))
  expect_equal(out$pd, c(0.8245, 1, 0.9974, 1, 1), tolerance = 1e-4)
  expect_identical(
    out$Parameter,
    c(
      "(Intercept)_cond", "minedno_cond", "cover_cond",
      "(Intercept)_zi", "minedno_zi"
    )
  )
})

# parameters/tests/testthat/test-glmmTMB-profile_CI.R --------------------------
test_that("glmmTMB profiled and uniroot CI work", {
  skip_on_cran()
  skip_if_not_installed("TMB")
  skip_if_not_installed("glmmTMB")
  skip_if_not_installed("lme4")

  data(sleepstudy, package = "lme4")
  m <- glmmTMB::glmmTMB(Reaction ~ Days + (Days | Subject), data = sleepstudy)
  expect_silent({
    mp1 <- model_parameters(m, ci_method = "uniroot")
  })
  expect_silent({
    mp2 <- model_parameters(m, ci_method = "profile")
  })
  expect_snapshot(print(mp1))
  expect_snapshot(print(mp2))
})

# parameters/tests/testthat/test-svylme.R --------------------------------------
skip_on_cran()
skip_on_os(c("mac", "linux", "solaris"))
skip_if_not_installed("withr")
skip_if_not_installed("survey")
skip_if_not_installed("lme4")
skip_if_not_installed("svylme")

withr::with_environment(
  new.env(),
  test_that("model_parameters svylme", {
    data(api, package = "survey")
    # two-stage cluster sample
    dclus2 <- survey::svydesign(
      id = ~ dnum + snum,
      fpc = ~ fpc1 + fpc2,
      data = apiclus2
    )
    m <- svylme::svy2lme(
      api00 ~ ell + mobility + api99 + (1 + api99 | dnum),
      design = dclus2,
      method = "nested"
    )
    mp <- model_parameters(m)
    expect_snapshot(print(mp))
  })
)

# parameters/tests/testthat/test-model_parameters.coxme.R ----------------------
skip_on_cran()
skip_if_not_installed("coxme")
skip_if_not_installed("survival")
skip_if_not_installed("withr")

# modelparameters ----------------------------------
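# The tests below cover fixed, random, and group-level effects extraction
# from coxme models; explicit column-name and dimension checks guard the
# structure of the returned parameters table.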
withr::with_environment(
  new.env(),
  test_that("model_parameters.coxme", {
    Surv <- survival::Surv
    rats <- survival::rats
    lung <- survival::lung
    set.seed(1234)
    rats$grp <- sample(letters[1:3], nrow(rats), replace = TRUE)
    data(eortc, package = "coxme")
    d <<- coxme::eortc
    d2 <<- rats

    m1 <- coxme::coxme(Surv(y, uncens) ~ trt + (1 | center), data = d)
    out <- model_parameters(m1)
    expect_named(
      out,
      c(
        "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high",
        "z", "df_error", "p", "Effects", "Group"
      )
    )
    expect_equal(out$Coefficient, c(0.708613, 0.329214, NA), tolerance = 1e-4)
    out <- model_parameters(m1, effects = "grouplevel")
    expect_identical(dim(out), c(37L, 9L))

    m2 <- coxme::coxme(Surv(time, status) ~ ph.ecog + age + (1 | inst), lung)
    out <- model_parameters(m2)
    expect_named(
      out,
      c(
        "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high",
        "z", "df_error", "p", "Effects", "Group"
      )
    )
    expect_equal(out$Coefficient, c(0.473195, 0.011394, 0.146955, NA), tolerance = 1e-4)
    out <- model_parameters(m2, effects = "grouplevel")
    expect_identical(dim(out), c(18L, 9L))

    m3 <- coxme::coxme(Surv(time, status) ~ rx + (1 + rx | litter) + (1 | grp), d2)
    out <- model_parameters(m3)
    expect_named(
      out,
      c(
        "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high",
        "z", "df_error", "p", "Effects", "Group"
      )
    )
    expect_equal(
      out$Coefficient,
      c(0.730075, 1.147669, 0.018608, 0.038953, 0.000791, NA),
      tolerance = 1e-4
    )
    expect_identical(
      out$Parameter,
      c("rx", "SD (Intercept)", "SD (Intercept)", "SD (rx)", "Cor (Intercept~rx)", "SD (Observations)")
    )
    out <- model_parameters(m3, effects = "fixed")
    expect_equal(out$Coefficient, 0.730075, tolerance = 1e-4)
    expect_identical(out$Parameter, "rx")
    out <- model_parameters(m3, effects = "random")
    expect_equal(
      out$Coefficient,
      c(1.147669, 0.018608, 0.038953, 0.000791, NA),
      tolerance = 1e-4
    )
    expect_identical(
      out$Parameter,
      c("SD (Intercept)", "SD (Intercept)", "SD (rx)", "Cor (Intercept~rx)", "SD (Observations)")
    )
    out <- model_parameters(m3, effects = "grouplevel")
    expect_identical(dim(out), c(203L, 9L))
    expect_identical(unique(out$Parameter), c("(Intercept)", "rx"))
    expect_identical(unique(out$Group), c("litter", "grp"))
  })
)

# parameters/tests/testthat/test-complete_separation.R -------------------------
skip_if(getRversion() < "4.0.0")
skip_if_not_installed("withr")

withr::with_options(
  list(parameters_warning_exponentiate = TRUE),
  test_that("print warning about complete separation", {
    d_sep <- data.frame(
      y = c(0, 0, 0, 0, 1, 1, 1, 1),
      x1 = c(1, 2, 3, 3, 5, 6, 10, 11),
      x2 = c(3, 2, -1, -1, 2, 4, 1, 0)
    )
    m_sep <- suppressWarnings(glm(y ~ x1 + x2, data = d_sep, family = binomial))
    out <- model_parameters(m_sep)
    expect_snapshot(print(out))
  })
)

withr::with_options(
  list(parameters_warning_exponentiate = TRUE),
  test_that("print warning about complete separation", {
    data(mtcars)
    m_sep2 <- suppressWarnings(glm(am ~ gear, data = mtcars, family = binomial))
    out <- model_parameters(m_sep2)
    expect_snapshot(print(out))
  })
)

withr::with_options(
  list(parameters_warning_exponentiate = TRUE),
  test_that("print warning about quasi complete separation", {
    data(mtcars)
    set.seed(323)
    m_sep3 <- suppressWarnings(glm(vs ~ qsec, data = mtcars[sample.int(32, 27, replace = TRUE), ], family = binomial)) # nolint
    out <- model_parameters(m_sep3)
    expect_snapshot(print(out))
  })
)

# parameters/tests/testthat/test-p_value.R -------------------------------------
test_that("p_value", {
  expect_equal(p_value(c(1, 1, 1)), p_value(-c(1, 1, 1)), tolerance = 1e-3)
  set.seed(123)
  x <- rnorm(100, mean = 1.5)
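  # p-values should be invariant to the sign of the estimate, and shifting
  # the null value should move them in the expected direction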
  expect_equal(p_value(x), p_value(-x), tolerance = 1e-3)
  expect_gt(p_value(x, null = 1), p_value(x))
  expect_gt(p_value(x), p_value(x, null = -1))
  expect_equal(p_value(x, null = -1), p_value(-x, null = 1), tolerance = 1e-3)
})

skip_on_cran()

test_that("p_value", {
  skip_if_not_installed("curl")
  skip_if_offline()
  skip_if_not_installed("httr2")
  skip_if_not_installed("lme4")

  # h-tests
  model <- insight::download_model("htest_1")
  skip_if(is.null(model))
  expect_equal(p_value(model), 0.04136799, tolerance = 0.01)

  model <- insight::download_model("htest_2")
  skip_if(is.null(model))
  expect_equal(p_value(model), 0.1518983, tolerance = 0.01)

  model <- insight::download_model("htest_3")
  skip_if(is.null(model))
  expect_equal(p_value(model), 0.182921, tolerance = 0.01)

  model <- insight::download_model("htest_4")
  skip_if(is.null(model))
  expect_equal(p_value(model), 0, tolerance = 0.01)

  model <- insight::download_model("htest_5")
  skip_if(is.null(model))
  expect_equal(p_value(model), 0, tolerance = 0.01)

  model <- insight::download_model("htest_6")
  skip_if(is.null(model))
  expect_equal(p_value(model), 0, tolerance = 0.01)

  model <- insight::download_model("htest_7")
  skip_if(is.null(model))
  expect_equal(p_value(model), 0, tolerance = 0.01)

  model <- insight::download_model("htest_8")
  skip_if(is.null(model))
  expect_equal(p_value(model), 0, tolerance = 0.01)

  # ANOVAs
  model <- insight::download_model("aov_1")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p, 0, tolerance = 0.01)

  model <- insight::download_model("anova_1")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p, 0, tolerance = 0.01)

  model <- insight::download_model("aovlist_1")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p, 0, tolerance = 0.01)

  model <- insight::download_model("aov_2")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p[1], 0, tolerance = 0.01)

  model <- insight::download_model("anova_2")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p[1], 0, tolerance = 0.01)

  model <- insight::download_model("aovlist_2")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p[1], 0, tolerance = 0.01)

  model <- insight::download_model("aov_3")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p[1], 0, tolerance = 0.01)

  model <- insight::download_model("anova_3")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p[1], 0, tolerance = 0.01)

  model <- insight::download_model("aovlist_3")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p[1], 0, tolerance = 0.01)

  model <- insight::download_model("anova_4")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p[2], 0, tolerance = 0.01)

  # ANOVA lmer
  model <- insight::download_model("anova_lmerMod_0")
  skip_if(is.null(model))
  expect_identical(p_value(model), NA)

  model <- insight::download_model("anova_lmerMod_1")
  skip_if(is.null(model))
  expect_identical(p_value(model), NA)

  model <- insight::download_model("anova_lmerMod_2")
  skip_if(is.null(model))
  expect_identical(p_value(model), NA)

  model <- insight::download_model("anova_lmerMod_3")
  skip_if(is.null(model))
  expect_identical(p_value(model), NA)

  model <- insight::download_model("anova_lmerMod_4")
  skip_if(is.null(model))
  expect_identical(p_value(model), NA)

  model <- insight::download_model("anova_lmerMod_5")
  skip_if(is.null(model))
  expect_identical(p_value(model), NA)

  model <- insight::download_model("anova_lmerMod_6")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p[2], 0, tolerance = 0.01)

  # Mixed models
  model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars)
  expect_equal(p_value(model)$p[1], 0.206219, tolerance = 0.01)
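  # df-approximation methods give different p-values: "normal" uses a Wald
  # z-statistic, "kr" uses Kenward-Roger degrees of freedom, so all three
  # variants are checked explicitly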
  expect_equal(p_value(model, method = "normal")$p[1], 0.1956467, tolerance = 0.01)
  expect_equal(p_value(model, method = "kr")$p[1], 0.319398, tolerance = 0.01)

  model <- insight::download_model("merMod_1")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p[1], 0.06578, tolerance = 0.01)

  model <- insight::download_model("merMod_2")
  skip_if(is.null(model))
  expect_equal(p_value(model)$p[1], 0.29912, tolerance = 0.01)
})

# parameters/tests/testthat/test-model_parameters.lqmm.R ------------------------
# lqm -----------------------
test_that("model_parameters - lqm", {
  skip_if_not_installed("lqmm")
  # data
  set.seed(123)
  n <- 500
  p <- 1:3 / 4
  set.seed(123)
  x <- runif(n, 0, 1)
  y <- 30 + x + rnorm(n)
  test <<- data.frame(x, y)
  # model
  set.seed(123)
  fit.lqm <- lqmm::lqm(
    y ~ x,
    data = test,
    tau = p,
    control = list(verbose = FALSE, loop_tol_ll = 1e-9),
    fit = TRUE
  )
  df_lqm <- as.data.frame(model_parameters(fit.lqm))
  expect_equal(df_lqm$Coefficient,
    c(
      29.3220715172958, 1.1244506550584, 29.9547605920406,
      1.1822574944936, 30.6283792821576, 1.25165747424685
    ),
    tolerance = 0.001
  )
})

# lqmm -----------------------
test_that("model_parameters - lqmm", {
  skip("TODO: fix this test")
  skip_if_not_installed("lqmm")
  # setup
  set.seed(123)
  # data
  M <- 50
  n <- 10
  set.seed(123)
  x <- runif(n * M, 0, 1)
  group <- rep(1:M, each = n)
  y <- 10 * x + rep(rnorm(M, 0, 2), each = n) + rchisq(n * M, 3)
  test <<- data.frame(x, y, group)
  # model
  set.seed(123)
  fit.lqmm <- lqmm::lqmm(
    fixed = y ~ x,
    random = ~1,
    group = group,
    data = test,
    tau = 0.5,
    nK = 11,
    type = "normal"
  )
  df_lqmm <- as.data.frame(model_parameters(fit.lqmm))
  expect_equal(df_lqmm,
    structure(
      list(
        Parameter = c("(Intercept)", "x"),
        Coefficient = c(3.44347538706013, 9.25833091219961),
        SE = c(0.491049614414579, 0.458163772053399),
        CI = c(0.95, 0.95),
        CI_low = c(2.47868633791118, 8.35815427623814),
        CI_high = c(4.40826443620908, 10.1585075481611),
        t = c(7.01247956617455, 20.207470509302),
        df_error = c(497L, 497L),
        p = c(6.34497395571023e-09, 2.05172540270515e-25)
      ),
      row.names = 1:2,
      pretty_names = c(`(Intercept)` = "(Intercept)", x = "x"),
      ci = 0.95,
      verbose = TRUE,
      exponentiate = FALSE,
      ordinal_model = FALSE,
      linear_model = TRUE,
      mixed_model = TRUE,
      n_obs = 500L,
      model_class = "lqmm",
      bootstrap = FALSE,
      iterations = 1000,
      ignore_group = TRUE,
      ran_pars = TRUE,
      weighted_nobs = 500,
      model_formula = "y ~ x",
      coefficient_name = "Coefficient",
      zi_coefficient_name = "Log-Odds",
      digits = 2,
      ci_digits = 2,
      p_digits = 3,
      class = "data.frame",
      object_name = "fit.lqmm"
    ),
    tolerance = 0.001
  )
})

# parameters/tests/testthat/test-sort_parameters.R ------------------------------
# easystats convention ------------------------
mod <- parameters(stats::lm(wt ~ am * cyl, data = mtcars))

test_that("sort_parameters returns original object when no sorting - easystats", {
  expect_equal(sort_parameters(mod), mod)
})

test_that("sort_parameters returns sorted object when necessary - easystats", {
  expect_equal(
    sort_parameters(mod, sort = "ascending")$Coefficient,
    sort(mod$Coefficient)
  )
  expect_equal(
    sort_parameters(mod, sort = "descending")$Coefficient,
    sort(mod$Coefficient, decreasing = TRUE)
  )
  expect_s3_class(sort_parameters(mod, sort = "ascending"), "parameters_model")
})

# broom convention ------------------------
df <- structure(list(
  term = c("(Intercept)", "am", "cyl", "am:cyl"),
  estimate = c(
    1.65820588235294, -0.956184605757196,
    0.303811274509804, 0.0328057467667917
  ),
  std.error = c(
    0.587149249513266, 0.792732452856412,
    0.0826018347687406, 0.130209483362154
  ),
  statistic = c(
    2.82416418606949, -1.20618829506957,
    3.67802089820863, 0.251945909926916
  ),
  p.value = c(
    0.00863653784417726, 0.237838251537444,
    0.000989221758576308, 0.802923027949227
  )
), class = c("tbl_df", "tbl", "data.frame"), row.names = c(NA, -4L))

test_that("sort_parameters returns original object when no sorting - broom", {
  expect_equal(sort_parameters(df), df)
})

test_that("sort_parameters returns sorted object when necessary - broom", {
  expect_equal(
    sort_parameters(df, sort = "ascending", column = "estimate")$estimate,
    sort(df$estimate)
  )
  expect_equal(
    sort_parameters(df, sort = "descending", column = "estimate")$estimate,
    sort(df$estimate, decreasing = TRUE)
  )
  expect_s3_class(sort_parameters(df, sort = "ascending", column = "estimate"), "tbl_df")
})

# parameters/tests/testthat/test-emmGrid-df_colname.R ---------------------------
skip_on_cran()
skip_if_not_installed("emmeans")
skip_if_not_installed("lme4")

data(sleep)
data(fiber, package = "emmeans")

m <- lm(strength ~ diameter + machine, data = fiber)
emm <- emmeans::emmeans(m, "machine")
es1 <- emmeans::eff_size(emm, sigma = sigma(m), edf = df.residual(m))

sleep$group <- as.factor(sleep$group)
m2 <- lme4::lmer(extra ~ group + (1 | ID), sleep)
emm2 <- emmeans::emmeans(m2, ~group, df = NA)
es2 <- emmeans::eff_size(emm2, sigma = sigma(m2), edf = df.residual(m2))

test_that("df", {
  expect_identical(
    colnames(model_parameters(es1)),
    c(
      "contrast", "Coefficient", "SE", "CI", "CI_low", "CI_high",
      "t", "df_error", "p"
    )
  )
  expect_identical(
    colnames(model_parameters(es2)),
    c(
      "contrast", "Coefficient", "SE", "CI", "CI_low", "CI_high",
      "z", "df_error", "p"
    )
  )
})

test_that("print model_parameters", {
  mp <- model_parameters(emm)
  expect_snapshot(mp)
  mp <- model_parameters(es1)
  expect_snapshot(mp)
})

# parameters/tests/testthat/test-glmmTMB-2.R ------------------------------------
skip_on_cran()
skip_if_not(getRversion() >= "4.0.0")
skip_if_not_installed("glmmTMB")

data(Salamanders, package = "glmmTMB")
model <- suppressWarnings(glmmTMB::glmmTMB(
  count ~ spp + mined + spp * mined,
  ziformula = ~ spp + mined + spp * mined,
  family = glmmTMB::truncated_poisson,
  data = Salamanders
))

mp <- model_parameters(model, effects = "fixed", component = "conditional")
test_that("model_parameters", {
  expect_equal(mp$Coefficient, as.vector(glmmTMB::fixef(model)[[1]]), tolerance = 1e-3)
  expect_equal(mp$Parameter, names(glmmTMB::fixef(model)[[1]]))
})

mp <- model_parameters(model, effects = "fixed", component = "all")
test_that("model_parameters", {
  expect_equal(mp$Coefficient, as.vector(unlist(glmmTMB::fixef(model))), tolerance = 1e-3)
  expect_equal(mp$Parameter, gsub("^(cond\\.|zi\\.)", "", names(unlist(glmmTMB::fixef(model)))))
  expect_equal(
    mp$Component,
    c(
      "conditional", "conditional", "conditional", "conditional",
      "conditional", "conditional", "conditional", "conditional",
      "conditional", "conditional", "conditional", "conditional",
      "conditional", "conditional", "zero_inflated", "zero_inflated",
      "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated",
      "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated",
      "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated"
    )
  )
})
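# sim1() simulates grouped data with two different residual SDs (residsd = 10
# vs. residsd = 5, labeled "ten" and "five"), so the dispersion submodel
# (dispformula) fitted below has a real signal to pick up.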
sim1 <- function(nfac = 40, nt = 100, facsd = 0.1, tsd = 0.15, mu = 0, residsd = 1) {
  dat <- expand.grid(fac = factor(letters[1:nfac]), t = 1:nt)
  n <- nrow(dat)
  dat$REfac <- rnorm(nfac, sd = facsd)[dat$fac]
  dat$REt <- rnorm(nt, sd = tsd)[dat$t]
  dat$x <- rnorm(n, mean = mu, sd = residsd) + dat$REfac + dat$REt
  dat
}

set.seed(101)
d1 <- sim1(mu = 100, residsd = 10)
d2 <- sim1(mu = 200, residsd = 5)
d1$sd <- "ten"
d2$sd <- "five"
dat <- rbind(d1, d2)
model <- suppressWarnings(glmmTMB::glmmTMB(x ~ sd + (1 | t), dispformula = ~sd, data = dat))

mp <- model_parameters(model, effects = "fixed")
test_that("model_parameters", {
  expect_equal(mp$Coefficient, as.vector(unlist(glmmTMB::fixef(model))), tolerance = 1e-3)
  expect_equal(mp$Component, c("conditional", "conditional", "dispersion", "dispersion"))
})

# parameters/tests/testthat/test-format.R ---------------------------------------
test_that("format_order", {
  expect_identical(format_order(2), "second")
  expect_identical(format_order(45), "forty fifth")
  expect_identical(format_order(2, textual = FALSE), "2nd")
  expect_identical(format_order(45, textual = FALSE), "45th")
})

# parameters/tests/testthat/test-model_parameters.fixest_multi.R ----------------
skip_on_cran()
skip_if_not_installed("fixest")

set.seed(123)
iris$x <- rnorm(150)

test_that("model_parameters.fixest_multi", {
  mod <- fixest::feols(
    c(Petal.Width, Sepal.Width) ~ x + csw0(Petal.Length, Sepal.Length) | Species,
    iris
  )
  expect_snapshot(print(model_parameters(mod)))
  expect_snapshot(print(ci(mod)))
})

test_that("model_parameters.fixest_multi", {
  mod <- fixest::feols(c(Petal.Width, Sepal.Width) ~ x + Petal.Length | Species, iris)
  expect_snapshot(print(model_parameters(mod)))
  expect_snapshot(print(ci(mod)))
})

test_that("model_parameters.fixest_multi", {
  mod <- fixest::feols(Petal.Width ~ x + csw0(Petal.Length, Sepal.Length) | Species, iris)
  expect_snapshot(print(model_parameters(mod)))
  expect_snapshot(print(ci(mod)))
})

# parameters/tests/testthat/test-model_parameters_robust.R ----------------------
skip_if_not_installed("sandwich")
skip_if_not_installed("clubSandwich")

data(mtcars)
mtcars$am <- as.factor(mtcars$am)
model <- lm(mpg ~ wt * am + cyl + gear, data = mtcars)

test_that("model_parameters, robust CL", {
  params1 <- model_parameters(
    model,
    vcov = "CL",
    vcov_args = list(type = "HC1"),
    verbose = FALSE
  )
  robust_se <- unname(sqrt(diag(sandwich::vcovCL(model))))
  expect_equal(params1$SE, robust_se, tolerance = 1e-3)
  expect_equal(params1$p, c(0, 0.00695, 0.00322, 0.00435, 0.94471, 0.00176), tolerance = 1e-3)
})

test_that("model_parameters, robust", {
  params <- model_parameters(model, vcov = "HC", verbose = FALSE)
  robust_se <- unname(sqrt(diag(sandwich::vcovHC(model))))
  expect_equal(params$SE, robust_se, tolerance = 1e-3)
  expect_equal(params$p, c(0, 0.0259, 0.01478, 0.01197, 0.95238, 0.01165), tolerance = 1e-3)
})

test_that("ci, robust", {
  params <- ci(model, vcov = "HC", verbose = FALSE)
  robust_se <- unname(sqrt(diag(sandwich::vcovHC(model))))
  upper_ci <- as.vector(coef(model) + qt(0.975, df.residual(model)) * robust_se)
  expect_equal(params$CI_high, upper_ci, tolerance = 1e-3, ignore_attr = TRUE)
})

test_that("model_parameters, robust CL", {
  params <- model_parameters(model, vcov = "vcovCL", verbose = FALSE)
  robust_se <- unname(sqrt(diag(sandwich::vcovCL(model))))
  expect_equal(params$SE, robust_se, tolerance = 1e-3)
  expect_equal(params$p, c(0, 0.00695, 0.00322, 0.00435, 0.94471, 0.00176), tolerance = 1e-3)
})

model2 <- lm(mpg ~ wt * am + cyl + gear, data = datawizard::standardize(mtcars))
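# model2 refits the same model on standardized data; robust SEs computed
# directly on model2 should match those from standardize = "refit" below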
test_that("model_parameters, robust", {
  params <- model_parameters(model, standardize = "refit", vcov = "HC", verbose = FALSE)
  robust_se <- unname(sqrt(diag(sandwich::vcovHC(model2))))
  expect_equal(params$SE, robust_se, tolerance = 1e-3)
  expect_equal(params$p, c(0.28624, 0.0259, 0.43611, 0.01197, 0.95238, 0.01165), tolerance = 1e-3)
})

# cluster-robust standard errors, using clubSandwich
data(iris)
model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris)
iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris)))

test_that("model_parameters, robust CR", {
  params <- model_parameters(
    model,
    vcov = "CR1",
    vcov_args = list(cluster = iris$cluster),
    verbose = FALSE
  )
  robust_se <- unname(sqrt(diag(clubSandwich::vcovCR(model, type = "CR1", cluster = iris$cluster))))
  expect_equal(params$SE, robust_se, tolerance = 1e-3)
  expect_equal(params$p, c(0.01246, 0.04172, 0.18895, 0.57496, 0, 0), tolerance = 1e-3)
})

test_that("model_parameters, normal", {
  params <- model_parameters(model)
  expect_equal(params$p, c(0.13267, 0.21557, 0.36757, 0.77012, 3e-05, 0), tolerance = 1e-3)
})

data(mtcars)
mtcars$am <- as.factor(mtcars$am)
model <- lm(mpg ~ wt * am + cyl + gear, data = mtcars)

test_that("model_parameters, robust", {
  params <- model_parameters(model, vcov = "HC3")
  robust_se <- unname(sqrt(diag(sandwich::vcovHC(model))))
  expect_equal(params$SE, robust_se, tolerance = 1e-3)
  expect_equal(params$p, c(0, 0.0259, 0.01478, 0.01197, 0.95238, 0.01165), tolerance = 1e-3)
})

test_that("ci, robust", {
  params <- ci(model, vcov = "HC3")
  robust_se <- unname(sqrt(diag(sandwich::vcovHC(model))))
  upper_ci <- as.vector(coef(model) + qt(0.975, df.residual(model)) * robust_se)
  expect_equal(params$CI_high, upper_ci, tolerance = 1e-3, ignore_attr = TRUE)
})

test_that("model_parameters, robust CL", {
  params <- model_parameters(model, vcov = "vcovCL", vcov_args = list(type = "HC1"))
  robust_se <- unname(sqrt(diag(sandwich::vcovCL(model))))
  expect_equal(params$SE, robust_se, tolerance = 1e-3)
  expect_equal(params$p, c(0, 0.00695, 0.00322, 0.00435, 0.94471, 0.00176), tolerance = 1e-3)
})

d <- datawizard::standardize(mtcars)
model2 <- lm(mpg ~ wt * am + cyl + gear, data = d)

test_that("model_parameters, robust", {
  params <- model_parameters(model, standardize = "refit", vcov = "HC3")
  robust_se <- unname(sqrt(diag(sandwich::vcovHC(model2))))
  expect_equal(params$SE, robust_se, tolerance = 1e-3)
  expect_equal(params$p, c(0.28624, 0.0259, 0.43611, 0.01197, 0.95238, 0.01165), tolerance = 1e-3)
})

# cluster-robust standard errors, using clubSandwich
data(iris)
model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris)
iris$cluster <- factor(rep(LETTERS[1:8], length.out = nrow(iris)))

test_that("model_parameters, robust CR", {
  params <- model_parameters(model, vcov = "vcovCR", vcov_args = list(type = "CR1", cluster = iris$cluster))
  robust_se <- unname(sqrt(diag(clubSandwich::vcovCR(model, type = "CR1", cluster = iris$cluster))))
  expect_equal(params$SE, robust_se, tolerance = 1e-3)
  expect_equal(params$p, c(0.01246, 0.04172, 0.18895, 0.57496, 0, 0), tolerance = 1e-3)
})

test_that("model_parameters, normal", {
  params <- model_parameters(model)
  expect_equal(params$p, c(0.13267, 0.21557, 0.36757, 0.77012, 3e-05, 0), tolerance = 1e-3)
})

test_that("ci_ml1, robust", {
  skip("TODO: this one actually is not correct.")
  skip_if_not(packageVersion("parameters") < "0.16.9.9")
  skip_if_not_installed("lme4")
  model <- lme4::lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris)
  params <- ci_ml1(model, vcov = "CR", vcov_args = list(cluster = iris$Species))
  robust_se <- unname(sqrt(diag(clubSandwich::vcovCR(model, type = "CR1", cluster = iris$Species))))
  upper_ci <- fixef(model) + qt(0.975, dof_ml1(model)) * robust_se
})

# parameters/tests/testthat/test-model_parameters.logistf.R ---------------------
skip_on_cran()
skip_if_not_installed("logistf")
skip_if_not_installed("withr")

withr::with_options(
  list(parameters_exponentiate = FALSE),
  {
    data(sex2, package = "logistf")
    m1 <- logistf::logistf(case ~ age + oc + vic + vicl + vis + dia, data = sex2)
    m2 <- logistf::flic(m1)
    m3 <- logistf::flac(m1, data = sex2)

    test_that("model_parameters.logistf", {
      params <- model_parameters(m1)
      expect_snapshot(params, variant = "windows")
    })

    test_that("model_parameters.flic", {
      params <- model_parameters(m2)
      expect_snapshot(params, variant = "windows")
    })

    test_that("model_parameters.flac", {
      params <- model_parameters(m3)
      expect_snapshot(params, variant = "windows")
    })
  }
)

# parameters/tests/testthat/test-gls.R ------------------------------------------
skip_if_not_installed("nlme")

data(Ovary, package = "nlme")
m1 <- nlme::gls(follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time),
  data = Ovary,
  correlation = nlme::corAR1(form = ~ 1 | Mare)
)

test_that("ci", {
  expect_equal(
    ci(m1)$CI_low,
    c(10.90853, -4.04402, -2.2722),
    tolerance = 1e-4
  )
})

test_that("se", {
  expect_equal(
    standard_error(m1)$SE,
    c(0.664643651063474, 0.645047778144975, 0.697538308948056),
    tolerance = 1e-4
  )
})

test_that("p_value", {
  expect_equal(
    p_value(m1)$p,
    c(2.6187369542827e-51, 2.28628382225752e-05, 0.198137111907874),
    tolerance = 1e-4
  )
})

test_that("model_parameters", {
  expect_equal(
    model_parameters(m1)$Coefficient,
    c(12.2163981810227, -2.77471219793581, -0.899604717105857),
    tolerance = 1e-4
  )
})

# parameters/tests/testthat/test-model_parameters.mfx.R -------------------------
skip_if_not_installed("mfx")
skip_if_not_installed("MASS")

set.seed(12345)
n <- 1000
x <- rnorm(n)
y <- rbeta(n, shape1 = plogis(1 + 0.5 * x), shape2 = (abs(0.2 * x)))
y <- (y * (n - 1) + 0.5) / n
data <- data.frame(y, x)
model <- mfx::betamfx(y ~ x | x, data = data)
params <- suppressWarnings(model_parameters(model))

test_that("model_parameters.betamfx", {
  expect_equal(params$Parameter, c("x", "(Intercept)", "x", "(Intercept)", "x"))
  expect_equal(params$Coefficient, c(0.02259, 1.35961, 0.13947, 0.07498, 0.12071), tolerance = 1e-2)
  expect_equal(params$Component, c("marginal", "conditional", "conditional", "precision", "precision"))
})

model <- mfx::betaor(y ~ x | x, data = data)
params <- suppressWarnings(model_parameters(model))

test_that("model_parameters.betaor", {
  expect_equal(params$Parameter, c("(Intercept)", "x"))
  expect_equal(params$Coefficient, c(1.35961, 0.13947), tolerance = 1e-2)
  expect_null(params$Component)
})

params <- suppressWarnings(model_parameters(model, component = "all"))

test_that("model_parameters.betaor", {
  expect_equal(params$Parameter, c("(Intercept)", "x", "(Intercept)", "x"))
  expect_equal(params$Coefficient, unname(do.call(rbind, coef(summary(model$fit)))[, 1]), tolerance = 1e-2)
  expect_equal(params$Component, c("conditional", "conditional", "precision", "precision"))
})

set.seed(12345)
n <- 1000
x <- rnorm(n)
y <- MASS::rnegbin(n, mu = exp(1 + 0.5 * x), theta = 0.5)
data <- data.frame(y, x)
model <- mfx::poissonmfx(formula = y ~ x, data = data)
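# poissonmfx() returns marginal effects alongside the coefficients of the
# underlying Poisson model; both appear as separate components below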
params <- suppressWarnings(model_parameters(model))

test_that("model_parameters.poissonmfx", {
  expect_equal(params$Parameter, c("x", "(Intercept)", "x"))
  expect_equal(params$Coefficient, c(1.46009, 0.96036, 0.54496), tolerance = 1e-2)
  expect_equal(params$Component, c("marginal", "conditional", "conditional"))
})

params <- suppressWarnings(model_parameters(model, component = "cond"))

test_that("model_parameters.poissonmfx", {
  expect_equal(params$Parameter, c("(Intercept)", "x"))
  expect_equal(params$Coefficient, c(0.96036, 0.54496), tolerance = 1e-2)
  expect_null(params$Component)
})

# parameters/tests/testthat/test-panelr.R ---------------------------------------
skip_on_cran()
skip_if_not_installed("panelr")

data("WageData", package = "panelr")
wages <- panelr::panel_data(WageData, id = id, wave = t)
m1 <- panelr::wbm(lwage ~ lag(union) + wks | blk + fem | blk * lag(union), data = wages)
m2 <- suppressWarnings(panelr::wbm(lwage ~ lag(union) + wks | blk + fem | blk * (t | id), data = wages))

test_that("ci", {
  expect_equal(
    ci(m1)$CI_low,
    c(0.00807, -0.00376, 6.14479, -0.09624, -0.00507, -0.34607, -0.53918, -0.37071),
    tolerance = 1e-3
  )
  expect_equal(
    ci(m2)$CI_low,
    c(-0.01668, -0.00139, 6.01762, -0.08795, -0.0055, -0.32126, -0.54359),
    tolerance = 1e-3
  )
})

test_that("se", {
  expect_equal(
    standard_error(m1)$SE,
    c(0.0256, 0.00108, 0.2313, 0.03482, 0.00482, 0.05952, 0.04971, 0.12418),
    tolerance = 1e-3
  )
  expect_equal(
    standard_error(m2)$SE,
    c(0.01838, 0.00073, 0.22549, 0.03394, 0.0047, 0.05803, 0.04846),
    tolerance = 1e-3
  )
})

test_that("p_value", {
  expect_equal(
    p_value(m1)$p,
    c(0.02295, 0.13007, 0, 0.42167, 0.36422, 0.00013, 0, 0.30533),
    tolerance = 1e-3
  )
  expect_equal(
    p_value(m2)$p,
    c(0.29282, 0.9538, 0, 0.52805, 0.43004, 0.00038, 0),
    tolerance = 1e-3
  )
})

test_that("model_parameters", {
  expect_equal(
    model_parameters(m1, effects = "fixed")$Coefficient,
    c(0.05825, -0.00164, 6.59813, -0.028, 0.00438, -0.22941, -0.44176, -0.12732),
    tolerance = 1e-3
  )
  expect_equal(
    model_parameters(m1, effects = "all")$Coefficient,
    c(
      0.05825, -0.00164, 6.59813, -0.028, 0.00438, -0.22941, -0.44176,
      -0.12732, 0.35399, 0.23264
    ),
    tolerance = 1e-3
  )
  expect_equal(
    model_parameters(m2, effects = "fixed")$Coefficient,
    c(0.01934, 4e-05, 6.45957, -0.02143, 0.00371, -0.20753, -0.44861),
    tolerance = 1e-3
  )
})

skip_if_not_installed("car")
skip_if_not_installed("clubSandwich")

test_that("model_parameters, asym", {
  data("teen_poverty", package = "panelr")
  teen <- panelr::long_panel(teen_poverty, begin = 1, end = 5)
  m4 <- panelr::asym(hours ~ lag(pov) + spouse, data = teen, use.wave = TRUE)
  expect_snapshot(print(model_parameters(m4)))
})

# parameters/tests/testthat/test-model_parameters_std.R -------------------------
skip_on_cran()
skip_if_not_installed("effectsize")

data(mtcars)
mtcars$am <- as.factor(mtcars$am)
d <- mtcars
model <- lm(mpg ~ wt * am, data = d)

test_that("model_parameters, standardize-refit", {
  params <- model_parameters(model, standardize = "refit")
  expect_identical(c(nrow(params), ncol(params)), c(4L, 9L))
  expect_equal(params$Coefficient, c(-0.14183, -0.61463, -0.35967, -0.86017), tolerance = 1e-3)
  expect_equal(params$SE, c(0.12207, 0.12755, 0.23542, 0.23454), tolerance = 1e-3)
  expect_equal(params$CI_high, c(0.10821, -0.35336, 0.12257, -0.37973), tolerance = 1e-3)
})
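# "refit" standardizes the data and refits the model; "posthoc" and "basic"
# rescale the coefficients after fitting, and "smart" adapts the scaling to
# the predictor type -- hence the different expected values per method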
test_that("model_parameters, standardize-posthoc", {
  params <- model_parameters(model, standardize = "posthoc")
  expect_identical(c(nrow(params), ncol(params)), c(4L, 9L))
  expect_equal(params$Std_Coefficient, c(0, -0.61463, 2.46865, -0.87911), tolerance = 1e-3)
  expect_equal(params$SE, c(0, 0.12755, 0.7075, 0.23971), tolerance = 1e-3)
  expect_equal(params$CI_high, c(0, -0.35336, 3.91789, -0.38809), tolerance = 0.1)
})

test_that("model_parameters, standardize-basic", {
  params <- model_parameters(model, standardize = "basic")
  expect_identical(c(nrow(params), ncol(params)), c(4L, 9L))
  expect_equal(params$Std_Coefficient, c(0, -0.61463, 1.23183, -1.11016), tolerance = 1e-3)
  expect_equal(params$SE, c(0, 0.12755, 0.35303, 0.30271), tolerance = 1e-3)
  expect_equal(params$CI_high, c(0, -0.35336, 1.95499, -0.4901), tolerance = 0.1)
})

test_that("model_parameters, standardize-smart", {
  params <- model_parameters(model, standardize = "smart")
  expect_identical(c(nrow(params), ncol(params)), c(4L, 9L))
  expect_equal(params$Std_Coefficient, c(0, -0.61463, 2.41278, -0.85922), tolerance = 1e-3)
  expect_equal(params$SE, c(0, 0.12755, 0.69148, 0.23428), tolerance = 1e-3)
  expect_equal(params$CI_high, c(0, -0.35336, 3.82922, -0.37931), tolerance = 0.1)
})

# parameters/tests/testthat/test-gamm.R -----------------------------------------
skip_if_not_installed("mgcv")

set.seed(123)
void <- capture.output({
  dat <- mgcv::gamSim(6, n = 200, scale = 0.2, dist = "poisson")
})
m1_gamm <- mgcv::gamm(
  y ~ s(x0) + s(x1) + s(x2),
  family = poisson,
  data = dat,
  random = list(fac = ~1),
  verbosePQL = FALSE
)

test_that("ci", {
  expect_equal(
    ci(m1_gamm)$CI_low,
    c(2.361598, NA, NA, NA),
    tolerance = 1e-3
  )
})

test_that("se", {
  expect_equal(
    standard_error(m1_gamm)$SE,
    c(0.3476989, NA, NA, NA),
    tolerance = 1e-3
  )
})

test_that("p_value", {
  expect_equal(
    p_value(m1_gamm)$p,
    c(0, 0, 0, 0),
    tolerance = 1e-3
  )
})

mp <- model_parameters(m1_gamm)

test_that("model_parameters", {
  expect_equal(
    mp$Coefficient,
    c(3.0476, NA, NA, NA),
    tolerance = 1e-3
  )
})

test_that("model_parameters", {
  expect_equal(
    mp$df,
    c(NA, 3.84696, 3.17389, 8.51855),
    tolerance = 1e-3
  )
})

test_that("model_parameters", {
  expect_equal(
    mp$df_error,
    c(183.4606, NA, NA, NA),
    tolerance = 1e-3
  )
})

# parameters/tests/testthat/test-equivalence_test.R -----------------------------
skip_if_not_installed("bayestestR")

test_that("equivalence_test", {
  data(mtcars)
  m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars)
  x <- equivalence_test(m)
  expect_identical(c(nrow(x), ncol(x)), c(5L, 9L))
  expect_type(capture.output(equivalence_test(m)), "character")
  expect_snapshot(print(x))
})

test_that("equivalence_test, robust", {
  skip_if_not_installed("sandwich")
  data(mtcars)
  m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars)
  x <- equivalence_test(m, vcov = "HC3")
  expect_snapshot(print(x))
})

test_that("equivalence_test, robust", {
  skip_on_cran()
  skip_if_not_installed("glmmTMB")
  set.seed(123)
  sim_data <- data.frame(
    x = rbeta(100, 2, 5),
    y = sample(0:1, 100, replace = TRUE),
    z = rnorm(100),
    dataset = rep(1:10, each = 10)
  )
  mod <- glmmTMB::glmmTMB(
    x ~ y * z + (1 | dataset),
    data = sim_data,
    family = glmmTMB::beta_family(link = "logit")
  )
  out <- equivalence_test(mod)
  expect_identical(dim(out), c(4L, 10L))
  expect_identical(
    out$ROPE_Equivalence,
    c("Rejected", "Undecided", "Accepted", "Undecided")
  )
  expect_equal(
    out$SGPV,
    c(0, 0.8726, 0.9739, 0.6741),
    tolerance = 1e-3
  )
})

test_that("equivalence_test, unequal rope-range", {
  data(iris)
  m <- lm(Sepal.Length ~ Species, data = iris)
  rez <- equivalence_test(m, range = c(-Inf, 0.1))
  expect_identical(rez$ROPE_Equivalence, c("Rejected", "Rejected", "Rejected"))
  expect_identical(rez$ROPE_low, c(-Inf, -Inf, -Inf))

  rez <- equivalence_test(m, range = c(-99, 0.1))
  expect_identical(rez$ROPE_Equivalence, c("Rejected", "Rejected", "Rejected"))
  expect_identical(rez$ROPE_low, c(-99, -99, -99))
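  # one-sided ROPE bounds should also work with factor predictors; the
  # checks below additionally validate the CI range and the SGPV against a
  # normal approximation built with bayestestR::distribution_normal()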
  data(mtcars)
  mtcars[c("gear", "cyl")] <- lapply(mtcars[c("gear", "cyl")], as.factor)
  m <- lm(mpg ~ hp + gear + cyl, data = mtcars)
  rez <- equivalence_test(m, range = c(-Inf, 0.5))
  expect_identical(
    rez$ROPE_Equivalence,
    c("Rejected", "Accepted", "Undecided", "Rejected", "Accepted", "Undecided")
  )

  # validate that range of CI equals approximated normal distribution
  diff_ci <- abs(diff(c(rez$CI_low[3], rez$CI_high[3])))
  set.seed(123)
  out <- bayestestR::distribution_normal(
    n = 1000,
    mean = rez$CI_high[3] - (diff_ci / 2),
    sd = (diff_ci / 2) / 3.290525
  )
  expect_equal(range(out)[1], rez$CI_low[3], tolerance = 1e-4)
  expect_equal(range(out)[2], rez$CI_high[3], tolerance = 1e-4)

  # need procedure for SGP here...
  diff_ci <- abs(diff(c(rez$CI_low[3], rez$CI_high[3])))
  z_value <- stats::qnorm((1 + 0.95) / 2)
  sd_dist <- diff_ci / diff(c(-1 * z_value, z_value))
  set.seed(123)
  out <- bayestestR::distribution_normal(
    n = 10000,
    mean = rez$CI_high[3] - (diff_ci / 2),
    sd = sd_dist
  )
  expect_equal(
    rez$SGPV[3],
    bayestestR::rope(out, range = c(-Inf, 0.5), ci = 1)$ROPE_Percentage,
    tolerance = 1e-4
  )

  rez <- equivalence_test(m, range = c(-0.5, 0.5))
  expect_identical(
    rez$ROPE_Equivalence,
    c("Rejected", "Accepted", "Undecided", "Rejected", "Rejected", "Undecided")
  )

  rez <- equivalence_test(m, range = c(-2, 2))
  expect_identical(
    rez$ROPE_Equivalence,
    c("Rejected", "Accepted", "Undecided", "Rejected", "Rejected", "Undecided")
  )
})

test_that("equivalence_test, unequal rope-range, plots", {
  skip_on_cran()
  skip_if_not_installed("see")
  skip_if_not_installed("vdiffr")
  data(iris)
  m <- lm(Sepal.Length ~ Species, data = iris)
  rez <- equivalence_test(m, range = c(-Inf, 0.1))
  vdiffr::expect_doppelganger(
    "Equivalence-Test 1",
    plot(rez)
  )
  rez <- equivalence_test(m, range = c(-99, 0.1))
  vdiffr::expect_doppelganger(
    "Equivalence-Test 2",
    plot(rez)
  )
  data(mtcars)
  mtcars[c("gear", "cyl")] <- lapply(mtcars[c("gear", "cyl")], as.factor)
  m <- lm(mpg ~ hp + gear + cyl, data = mtcars)
  rez <- equivalence_test(m, range = c(-Inf, 0.5))
  vdiffr::expect_doppelganger(
    "Equivalence-Test 3",
    plot(rez)
  )
  rez <- equivalence_test(m, range = c(-0.5, 0.5))
  vdiffr::expect_doppelganger(
    "Equivalence-Test 4",
    plot(rez)
  )
  rez <- equivalence_test(m, range = c(-2, 2))
  vdiffr::expect_doppelganger(
    "Equivalence-Test 5",
    plot(rez)
  )
})

# parameters/tests/testthat/test-model_parameters.gam.R -------------------------
test_that("model_parameters.gam", {
  skip_if_not_installed("mgcv")
  set.seed(123)
  model <- mgcv::gam(
    formula = mpg ~ s(hp) + s(wt) + factor(cyl) + am + qsec,
    family = stats::quasi(),
    data = mtcars
  )
  params <- model_parameters(model)
  expect_equal(params$SE, c(10.83359, 1.80704, 2.82608, 1.71366, 0.53172, NA, NA), tolerance = 1e-2)
  expect_equal(params$df_error, c(23.3923, 23.3923, 23.3923, 23.3923, 23.3923, NA, NA), tolerance = 1e-2)
  expect_equal(params$CI[[1]], 0.95, tolerance = 1e-2)
  expect_named(
    params,
    c(
      "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high",
      "t / F", "df", "df_error", "p", "Component"
    )
  )
})

# parameters/tests/testthat/test-MCMCglmm.R -------------------------------------
skip_if_not_installed("MCMCglmm")

data(PlodiaPO, package = "MCMCglmm")
set.seed(123)
m1 <- MCMCglmm::MCMCglmm(
  PO ~ plate,
  random = ~FSfamily,
  data = PlodiaPO,
  verbose = FALSE,
  nitt = 1300,
  burnin = 300,
  thin = 1
)

test_that("ci", {
  expect_equal(
    ci(m1)$CI_low,
    c(0.97495, 0.03407),
    tolerance = 0.01
  )
})
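# for MCMC models, the standard errors reported by standard_error() are
# presumably derived from the spread of the posterior draws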
test_that("se", {
  expect_equal(
    standard_error(m1)$SE,
    c(0.02309, 0.00509),
    tolerance = 0.01
  )
})

test_that("p_value", {
  expect_equal(
    p_value(m1)$p,
    c(0, 0),
    tolerance = 0.01
  )
})

test_that("model_parameters", {
  expect_equal(
    model_parameters(m1, centrality = "mean", verbose = FALSE)$Mean,
    c(1.0132, 0.04232),
    tolerance = 0.01
  )
})

test_that("model_parameters", {
  expect_equal(
    model_parameters(m1, centrality = "median", verbose = FALSE)$Median,
    c(1.01382, 0.04207),
    tolerance = 0.01
  )
})

# parameters/tests/testthat/test-standardize_info.R -----------------------------
test_that("standardize_info", {
  skip_if_not_installed("datawizard")
  skip_if_not_installed("nlme")
  skip_if_not_installed("lme4")
  fm1 <- nlme::lme(mpg ~ cyl, mtcars, random = ~ 1 | gear)
  fm2 <- nlme::gls(mpg ~ cyl, mtcars)
  i1 <- standardize_info(fm1)
  i2 <- standardize_info(fm2)
  expect_equal(i1$Deviation_Response_Basic, c(sd(mtcars$mpg), sd(mtcars$mpg)), tolerance = 1e-3)
  expect_equal(i2$Deviation_Response_Basic, c(sd(mtcars$mpg), sd(mtcars$mpg)), tolerance = 1e-3)
  expect_equal(i1$Deviation_Basic, c(0, sd(mtcars$cyl)), tolerance = 1e-3)
  expect_equal(i2$Deviation_Basic, c(0, sd(mtcars$cyl)), tolerance = 1e-3)
})

# parameters/tests/testthat/test-p_calibrate.R ----------------------------------
data(mtcars)
model <- lm(mpg ~ wt + as.factor(gear) + am, data = mtcars)

test_that("p_calibrate model", {
  expect_silent(p_calibrate(model, verbose = FALSE))
  expect_warning({
    out <- p_calibrate(model)
  })
  expect_identical(dim(out), c(5L, 3L))
  expect_named(out, c("Parameter", "p", "p_calibrated"))
  expect_equal(out$p_calibrated, c(0, 5e-05, 0.48261, NA, NA), tolerance = 1e-4)
  expect_warning({
    out <- p_calibrate(model, type = "bayes")
  })
  expect_equal(out$p_calibrated, c(0, 5e-05, 0.93276, NA, NA), tolerance = 1e-4)
})

test_that("p_calibrate numeric", {
  p <- c(0.2, 0.1, 0.05, 0.01, 0.005, 0.001)
  # See Table 1 Sellke et al. doi: 10.1198/000313001300339950
  out <- p_calibrate(p)
  expect_equal(out, c(0.4667, 0.385, 0.2893, 0.1113, 0.0672, 0.0184), tolerance = 1e-3)
  out <- p_calibrate(p, type = "bayes")
  expect_equal(out, c(0.875, 0.6259, 0.4072, 0.1252, 0.072, 0.0188), tolerance = 1e-3)
})

test_that("p_calibrate print", {
  out <- p_calibrate(model, verbose = FALSE)
  ref <- capture.output(print(out))
  expect_identical(
    ref,
    c(
      "Parameter | p | p (calibrated)",
      "------------------------------------------",
      "(Intercept) | < .001 | < .001",
      "wt | < .001 | < .001",
      "as.factor(gear)4 | 0.242 | 0.483 ",
      "as.factor(gear)5 | 0.660 | ",
      "am | 0.925 | ",
      "Calibrated p-values indicate the posterior probability of H0."
    )
  )
})
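# Reference for the values above (Sellke, Bayarri & Berger 2001, Table 1,
# cited in "p_calibrate numeric"): the Bayes factor bound is
# -e * p * log(p) for p < 1/e, and the calibrated posterior probability of
# H0 is (1 + (-e * p * log(p))^-1)^-1; e.g., for p = 0.2 this gives 0.875
# ("bayes") and 0.4667 (default), matching the expectations above.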
# parameters/tests/testthat/test-gam.R ------------------------------------------
skip_if_not_installed("mgcv")

set.seed(123)
dat <- mgcv::gamSim(1, n = 400, dist = "normal", scale = 2, verbose = FALSE)
m1 <- mgcv::gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat)

test_that("ci", {
  expect_equal(
    ci(m1)$CI_low,
    c(7.771085, NA, NA, NA, NA),
    tolerance = 1e-2
  )
})

test_that("se", {
  expect_equal(
    standard_error(m1)$SE,
    c(0.1020741, NA, NA, NA, NA),
    tolerance = 1e-2
  )
})

test_that("p_value", {
  expect_equal(
    p_value(m1)$p,
    c(0, 0, 0, 0, 0.00196),
    tolerance = 1e-2
  )
})

skip_on_cran()

mp <- model_parameters(m1)
test_that("model_parameters", {
  expect_snapshot(mp)
})

# parameters/tests/testthat/test-model_parameters.pairwise.htest.R --------------
test_that("model_parameters.pairwise.htest", {
  data(airquality)
  airquality$Month <- factor(airquality$Month, labels = month.abb[5:9])
  model <- pairwise.t.test(airquality$Ozone, airquality$Month)
  mp <- model_parameters(model)
  expect_equal(
    mp$Group1,
    c("Jun", "Jul", "Jul", "Aug", "Aug", "Aug", "Sep", "Sep", "Sep", "Sep")
  )
  expect_equal(
    mp$p,
    c(1, 0.00026, 0.05113, 0.00019, 0.04987, 1, 1, 1, 0.00488, 0.00388),
    tolerance = 1e-3
  )

  smokers <- c(83, 90, 129, 70)
  patients <- c(86, 93, 136, 82)
  model <- suppressWarnings(pairwise.prop.test(smokers, patients))
  mp <- model_parameters(model)
  expect_equal(
    mp$Group1,
    c("2", "3", "3", "4", "4", "4")
  )
  expect_equal(
    mp$p,
    c(1, 1, 1, 0.11856, 0.09322, 0.12377),
    tolerance = 1e-3
  )
})

# parameters/tests/testthat/test-gee.R ------------------------------------------
skip_if_not_installed("gee")

data(warpbreaks)
void <- capture.output({
  m1_gee <- suppressMessages(gee::gee(breaks ~ tension, id = wool, data = warpbreaks))
})

test_that("ci", {
  expect_equal(
    suppressMessages(ci(m1_gee))$CI_low,
    c(30.90044, -17.76184, -22.48406),
    tolerance = 1e-3
  )
})

test_that("se", {
  expect_equal(
    standard_error(m1_gee)$SE,
    c(2.80028, 3.96019, 3.96019),
    tolerance = 1e-3
  )
})

test_that("p_value", {
  expect_equal(
    p_value(m1_gee)$p,
    c(0, 0.01157, 2e-04),
    tolerance = 1e-3
  )
})

mp <- suppressWarnings(model_parameters(m1_gee))
test_that("model_parameters", {
  expect_equal(
    mp$Coefficient,
    c(36.38889, -10, -14.72222),
    tolerance = 1e-3
  )
})

# parameters/tests/testthat/test-pipe.R -----------------------------------------
skip_on_cran()
skip_if(getRversion() < "4.2.0")

test_that("print in pipe", {
  data(iris)
  out <- capture.output({
    lm(Sepal.Length ~ Petal.Length + Species, data = iris) |>
      model_parameters() |>
      print(include_reference = TRUE)
  })
  expect_identical(
    out[5],
    "Species [setosa] | 0.00 | | | | "
  )
})

skip_if_not_installed("withr")

withr::with_options(
  list(easystats_table_width = Inf),
  test_that("print in pipe, on-the-fly factor", {
    data(mtcars)
    out <- capture.output({
      mtcars |>
        lm(mpg ~ cut(wt, c(0, 2.5, 3, 5)), data = _) |>
        model_parameters(include_reference = TRUE)
    })
    expect_identical(
      out[4],
      "cut(wt, c(0, 2.5, 3, 5)) [>0-2.5] | 0.00 | | | | "
    )
  })
)

# parameters/tests/testthat/test-printing2.R ------------------------------------
skip_if_not_installed("withr")
skip_if(getRversion() < "4.0.0")

withr::with_options(
  list(parameters_interaction = "*", easystats_table_width = Inf),
  {
    lm1 <- lm(Sepal.Length ~ Species, data = iris)
    lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris)
    lm3 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris)
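    # compare_parameters() merges the coefficient tables of the supplied
    # models into one object; the snapshots below pin down its printed layout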
    # Basic -------
    out <- compare_parameters(lm1, lm2, lm3)
    test_that("multiple model", {
      expect_snapshot(print(out))
    })

    # templates --------------
    out <- compare_parameters(lm1, lm2, lm3, select = "se_p")
    test_that("templates", {
      expect_snapshot(print(out))
    })

    out <- compare_parameters(lm1, lm2, lm3, select = "{estimate}{stars} ({se})")
    test_that("templates, glue-1", {
      expect_snapshot(print(out))
    })

    out <- compare_parameters(lm1, lm2, lm3, select = "{estimate} ({ci_low}, {ci_high}), p={p}{stars}")
    test_that("templates, glue-2", {
      expect_snapshot(print(out))
    })

    out <- compare_parameters(lm1, lm2, lm3, select = "{estimate} ({se})|{p}")
    test_that("templates, glue-3, separate columnns", {
      expect_snapshot(print(out))
    })

    # grouping parameters --------------
    lm1 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris)
    lm2 <- lm(Sepal.Width ~ Species * Petal.Length, data = iris)

    # remove intercept
    out <- compare_parameters(lm1, lm2, drop = "^\\(Intercept")
    test_that("templates, glue-3, separate columnns", {
      expect_snapshot(
        print(out, groups = list(
          Species = c(
            "Species [versicolor]",
            "Species [virginica]"
          ),
          Interactions = c(
            "Species [versicolor] * Petal Length",
            "Species [virginica] * Petal Length"
          ),
          Controls = "Petal Length"
        ))
      )
      expect_snapshot(
        print(out, groups = list(
          Species = c(
            "Species [versicolor]",
            "Species [virginica]"
          ),
          Interactions = c(
            "Species [versicolor] * Petal Length",
            "Species [virginica] * Petal Length"
          ),
          Controls = "Petal Length"
        ), select = "{estimate}{stars}")
      )
      expect_snapshot(
        print(out, groups = list(
          Species = c(
            "Species [versicolor]",
            "Species [virginica]"
          ),
          Interactions = c(
            "Species [versicolor] * Petal Length",
            "Species [virginica] * Petal Length"
          ),
          Controls = "Petal Length"
        ), select = "{estimate}|{p}")
      )
    })

    test_that("combination of different models", {
      skip_on_cran()
      skip_if_not_installed("glmmTMB")
      data("fish")
      m0 <- glm(count ~ child + camper, data = fish, family = poisson())
      m1 <- glmmTMB::glmmTMB(
        count ~ child + camper + (1 | persons) + (1 | ID),
        data = fish,
        family = poisson()
      )
      m2 <- glmmTMB::glmmTMB(
        count ~ child + camper + zg + (1 | ID),
        ziformula = ~ child + (1 | persons),
        data = fish,
        family = glmmTMB::truncated_poisson()
      )
      cp <- compare_parameters(m0, m1, m2, effects = "all", component = "all")
      expect_snapshot(print(cp))
    })
  }
)

withr::with_options(
  list(parameters_interaction = "*"),
  {
    lm1 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris)
    lm2 <- lm(Sepal.Width ~ Species * Petal.Length, data = iris)

    # remove intercept
    out <- compare_parameters(lm1, lm2, drop = "^\\(Intercept")
    test_that("templates, glue-3, separate columnns", {
      expect_snapshot(
        print(out, groups = list(
          Species = c(
            "Species [versicolor]",
            "Species [virginica]"
          ),
          Interactions = c(
            "Species [versicolor] * Petal Length",
            "Species [virginica] * Petal Length"
          ),
          Controls = "Petal Length"
        ), select = "{estimate}|{p}")
      )
    })
  }
)

# parameters/tests/testthat/test-marginaleffects.R ------------------------------
skip_if_not_installed("marginaleffects", minimum_version = "0.29.0")
skip_if_not_installed("rstanarm")

test_that("marginaleffects()", {
  # Frequentist
  x <- lm(Sepal.Width ~ Species * Petal.Length, data = iris)
  model <- marginaleffects::avg_slopes(
    x,
    newdata = insight::get_datagrid(x, by = "Species"),
    variables = "Petal.Length"
  )
  out <- model_parameters(model)
  expect_identical(nrow(out), 1L)
  cols <- c(
    "Parameter", "Comparison", "Coefficient", "SE", "Statistic",
    "p", "S", "CI", "CI_low", "CI_high"
  )
  expect_true(all(cols %in% colnames(out)))
  out <- model_parameters(model, exponentiate = TRUE)
  expect_equal(out$Coefficient, 1.394, tolerance = 1e-3)

  # Bayesian
  x <- suppressWarnings(rstanarm::stan_glm(
    Sepal.Width ~ Species * Petal.Length,
    data = iris, refresh = 0, iter = 100, chains = 1
  ))
  model <- marginaleffects::avg_slopes(
    x,
    newdata = insight::get_datagrid(x, by = "Species"),
    variables = "Petal.Length"
  )
  expect_identical(nrow(parameters(model)), 1L)

  # remove redundant columns
  skip_if_not_installed("mgcv")
  data(iris)
  model <- mgcv::gam(Sepal.Width ~ s(Petal.Length, by = Species), data = iris)
  mfx <- marginaleffects::avg_slopes(model, variables = "Petal.Length")
  out <- model_parameters(mfx)
  expect_identical(dim(out), c(1L, 10L))
  expect_named(
    out,
    c(
      "Parameter", "Comparison", "Coefficient", "SE", "Statistic",
      "p", "S", "CI", "CI_low", "CI_high"
    )
  )
  mfx <- marginaleffects::avg_slopes(model, variables = "Petal.Length", by = "Species")
  out <- model_parameters(mfx)
  expect_identical(dim(out), c(3L, 11L))
  expect_named(
    out,
    c(
      "Parameter", "Comparison", "Species", "Coefficient", "SE",
      "Statistic", "p", "S", "CI", "CI_low", "CI_high"
    )
  )
})

test_that("predictions()", {
  x <- lm(Sepal.Width ~ Species * Petal.Length, data = iris)
  p <- marginaleffects::avg_predictions(x, by = "Species")
  out <- model_parameters(p)
  expect_identical(nrow(out), 3L)
  expect_named(out, c(
    "Predicted", "Species", "SE", "CI", "CI_low", "CI_high",
    "S", "Statistic", "df", "p"
  ))
  out <- model_parameters(p, exponentiate = TRUE)
  expect_equal(out$Predicted, c(30.81495, 15.95863, 19.57004), tolerance = 1e-4)
})

test_that("comparisons()", {
  data(iris)
  # Frequentist
  x <- lm(Sepal.Width ~ Species * Petal.Length, data = iris)
  m <- marginaleffects::avg_comparisons(
    x,
    newdata = insight::get_datagrid(x, by = "Species"),
    variables = "Petal.Length"
  )
  expect_identical(nrow(model_parameters(m)), 1L)
  out <- model_parameters(m, exponentiate = TRUE)
  expect_equal(out$Coefficient, 1.393999, tolerance = 1e-4)

  # Bayesian
  x <- suppressWarnings(rstanarm::stan_glm(
    Sepal.Width ~ Species * Petal.Length,
    data = iris, refresh = 0, iter = 100, chains = 1
  ))
  m <- marginaleffects::avg_slopes(
    x,
    newdata = insight::get_datagrid(x, by = "Species"),
    variables = "Petal.Length"
  )
  expect_identical(nrow(parameters(m)), 1L)
})

test_that("hypotheses()", {
  data(mtcars)
  x <- lm(mpg ~ hp + wt, data = mtcars)
  m <- marginaleffects::hypotheses(x, "hp = wt")
  expect_identical(nrow(model_parameters(m)), 1L)
})

test_that("slopes()", {
  m <- lm(Sepal.Width ~ Species * Petal.Length, data = iris)
  x <- marginaleffects::slopes(m,
    variables = "Petal.Length",
    newdata = insight::get_datagrid(m, by = "Species")
  )
  out <- model_parameters(x)
  expect_named(
    out,
    c(
      "rowid", "Parameter", "Comparison", "Coefficient", "SE",
      "Statistic", "p", "S", "CI", "CI_low", "CI_high",
      "Species", "Petal.Length", "Predicted"
    )
  )
  expect_identical(dim(out), c(3L, 14L))
})

test_that("multiple contrasts: Issue #779", {
  skip_if(getRversion() < "4.0.0")
  data(mtcars)
  mod <- lm(mpg ~ as.factor(gear) * as.factor(cyl), data = mtcars)
  cmp <- suppressWarnings(marginaleffects::comparisons(
    mod,
    variables = c("gear", "cyl"),
    newdata = insight::get_datagrid(mod, by = c("gear", "cyl")),
    cross = TRUE
  ))
  cmp <- suppressWarnings(model_parameters(cmp))
  expect_true("Comparison: gear" %in% colnames(cmp))
  expect_true("Comparison: cyl" %in% colnames(cmp))
})

test_that("model_parameters defaults to FALSE: Issue #916", {
  data(mtcars)
  mod <- lm(mpg ~ wt, data = mtcars)
  pred <- marginaleffects::predictions(mod, newdata = marginaleffects::datagrid(wt = c(1, 2)))
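  # the default must behave like exponentiate = FALSE, so both calls below
  # should return identical predictions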
  out1 <- model_parameters(pred)
  out2 <- model_parameters(pred, exponentiate = FALSE)
  expect_equal(out1$Predicted, out2$Predicted, tolerance = 1e-4)
})

test_that("digits and ci_digits for marginaleffects", {
  data(mtcars)
  skip_if(getRversion() < "4.2.0")
  out <- lm(mpg ~ wt, data = mtcars) |>
    marginaleffects::hypotheses(hypothesis = "10*wt = 0") |>
    model_parameters(digits = 1)
  expect_snapshot(out)
})

test_that("preserve columns with same name as reserved words", {
  data(mtcars)
  skip_if(getRversion() < "4.2.0")
  skip_if_not_installed("modelbased")
  set.seed(1234)
  x <- rnorm(200)
  z <- rnorm(200)
  # quadratic relationship
  y <- 2 * x + x^2 + 4 * z + rnorm(200)
  d <- data.frame(x, y, z)
  model <- lm(y ~ x + z, data = d)
  pred <- modelbased::estimate_means(model, c("x", "z"))
  expect_named(pred, c("x", "z", "Mean", "SE", "CI_low", "CI_high", "t", "df"))
})

test_that("predictions, bmrs with special response formula", {
  skip_on_ci()
  skip_on_cran()
  skip_if_not_installed("curl")
  skip_if_offline()
  skip_if_not_installed("httr2")
  skip_if_not_installed("brms")
  skip_if_not_installed("marginaleffects", minimum_version = "0.29.0")
  m <- insight::download_model("brms_ipw_1")
  skip_if(is.null(m))
  x <- marginaleffects::avg_predictions(m, variables = "treatment", hypothesis = ~pairwise)
  out <- model_parameters(x)
  expect_identical(dim(out), c(1L, 10L))
})

test_that("modelbased, tidiers work", {
  skip_if_not_installed("marginaleffects", minimum_version = "0.29.0")
  skip_if_not_installed("modelbased", minimum_version = "0.12.0.17")
  skip_if(getRversion() < "4.5.0")
  data(penguins)
  m <- lm(bill_len ~ island * sex + bill_dep + species, data = penguins)
  out <- modelbased::estimate_contrasts(m, "island", by = "sex", comparison = ratio ~ pairwise)
  expect_named(
    out,
    c("Level1", "Level2", "sex", "Ratio", "SE", "CI_low", "CI_high", "t", "df", "p")
  )
  expect_identical(dim(out), c(6L, 10L))

  datagrid <- insight::get_datagrid(m, by = c("island", "sex"), factors = "all")
  out <- marginaleffects::avg_predictions(
    model = m,
    variables = c("island", "sex"),
    newdata = datagrid,
    hypothesis = ratio ~ pairwise | sex
  )
  params <- model_parameters(out)
  expect_named(
    params,
    c(
      "Parameter", "Predicted", "SE", "CI", "CI_low", "CI_high",
      "S", "Statistic", "df", "p", "sex"
    )
  )
  expect_identical(dim(params), c(6L, 11L))

  out <- modelbased::estimate_contrasts(m, "island", by = "sex", comparison = ratio ~ inequality)
  expect_named(out, c("sex", "Mean_Ratio", "SE", "CI_low", "CI_high", "z", "p"))
  expect_identical(dim(out), c(2L, 7L))

  datagrid <- insight::get_datagrid(m, by = c("island", "sex"), factors = "all")
  out <- marginaleffects::avg_predictions(
    model = m,
    variables = c("island", "sex"),
    newdata = datagrid,
    hypothesis = ratio ~ pairwise | sex
  )
  out <- marginaleffects::hypotheses(out, hypothesis = ~I(mean(abs(x))) | sex)
  params <- model_parameters(out)
  expect_named(
    params,
    c("Coefficient", "SE", "Statistic", "p", "S", "CI", "CI_low", "CI_high", "sex")
  )
  expect_identical(dim(params), c(2L, 9L))
})

test_that("predictions, using bayestestR #1063", {
  # Following test may fail on CI, probably due to scoping issues?
  # ── Error (test-marginaleffects.R:179:3): predictions, using bayestestR #1063 ───
  # Error in ``[.data.frame`(data, random_factors)`: undefined columns selected
  # Backtrace:
  #     ▆
  #  1. ├─insight::get_datagrid(m, by = "Days", include_random = TRUE) at test-marginaleffects.R:179:3
  #  2. └─insight:::get_datagrid.default(m, by = "Days", include_random = TRUE)
  #  3.   ├─base::lapply(data[random_factors], as.factor)
  #  4.   ├─data[random_factors]
  #  5.   └─base::`[.data.frame`(data, random_factors)

  ## TODO: check this test locally
  skip("TODO: check this test locally, fails on CI, probably due to scoping issues?")
  skip_on_ci()
  skip_on_cran()
  skip_if_not_installed("curl")
  skip_if_offline()
  skip_if_not_installed("httr2")
  skip_if_not_installed("brms")
  skip_if_not_installed("marginaleffects", minimum_version = "0.29.0")
  m <- insight::download_model("brms_mixed_3")
  skip_if(is.null(m))
  d <- insight::get_datagrid(m, by = "Days", include_random = TRUE)
  x <- marginaleffects::avg_predictions(m, newdata = d, by = "Days", allow_new_levels = TRUE)
  out <- model_parameters(x)
  expect_named(
    out,
    c(
      "Median", "CI", "CI_low", "CI_high", "pd", "ROPE_CI",
      "ROPE_low", "ROPE_high", "ROPE_Percentage", "Days"
    )
  )
})

# parameters/tests/testthat/test-pca.R ------------------------------------------
skip_if_not_installed("psych")
skip_if_not_installed("nFactors")
skip_if_not_installed("GPArotation")

test_that("principal_components", {
  x <- principal_components(mtcars[, 1:7], rotation = "varimax")
  expect_equal(
    x$RC1,
    c(
      -0.836114674884308, 0.766808147590597, 0.85441780762136,
      0.548502661888057, -0.889046093964722, 0.931879020871552,
      -0.030485507571411
    ),
    tolerance = 0.01
  )
  expect_named(x, c("Variable", "RC1", "RC2", "Complexity", "Uniqueness", "MSA"))
  expect_identical(dim(predict(x)), c(32L, 2L))
  expect_named(predict(x, names = c("A", "B")), c("A", "B"))
  expect_identical(nrow(predict(x, newdata = mtcars[1:3, 1:7])), 3L)
})

test_that("principal_components, n", {
  data(iris)
  x <- principal_components(iris[1:4], n = 2)
  expect_named(x, c("Variable", "PC1", "PC2", "Complexity"))
  x <- principal_components(iris[1:4], n = 1)
  expect_named(x, c("Variable", "PC1", "Complexity"))
})

test_that("principal_components", {
  x <- principal_components(mtcars[, 1:7])
  expect_equal(
    x$PC1,
    c(
      -0.930866058535747, 0.9578708009312, 0.952846253483008,
      0.874493647245971, -0.746868056938478, 0.882509152331738,
      -0.541093678419456
    ),
    tolerance = 0.01
  )
  expect_named(x, c("Variable", "PC1", "PC2", "Complexity"))
  expect_identical(dim(predict(x)), c(32L, 2L))
})

# print ----
test_that("print model_parameters pca", {
  data(mtcars)
  expect_snapshot(print(principal_components(mtcars[, 1:4], n = "auto")))
  expect_snapshot(print(
    principal_components(mtcars[, 1:4], n = "auto"),
    labels = c(
      "Miles/(US) gallon",
      "Number of cylinders",
      "Displacement (cu.in.)",
      "Gross horsepower"
    )
  ))
})

# predict ----------------------
# N.B tests will fail if `GPArotation` package is not installed
test_that("predict model_parameters fa", {
  d <- na.omit(psych::bfi[, 1:25])
  model <- psych::fa(d, nfactors = 5)
  mp <- model_parameters(model, sort = TRUE, threshold = "max")
  pr <- suppressMessages(
    predict(mp, names = c("Neuroticism", "Conscientiousness", "Extraversion", "Agreeableness", "Opennness"))
  )
  out <- head(pr, 5)
  expect_equal(
    out$Neuroticism,
    c(-0.22242, 0.1618, 0.61907, -0.11692, -0.17372),
    tolerance = 0.01
  )
  expect_equal(
    out$Opennness,
    c(-1.6092, -0.17222, 0.23341, -1.06152, -0.66086),
    tolerance = 0.01
  )
  expect_identical(nrow(predict(mp, keep_na = FALSE)), 2436L)
  expect_identical(nrow(predict(mp, newdata = d[1:10, ], keep_na = FALSE)), 10L)
  expect_named(
    predict(mp, names = c("A", "B", "C", "D", "E"), keep_na = FALSE),
    c("A", "B", "C", "D", "E")
  )
  model <- factor_analysis(d, n = 5, rotation = "none")
  expect_identical(nrow(predict(model, keep_na = FALSE)), 2436L)
})

unloadNamespace("GPArotation")
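# Illustrative sketch (not executed by the suite): predict() on a
# principal_components()/fa() result returns component or factor scores per
# observation, which is the behavior the predict() expectations above rely
# on, e.g. something along the lines of
#   pcs <- principal_components(mtcars[, 1:7], n = 2)
#   head(predict(pcs, names = c("PC1", "PC2")))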
liggesusersskip_if_not_installed("withr") # make sure we have the correct interaction mark for tests withr::with_options( list(parameters_interaction = "*", easystats_table_width = Inf), { data(iris) m1 <- lm(Sepal.Length ~ Species, data = iris) m2 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) counts <- c(18, 17, 15, 20, 10, 20, 25, 13, 12) outcome <- gl(3, 1, 9) treatment <- gl(3, 3) m3 <- glm(counts ~ outcome + treatment, family = poisson()) x <- compare_parameters(m1, m2, m3) test_that("compare_parameters, default", { expect_identical( colnames(x), c( "Parameter", "Component", "Effects", "Coefficient.m1", "SE.m1", "CI.m1", "CI_low.m1", "CI_high.m1", "t.m1", "df_error.m1", "p.m1", "Coefficient.m2", "SE.m2", "CI.m2", "CI_low.m2", "CI_high.m2", "t.m2", "df_error.m2", "p.m2", "Log-Mean.m3", "SE.m3", "CI.m3", "CI_low.m3", "CI_high.m3", "z.m3", "df_error.m3", "p.m3" ) ) out <- capture.output(x) expect_length(out, 14) out <- format(x, select = "ci") expect_identical(colnames(out), c("Parameter", "m1", "m2", "m3")) expect_identical( out$Parameter, c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length", "outcome [2]", "outcome [3]", "treatment [2]", "treatment [3]", NA, "Observations" ) ) }) x <- compare_parameters(m1, m2, m3, select = "se_p2") test_that("compare_parameters, se_p2", { expect_identical( colnames(x), c( "Parameter", "Component", "Effects", "Coefficient.m1", "SE.m1", "CI.m1", "CI_low.m1", "CI_high.m1", "t.m1", "df_error.m1", "p.m1", "Coefficient.m2", "SE.m2", "CI.m2", "CI_low.m2", "CI_high.m2", "t.m2", "df_error.m2", "p.m2", "Log-Mean.m3", "SE.m3", "CI.m3", "CI_low.m3", "CI_high.m3", "z.m3", "df_error.m3", "p.m3" ) ) out <- capture.output(x) expect_length(out, 14) out <- format(x, select = "se_p2") expect_identical( colnames(out), c( "Parameter", "Coefficient (SE) (m1)", "p (m1)", "Coefficient (SE) (m2)", "p (m2)", "Log-Mean (SE) (m3)", "p (m3)" ) ) expect_identical( out$Parameter, c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length", "outcome [2]", "outcome [3]", "treatment [2]", "treatment [3]", NA, "Observations" ) ) }) data(mtcars) m1 <- lm(mpg ~ wt, data = mtcars) m2 <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") test_that("compare_parameters, column name with escaping regex characters", { out <- utils::capture.output(compare_parameters(m1, m2, column_names = c("linear model (m1)", "logistic reg. (m2)"))) expect_identical(out[1], "Parameter | linear model (m1) | logistic reg. 
(m2)") }) data(mtcars) m1 <- lm(mpg ~ hp, mtcars) m2 <- lm(mpg ~ hp, mtcars) test_that("compare_parameters, proper printing for CI=NULL #820", { expect_snapshot(compare_parameters(m1, m2, ci = NULL)) }) skip_on_cran() test_that("compare_parameters, correct random effects", { suppressWarnings(skip_if_not_installed("glmmTMB")) skip_if_not(getRversion() >= "4.0.0") data("fish") m0 <- glm(count ~ child + camper, data = fish, family = poisson()) m1 <- glmmTMB::glmmTMB( count ~ child + camper + (1 | persons) + (1 | ID), data = fish, family = poisson() ) m2 <- glmmTMB::glmmTMB( count ~ child + camper + zg + (1 | ID), ziformula = ~ child + (1 | persons), data = fish, family = glmmTMB::truncated_poisson() ) cp <- compare_parameters(m0, m1, m2, effects = "all", component = "all") expect_snapshot(cp) }) test_that("compare_parameters, print_md-1", { skip_if_not_installed("lme4") skip_if_not_installed("knitr") skip_if_not_installed("tinytable") data(sleepstudy, package = "lme4") set.seed(1234) sleepstudy$grp <- as.factor(sample.int(3, nrow(sleepstudy), replace = TRUE)) lm1 <- lme4::lmer(Reaction ~ Days + grp + (1 | Subject), data = sleepstudy) lm2 <- lme4::lmer(Reaction ~ Days * grp + (1 | Subject), data = sleepstudy) cp <- compare_parameters(lm1, lm2, select = "{estimate} ({ci})|{p}", drop = "^\\(Intercept") out <- print_md(cp, groups = list( Groups = c(2, 3), Interactions = c(4, 5), Controls = 1 )) expect_snapshot(print(out)) }) test_that("compare_parameters, print_md-2", { skip_if_not_installed("lme4") skip_if_not_installed("knitr") skip_if_not_installed("tinytable") data(sleepstudy, package = "lme4") set.seed(1234) sleepstudy$grp <- as.factor(sample.int(3, nrow(sleepstudy), replace = TRUE)) lm1 <- lme4::lmer(Reaction ~ Days + grp + (1 | Subject), data = sleepstudy) lm2 <- lme4::lmer(Reaction ~ Days * grp + (1 | Subject), data = sleepstudy) cp <- compare_parameters(lm1, lm2, select = "{estimate} ({ci})|{p}", drop = "^\\(Intercept") out <- display(cp, groups = list( Groups = c(2, 3), Interactions = c(4, 5), Controls = 1 ), format = "tt" ) expect_snapshot(print(out)) }) test_that("compare_parameters, print_md-3", { skip_if_not_installed("lme4") skip_if_not_installed("knitr") skip_if_not_installed("tinytable") data(sleepstudy, package = "lme4") set.seed(1234) sleepstudy$grp <- as.factor(sample.int(3, nrow(sleepstudy), replace = TRUE)) lm1 <- lme4::lmer(Reaction ~ Days + grp + (1 | Subject), data = sleepstudy) lm2 <- lme4::lmer(Reaction ~ Days * grp + (1 | Subject), data = sleepstudy) cp <- compare_parameters(lm1, lm2, select = "{estimate} ({ci})|{p}", drop = "^\\(Intercept", effects = "all") expect_snapshot(print_md(cp)) expect_snapshot(display(cp, format = "tt")) }) test_that("compare_parameters, print_md-4", { skip_if_not_installed("lme4") skip_if_not_installed("knitr") skip_if_not_installed("tinytable") data(sleepstudy, package = "lme4") set.seed(1234) sleepstudy$grp <- as.factor(sample.int(3, nrow(sleepstudy), replace = TRUE)) lm1 <- lme4::lmer(Reaction ~ Days + grp + (1 | Subject), data = sleepstudy) lm2 <- lme4::lmer(Reaction ~ Days * grp + (1 | Subject), data = sleepstudy) # with reference level cp <- compare_parameters(lm1, lm2, select = "{estimate} ({ci})|{p}", drop = "^\\(Intercept", include_reference = TRUE) out <- print_md(cp, groups = list( Groups = 2:4, Interactions = 5:6, Controls = 1 )) expect_snapshot(print(out)) }) test_that("compare_parameters, print_md-5", { skip_if_not_installed("lme4") skip_if_not_installed("knitr") skip_if_not_installed("tinytable") data(sleepstudy, package = 
"lme4") set.seed(1234) sleepstudy$grp <- as.factor(sample.int(3, nrow(sleepstudy), replace = TRUE)) lm1 <- lme4::lmer(Reaction ~ Days + grp + (1 | Subject), data = sleepstudy) lm2 <- lme4::lmer(Reaction ~ Days * grp + (1 | Subject), data = sleepstudy) # with reference level cp <- compare_parameters(lm1, lm2, drop = "^\\(Intercept", include_reference = TRUE) out <- print_md(cp, groups = list( Groups = 2:4, Interactions = 5:6, Controls = 1 )) expect_snapshot(print(out)) # error cp <- compare_parameters(lm1, lm2, select = "{estimate} ({ci})|{p}", drop = "^\\(Intercept") expect_error( print_md(cp, groups = list( Groups = c(2, 3), Interactions = c(4, 5), Controls = "XDays" )), regex = "Some group indices" ) expect_error( print_md(cp, groups = list( Groups = 1:2, Interactions = 4:5, Controls = 10 )), regex = "Some group indices" ) # output identical for both calls cp1 <- compare_parameters(lm1, lm2, select = "{estimate} ({ci})|{p}", drop = "^\\(Intercept") out1 <- capture.output(print_md(cp1, groups = list( Groups = 2:3, Interactions = 4:5, Controls = 1 ))) cp2 <- compare_parameters( lm1, lm2, select = "{estimate} ({ci})|{p}", drop = "^\\(Intercept", groups = list( Groups = c(2, 3), Interactions = c(4, 5), Controls = 1 ) ) out2 <- capture.output(print_md(cp2)) expect_identical(out1, out2) }) } ) skip_on_cran() skip_if_not_installed("blme") skip_if_not_installed("glmmTMB") skip_if_not_installed("lme4") test_that("compare_parameters, works with blmer and glmmTMB", { data(sleepstudy, package = "lme4") control <- lme4::lmerControl(check.conv.grad = "ignore") fm1 <- blme::blmer(Reaction ~ Days + (0 + Days | Subject), sleepstudy, control = control, cov.prior = gamma ) fm2 <- glmmTMB::glmmTMB(Reaction ~ Days + (1 + Days | Subject), sleepstudy) expect_silent(compare_parameters(fm1, fm2)) }) parameters/tests/testthat/test-Hmisc.R0000644000176200001440000000210215111301621017544 0ustar liggesusersskip_on_cran() test_that("issue 697", { skip_if_not_installed("Hmisc") skip_if_not_installed("rms") # for some reason, Hmisc::transcan() doesn't find na.retain (which is an internal # function in Hmisc) na.retain <<- Hmisc:::na.retain set.seed(1) n <- 100 df <- data.frame( y = round(runif(n), 2), x1 = sample(c(-1, 0, 1), n, TRUE), x2 = sample(c(-1, 0, 1), n, TRUE) ) df$x1[c(0, 1, 2)] <- NA imputer <- suppressWarnings(Hmisc::transcan( ~ x1 + x2, data = df, imputed = TRUE, n.impute = 2, pr = FALSE, pl = FALSE )) suppressWarnings( mod <- Hmisc::fit.mult.impute( y ~ x1 + x2, fitter = rms::orm, xtrans = imputer, data = df, pr = FALSE ) ) expect_s3_class(parameters(mod), "parameters_model") expect_s3_class(standard_error(mod), "data.frame") expect_s3_class(p_value(mod), "data.frame") expect_identical(nrow(parameters(mod)), 3L) expect_identical(nrow(standard_error(mod)), 3L) expect_identical(nrow(p_value(mod)), 3L) }) parameters/tests/testthat/test-helper.R0000644000176200001440000000076214716604201020004 0ustar liggesusersskip_on_cran() skip_if_not_installed("withr") withr::with_options( list(easystats_errors = TRUE), test_that(".safe works with options", { expect_error(parameters:::.safe(mean(fd)), regex = "object 'fd' not found") expect_identical(parameters:::.safe(mean(fd), 1L), 1L) expect_identical(parameters:::.safe(mean(c(1, 2, 3))), 2) }) ) test_that(".safe works", { expect_null(parameters:::.safe(mean(fd))) expect_identical(parameters:::.safe(mean(c(1, 2, 3))), 2) }) parameters/tests/testthat/test-random_effects_ci.R0000644000176200001440000003540215111301621022144 0ustar liggesusersskip_on_cran() 
skip_on_os("mac") skip_if_not_installed("lme4") data(sleepstudy, package = "lme4") data(cake, package = "lme4") set.seed(123) sleepstudy$Months <- sample.int(4, nrow(sleepstudy), TRUE) m1 <- suppressMessages(lme4::lmer( angle ~ temperature + (temperature | recipe) + (temperature | replicate), data = cake )) m2 <- suppressMessages(lme4::lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy)) m3 <- suppressMessages(lme4::lmer( angle ~ temperature + (temperature | recipe), data = cake )) m4 <- suppressMessages(lme4::lmer( angle ~ temperature + (temperature | replicate), data = cake )) m5 <- suppressMessages(lme4::lmer( Reaction ~ Days + (Days + Months | Subject), data = sleepstudy )) ## TODO also check messages for profiled CI expect_message( { mp1 <- model_parameters(m1, ci_random = TRUE) }, regex = "meaningful" ) mp2 <- model_parameters(m2, ci_random = TRUE) expect_message( { mp3 <- model_parameters(m3, ci_random = TRUE) }, regex = "meaningful" ) expect_message( { mp4 <- model_parameters(m4, ci_random = TRUE) }, regex = "meaningful" ) expect_message( { mp5 <- model_parameters(m5, ci_random = TRUE) }, regex = "meaningful" ) # model 1 --------------------- test_that("random effects CIs, two slopes, categorical", { expect_equal( mp1$CI_low, c( 28.75568, 4.97893, -1.95002, -2.69995, -3.62201, -2.69102, 4.28558, 0.21474, 0.40062, 0.10169, 0.04953, 1e-05, 0.55398, 0, 2e-05, 0.6333, 1.09851, 0.00944, -0.65406, -0.69103, -1, -0.95271, -0.90617, -1, -1, -1, -1, -1, -1, -0.99802, -1, -0.75274, -0.99836, -1, -0.96895, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4.07985 ), tolerance = 1e-3, ignore_attr = TRUE ) expect_identical( mp1$Parameter, c( "(Intercept)", "temperature.L", "temperature.Q", "temperature.C", "temperature^4", "temperature^5", "SD (Intercept)", "SD (Intercept)", "SD (temperature.L)", "SD (temperature.Q)", "SD (temperature.C)", "SD (temperature^4)", "SD (temperature^5)", "SD (temperature.L)", "SD (temperature.Q)", "SD (temperature.C)", "SD (temperature^4)", "SD (temperature^5)", "Cor (Intercept~temperature.L)", "Cor (Intercept~temperature.Q)", "Cor (Intercept~temperature.C)", "Cor (Intercept~temperature^4)", "Cor (Intercept~temperature^5)", "Cor (Intercept~temperature.L)", "Cor (Intercept~temperature.Q)", "Cor (Intercept~temperature.C)", "Cor (Intercept~temperature^4)", "Cor (Intercept~temperature^5)", "Cor (temperature.L~temperature.Q)", "Cor (temperature.L~temperature.C)", "Cor (temperature.L~temperature^4)", "Cor (temperature.L~temperature^5)", "Cor (temperature.Q~temperature.C)", "Cor (temperature.Q~temperature^4)", "Cor (temperature.Q~temperature^5)", "Cor (temperature.C~temperature^4)", "Cor (temperature.C~temperature^5)", "Cor (temperature^4~temperature^5)", "Cor (temperature.L~temperature.Q)", "Cor (temperature.L~temperature.C)", "Cor (temperature.L~temperature^4)", "Cor (temperature.L~temperature^5)", "Cor (temperature.Q~temperature.C)", "Cor (temperature.Q~temperature^4)", "Cor (temperature.Q~temperature^5)", "Cor (temperature.C~temperature^4)", "Cor (temperature.C~temperature^5)", "Cor (temperature^4~temperature^5)", "SD (Observations)" ) ) expect_identical( mp1$Group, c( "", "", "", "", "", "", "replicate", "recipe", "replicate", "replicate", "replicate", "replicate", "replicate", "recipe", "recipe", "recipe", "recipe", "recipe", "replicate", "replicate", "replicate", "replicate", "replicate", "recipe", "recipe", "recipe", "recipe", "recipe", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", 
"replicate", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "Residual" ) ) }) # model 2 --------------------- test_that("random effects CIs, simple slope", { expect_equal( mp2$CI_low, c(237.93546, 7.41637, 15.5817, 3.91828, -0.50907, 22.80044), tolerance = 1e-3, ignore_attr = TRUE ) expect_identical( mp2$Parameter, c( "(Intercept)", "Days", "SD (Intercept)", "SD (Days)", "Cor (Intercept~Days)", "SD (Observations)" ) ) expect_identical(mp2$Group, c("", "", "Subject", "Subject", "Subject", "Residual")) }) # model 3 --------------------- test_that("random effects CIs, categorical slope-1", { expect_equal( mp3$CI_low[14:28], c(-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 7.09933), tolerance = 1e-2, ignore_attr = TRUE ) expect_equal( mp3$CI_low[1:12], c( 30.91139, 4.33247, -2.6798, -3.20703, -4.07681, -3.27237, 0.06301, 0, 0, 0.1192, 0.32213, 0 ), tolerance = 1e-2, ignore_attr = TRUE ) expect_identical( mp3$Parameter, c( "(Intercept)", "temperature.L", "temperature.Q", "temperature.C", "temperature^4", "temperature^5", "SD (Intercept)", "SD (temperature.L)", "SD (temperature.Q)", "SD (temperature.C)", "SD (temperature^4)", "SD (temperature^5)", "Cor (Intercept~temperature.L)", "Cor (Intercept~temperature.Q)", "Cor (Intercept~temperature.C)", "Cor (Intercept~temperature^4)", "Cor (Intercept~temperature^5)", "Cor (temperature.L~temperature.Q)", "Cor (temperature.L~temperature.C)", "Cor (temperature.L~temperature^4)", "Cor (temperature.L~temperature^5)", "Cor (temperature.Q~temperature.C)", "Cor (temperature.Q~temperature^4)", "Cor (temperature.Q~temperature^5)", "Cor (temperature.C~temperature^4)", "Cor (temperature.C~temperature^5)", "Cor (temperature^4~temperature^5)", "SD (Observations)" ) ) expect_identical( mp3$Group, c( "", "", "", "", "", "", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "Residual" ) ) }) # model 4 --------------------- test_that("random effects CIs, categorical slope-2", { expect_equal( mp4$CI_low, c( 28.88523, 4.96796, -1.93239, -1.98597, -2.68858, -2.5524, 4.27899, 0.35378, 0.08109, 0.03419, 0, 0.49982, -0.68893, -0.71984, -1, -0.96725, -0.92158, -1, -0.99894, -1, -0.80378, -0.99924, -1, -0.9778, -1, -1, -1, 4.21143 ), tolerance = 1e-3, ignore_attr = TRUE ) expect_identical( mp4$Parameter, c( "(Intercept)", "temperature.L", "temperature.Q", "temperature.C", "temperature^4", "temperature^5", "SD (Intercept)", "SD (temperature.L)", "SD (temperature.Q)", "SD (temperature.C)", "SD (temperature^4)", "SD (temperature^5)", "Cor (Intercept~temperature.L)", "Cor (Intercept~temperature.Q)", "Cor (Intercept~temperature.C)", "Cor (Intercept~temperature^4)", "Cor (Intercept~temperature^5)", "Cor (temperature.L~temperature.Q)", "Cor (temperature.L~temperature.C)", "Cor (temperature.L~temperature^4)", "Cor (temperature.L~temperature^5)", "Cor (temperature.Q~temperature.C)", "Cor (temperature.Q~temperature^4)", "Cor (temperature.Q~temperature^5)", "Cor (temperature.C~temperature^4)", "Cor (temperature.C~temperature^5)", "Cor (temperature^4~temperature^5)", "SD (Observations)" ) ) expect_identical( mp4$Group, c( "", "", "", "", "", "", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", 
"replicate", "replicate", "replicate", "replicate", "Residual" ) ) }) # model 5 --------------------- test_that("random effects CIs, double slope", { expect_equal( mp5$CI_low, c(237.99863, 7.4022, 12.63814, 0.58664, 0, -0.58599, -1, -1, 22.65226), tolerance = 1e-3, ignore_attr = TRUE ) expect_identical( mp5$Parameter, c( "(Intercept)", "Days", "SD (Intercept)", "SD (Days)", "SD (Months)", "Cor (Intercept~Days)", "Cor (Intercept~Months)", "Cor (Days~Months)", "SD (Observations)" ) ) expect_identical( mp5$Group, c( "", "", "Subject", "Subject", "Subject", "Subject", "Subject", "Subject", "Residual" ) ) }) # no random intercept -------------------------- test_that("random effects CIs, simple slope", { data(sleepstudy, package = "lme4") set.seed(123) sleepstudy$Months <- sample.int(4, nrow(sleepstudy), TRUE) m2 <- lme4::lmer(Reaction ~ Days + (0 + Days | Subject), data = sleepstudy) m5 <- lme4::lmer(Reaction ~ Days + (0 + Days + Months | Subject), data = sleepstudy) mp2 <- model_parameters(m2) mp5 <- model_parameters(m5) expect_equal( mp2$CI_low, c(243.47155, 6.77765, 5.09041, 26.01525), tolerance = 1e-3, ignore_attr = TRUE ) expect_identical( mp2$Parameter, c("(Intercept)", "Days", "SD (Days)", "SD (Observations)") ) expect_equal( mp5$CI_low, c(241.61021, 7.43503, 4.11446, 2.69857, -0.40595, 24.632), tolerance = 1e-3, ignore_attr = TRUE ) expect_identical( mp5$Parameter, c( "(Intercept)", "Days", "SD (Days)", "SD (Months)", "Cor (Days~Months)", "SD (Observations)" ) ) }) # poly random slope -------------------------- test_that("random effects CIs, poly slope", { data(cake, package = "lme4") suppressMessages({ m <- lme4::lmer( angle ~ poly(temp, 2) + (poly(temp, 2) | replicate) + (1 | recipe), data = cake ) }) mp <- model_parameters(m, ci_random = TRUE) expect_equal( mp$CI_low, c( 28.7884, 33.56318, -12.84259, 4.27435, 0.16222, 7.78988, 0.87668, -0.8172, -1, -1, 4.32855 ), tolerance = 1e-3, ignore_attr = TRUE ) expect_identical( mp$Parameter, c( "(Intercept)", "poly(temp, 2)1", "poly(temp, 2)2", "SD (Intercept)", "SD (Intercept)", "SD (poly(temp, 2)1)", "SD (poly(temp, 2)2)", "Cor (Intercept~poly(temp, 2)1)", "Cor (Intercept~poly(temp, 2)2)", "Cor (poly(temp, 2)1~poly(temp, 2)2)", "SD (Observations)" ) ) }) # poly and categorical random slope -------------------------- test_that("random effects CIs, poly categorical slope", { ## NOTE check back every now and then and see if tests still work skip("works interactively") m <- lme4::lmer( angle ~ poly(temp, 2) + (poly(temp, 2) | replicate) + (temperature | recipe), data = cake ) mp <- model_parameters(m, effects = "random") expect_equal( mp$CI_low, c( 4.27846, 0.22005, 8.22659, 1.17579, 0, 5e-05, 0.37736, 1.24258, 0, -0.77207, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, 4.22056 ), tolerance = 1e-3, ignore_attr = TRUE ) expect_identical( mp$Parameter, c( "SD (Intercept)", "SD (Intercept)", "SD (poly(temp, 2)1)", "SD (poly(temp, 2)2)", "SD (temperature.L)", "SD (temperature.Q)", "SD (temperature.C)", "SD (temperature^4)", "SD (temperature^5)", "Cor (Intercept~poly(temp, 2)1)", "Cor (Intercept~poly(temp, 2)2)", "Cor (Intercept~temperature.L)", "Cor (Intercept~temperature.Q)", "Cor (Intercept~temperature.C)", "Cor (Intercept~temperature^4)", "Cor (Intercept~temperature^5)", "Cor (poly(temp, 2)1~poly(temp, 2)2)", "Cor (temperature.L~temperature.Q)", "Cor (temperature.L~temperature.C)", "Cor (temperature.L~temperature^4)", "Cor (temperature.L~temperature^5)", "Cor (temperature.Q~temperature.C)", "Cor 
(temperature.Q~temperature^4)", "Cor (temperature.Q~temperature^5)", "Cor (temperature.C~temperature^4)", "Cor (temperature.C~temperature^5)", "Cor (temperature^4~temperature^5)", "SD (Observations)" ) ) expect_identical( mp$Group, c( "replicate", "recipe", "replicate", "replicate", "recipe", "recipe", "recipe", "recipe", "recipe", "replicate", "replicate", "recipe", "recipe", "recipe", "recipe", "recipe", "replicate", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "Residual" ) ) }) parameters/tests/testthat/test-model_parameters.aov.R0000644000176200001440000001202415057525051022632 0ustar liggesusersskip_on_cran() iris$Cat1 <- rep_len(c("X", "X", "Y"), nrow(iris)) iris$Cat2 <- rep_len(c("A", "B"), nrow(iris)) # aov ---------------------------------- test_that("model_parameters.aov", { skip_if_not_installed("effectsize") model <- aov(Sepal.Width ~ Species, data = iris) mp <- suppressMessages(model_parameters(model, es_type = c("omega", "eta", "epsilon"))) expect_identical(mp$Parameter, c("Species", "Residuals")) expect_equal(mp$Sum_Squares, c(11.34493, 16.962), tolerance = 1e-3) }) test_that("model_parameters.aov", { skip_if_not_installed("effectsize") model <- aov(Sepal.Width ~ Species, data = iris) mp <- suppressMessages(model_parameters(model, es_type = c("omega", "eta", "epsilon"))) expect_identical(sum(mp$df), 149) expect_named(mp, c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2", "Eta2", "Epsilon2" )) model <- aov(Sepal.Length ~ Species * Cat1 * Cat2, data = iris) expect_identical(sum(model_parameters(model, es_type = c("omega", "eta", "epsilon"), verbose = FALSE)$df), 149) model <- aov(Sepal.Length ~ Species / Cat1 * Cat2, data = iris) expect_identical(sum(model_parameters(model, verbose = FALSE)$df), 149) }) test_that("model_parameters.anova", { skip_if_not_installed("lme4") model <- anova(lm(Sepal.Width ~ Species, data = iris)) expect_identical(sum(model_parameters(model)$df), 149L) model <- anova(lm(Sepal.Length ~ Species * Cat1 * Cat2, data = iris)) expect_identical(sum(model_parameters(model)$df), 149L) model <- anova(lme4::lmer(wt ~ 1 + (1 | gear), data = mtcars)) expect_identical(nrow(model_parameters(model)), 0L) model <- anova(lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars)) expect_identical(sum(model_parameters(model)$df), 1L) model <- anova(lme4::lmer(wt ~ drat + cyl + (1 | gear), data = mtcars)) expect_identical(sum(model_parameters(model)$df), 2L) model <- anova(lme4::lmer(wt ~ drat * cyl + (1 | gear), data = mtcars)) expect_identical(sum(model_parameters(model, verbose = FALSE)$df), 3L) model <- anova(lme4::lmer(wt ~ drat / cyl + (1 | gear), data = mtcars)) expect_identical(sum(model_parameters(model, verbose = FALSE)$df), 2L) }) test_that("model_parameters.anova", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") model <- insight::download_model("anova_3") skip_if(is.null(model)) out <- model_parameters(model, verbose = FALSE) expect_identical(sum(out$df), 149L) expect_identical(dim(out), c(6L, 6L)) model <- insight::download_model("anova_4") skip_if(is.null(model)) expect_identical(sum(model_parameters(model, verbose = FALSE)$df, na.rm = TRUE), 2) model <- insight::download_model("anova_lmerMod_5") skip_if(is.null(model)) expect_identical(sum(model_parameters(model, verbose = FALSE)$df), 1L) model <- insight::download_model("anova_lmerMod_6") skip_if(is.null(model)) expect_identical(sum(model_parameters(model, verbose = FALSE)$df), 12) }) 
test_that("model_parameters.anova", { model <- aov(wt ~ cyl + Error(gear), data = mtcars) expect_identical(sum(model_parameters(model, verbose = FALSE)$df), 31) model <- aov(Sepal.Length ~ Species * Cat1 + Error(Cat2), data = iris) expect_identical(sum(model_parameters(model, verbose = FALSE)$df), 149) model <- aov(Sepal.Length ~ Species / Cat1 + Error(Cat2), data = iris) expect_identical(sum(model_parameters(model, verbose = FALSE)$df), 149) }) test_that("model_parameters.aov - table_wide", { skip_if_not_installed("effectsize") skip_if_not_installed("datawizard") data("iris") # can't use the pipe yet :( iris_long <- datawizard::data_modify(iris, id = seq_along(Species)) iris_long <- datawizard::data_to_long(iris_long, select = colnames(iris)[1:4]) iris_long <- datawizard::data_separate(iris_long, select = "name", separator = "\\.", new_columns = c("attribute", "measure") ) mod1 <- stats::aov( formula = value ~ attribute * measure + Error(id), data = iris_long ) mod2 <- stats::aov( formula = value ~ attribute * measure + Error(id / (attribute * measure)), data = iris_long ) mp1 <- model_parameters(mod1, eta_squared = "partial", ci = 0.95, table_wide = TRUE) mp2 <- model_parameters(mod2, eta_squared = "partial", ci = 0.95, table_wide = TRUE) expect_identical(nrow(mp1), 3L) expect_identical(nrow(mp2), 6L) mod1 <- aov(yield ~ N * P * K + Error(block), data = npk) out1 <- model_parameters(mod1, table_wide = FALSE) out2 <- model_parameters(mod1, table_wide = TRUE) idx <- which(out1$Parameter == "Residuals") expect_true(all(out2$Sum_Squares_Error %in% out1$Sum_Squares[idx])) expect_true(all(out1$Sum_Squares[idx] %in% out2$Sum_Squares_Error)) expect_true(all(out2$Mean_Square_Error %in% out1$Mean_Square[idx])) expect_true(all(out1$Mean_Square[idx] %in% out2$Mean_Square_Error)) expect_true(all(out2$df_error %in% out1$df[idx])) expect_true(all(out1$df[idx] %in% out2$df_error)) }) parameters/tests/testthat/test-dominance_analysis.R0000644000176200001440000000410714726272305022371 0ustar liggesusersskip_if_not_installed("performance") skip_if_not_installed("domir") skip_if_not_installed("datawizard") DA_test_model <- lm(mpg ~ vs + cyl + carb, data = mtcars) DA_performance <- dominance_analysis(DA_test_model) DA_domir <- domir::domin(mpg ~ vs + cyl + carb, lm, list(performance::r2, "R2"), data = mtcars) test_that("dominance_analysis$general_dominance", { gnrl_domir <- c(NA, DA_domir$General_Dominance) names(gnrl_domir) <- NULL gnrl_da <- DA_performance$General$General_Dominance expect_identical(gnrl_domir, gnrl_da) }) test_that("dominance_analysis$conditional_dominance", { cdl_domir <- DA_domir$Conditional_Dominance dimnames(cdl_domir) <- c(NULL, NULL) cdl_da <- as.matrix(DA_performance$Conditional[, -1]) dimnames(cdl_da) <- c(NULL, NULL) expect_identical(cdl_domir, cdl_da) }) test_that("dominance_analysis$complete_dominance", { cpt_domir <- DA_domir$Complete_Dominance dimnames(cpt_domir) <- list(NULL, NULL) cpt_da <- t(DA_performance$Complete[, -1]) dimnames(cpt_da) <- list(NULL, NULL) expect_identical(cpt_domir, cpt_da) }) DA_performance2 <- dominance_analysis( DA_test_model, all = ~vs, sets = c(~carb), complete = FALSE, conditional = FALSE ) DA_domir2 <- domir::domin( mpg ~ cyl, lm, list(performance::r2, "R2"), all = "vs", sets = list("carb"), data = mtcars, complete = FALSE, conditional = FALSE ) test_that("dominance_analysis$general_dominance with sets/all", { domir_all_sub_r2 <- DA_domir2$Fit_Statistic_All_Subsets names(domir_all_sub_r2) <- NULL expect_identical( domir_all_sub_r2, 
with(DA_performance2$General, General_Dominance[Subset == "all"]) ) gnrl_domir2 <- DA_domir2$General_Dominance names(gnrl_domir2) <- NULL gnrl_da2 <- aggregate( DA_performance2$General$General_Dominance, list(DA_performance2$General$Subset), mean ) gnrl_da2 <- gnrl_da2[which(gnrl_da2$Group.1 %in% c("cyl", "set1")), ] gnrl_da2 <- gnrl_da2$x names(gnrl_da2) <- NULL expect_identical(gnrl_domir2, gnrl_da2) }) parameters/tests/testthat/test-estimatr.R0000644000176200001440000000065514413515226020360 0ustar liggesuserstest_that("multivariate used to break: Insight Issue #618", { skip_if_not_installed("estimatr") # multivariate mod1 <- estimatr::lm_robust(cbind(mpg, qsec) ~ cyl + disp, data = mtcars) m <- model_parameters(mod1) expect_s3_class(m, "parameters_model") # univariate mod2 <- estimatr::lm_robust(mpg ~ cyl + disp, data = mtcars) m <- model_parameters(mod2) expect_s3_class(m, "parameters_model") }) parameters/tests/testthat/test-model_parameters.hurdle.R0000644000176200001440000000136614413515226023335 0ustar liggesuserstest_that("model_parameters.hurdle", { skip_if_not_installed("pscl") set.seed(123) data("bioChemists", package = "pscl") model <- pscl::hurdle(formula = art ~ ., data = bioChemists, zero = "geometric") params <- model_parameters(model) expect_equal( params$SE, c( 0.12246, 0.06522, 0.07283, 0.04845, 0.0313, 0.00228, 0.29552, 0.15911, 0.18082, 0.11113, 0.07956, 0.01302 ), tolerance = 1e-3 ) expect_equal( params$Coefficient, unname(coef(model)), tolerance = 1e-3 ) expect_equal( params$z, unname(c(coef(summary(model))[[1]][, 3], coef(summary(model))[[2]][, 3])), tolerance = 1e-3 ) }) parameters/tests/testthat/test-model_parameters.afex_aov.R0000644000176200001440000000574715013566107023652 0ustar liggesuserstest_that("afex_aov", { skip_if_not_installed("afex") data(obk.long, package = "afex") m_between <- suppressMessages(suppressWarnings( afex::aov_car(value ~ treatment * gender + Error(id), data = obk.long) )) m_within <- suppressMessages(suppressWarnings( afex::aov_car(value ~ Error(id / (phase * hour)), data = obk.long) )) mp1 <- model_parameters(m_between, verbose = FALSE) mp2 <- model_parameters(m_within, verbose = FALSE) expect_equal(c(nrow(mp1), ncol(mp1)), c(4, 7)) expect_equal(mp1$Sum_Squares, c(11.98202, 5.56322, 8.68275, 15.2037), tolerance = 1e-3) expect_equal(c(nrow(mp2), ncol(mp2)), c(3, 9)) expect_equal(mp2$Sum_Squares, c(167.5, 106.29167, 11.08333), tolerance = 1e-3) expect_named( mp1, c("Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Method") ) expect_named( mp2, c( "Parameter", "Sum_Squares", "Sum_Squares_Error", "df", "df_error", "Mean_Square", "F", "p", "Method" ) ) # include intercept out <- model_parameters(m_between, verbose = FALSE, include_intercept = TRUE) expect_identical(dim(out), c(5L, 7L)) expect_identical( out$Parameter, c("(Intercept)", "treatment", "gender", "treatment:gender", "Residuals") ) }) test_that("afex_aov, p-adjustement", { skip_if_not_installed("afex") data(laptop_urry, package = "afex") afx <- afex::aov_4( overall ~ condition * talk + (1 | pid), data = laptop_urry, anova_table = list(p_adjust_method = "bonferroni") ) out1 <- model_parameters(afx, ci = 0.95) out2 <- model_parameters(afx, ci = 0.95, p_adjust = "bonferroni") expect_identical(dim(out1), c(4L, 7L)) expect_equal(out1$Sum_Squares, c(115.01087, 6703.72241, 1944.0391, 29101.23396), tolerance = 1e-3) expect_named( out1, c("Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Method") ) expect_equal(out1$p, c(1, 0, 0.2157, NA), tolerance = 1e-3) 
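  # `afx` was fitted with p_adjust_method = "bonferroni" in its anova_table,
  # so `out1` is already adjusted; requesting `p_adjust = "bonferroni"` again
  # for `out2` must therefore reproduce the same p-values.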
expect_equal(out2$p, c(1, 0, 0.2157, NA), tolerance = 1e-3) afx <- afex::aov_4( overall ~ condition * talk + (1 | pid), data = laptop_urry ) out3 <- model_parameters(afx, ci = 0.95) out4 <- model_parameters(afx, ci = 0.95, p_adjust = "bonferroni") expect_equal(out3$p, c(0.4714, 0, 0.0719, NA), tolerance = 1e-3) expect_equal(out4$p, c(1, 0, 0.2157, NA), tolerance = 1e-3) }) test_that("afex_aov_ez, p-adjustement", { skip_if_not_installed("afex") data(obk.long, package = "afex") a2 <- afex::aov_ez( "id", "value", data = obk.long, between = c("treatment", "gender"), within = c("phase", "hour"), observed = "gender", anova_table = list(p_adjust_method = "fdr") ) out <- model_parameters(a2) expect_equal(a2$anova_table$`Pr(>F)`, out$p, tolerance = 1e-4) expect_identical(dim(out), c(15L, 9L)) expect_named( out, c( "Parameter", "Sum_Squares", "Sum_Squares_Error", "df", "df_error", "Mean_Square", "F", "p", "Method" ) ) }) parameters/tests/testthat/test-p_function.R0000644000176200001440000000434014716604201020665 0ustar liggesusersdata(iris) model <- lm(Sepal.Length ~ Species, data = iris) test_that("p_function ci-levels", { out <- p_function(model) expect_equal( out$CI_low, c( 4.982759, 0.897132, 1.549132, 4.956774, 0.860384, 1.512384, 4.92192, 0.811093, 1.463093, 4.862126, 0.726531, 1.378531 ), tolerance = 1e-4 ) expect_identical(dim(out), c(12L, 5L)) expect_equal( out$CI, c(0.25, 0.25, 0.25, 0.5, 0.5, 0.5, 0.75, 0.75, 0.75, 0.95, 0.95, 0.95), tolerance = 1e-4 ) ref <- ci(model) expect_equal( out$CI_low[out$CI == 0.95], ref$CI_low, tolerance = 1e-4 ) ref <- ci(model, ci = 0.5) expect_equal( out$CI_low[out$CI == 0.5], ref$CI_low, tolerance = 1e-4 ) out <- p_function(model, ci_levels = c(0.3, 0.6, 0.9)) expect_equal( out$CI, c(0.3, 0.3, 0.3, 0.6, 0.6, 0.6, 0.9, 0.9, 0.9), tolerance = 1e-4 ) skip_if_not_installed("sandwich") out <- p_function(model, vcov = "HC3") expect_equal( out$CI_low, c( 4.989925, 0.901495, 1.548843, 4.971951, 0.869624, 1.511772, 4.947844, 0.826875, 1.462047, 4.906485, 0.753538, 1.376742 ), tolerance = 1e-4 ) }) test_that("p_function keep-drop", { out <- p_function(model, keep = "Speciesversicolor") expect_identical(dim(out), c(4L, 5L)) expect_equal( out$CI, c(0.25, 0.5, 0.75, 0.95), tolerance = 1e-4 ) expect_identical( out$Parameter, c( "Speciesversicolor", "Speciesversicolor", "Speciesversicolor", "Speciesversicolor" ) ) }) test_that("p_function print", { out <- p_function(model) ref <- capture.output(print(out)) expect_identical( ref, c( "Consonance Function", "", "Parameter | 25% CI | 50% CI | 75% CI | 95% CI", "--------------------------------------------------------------------------------", "(Intercept) | [4.98, 5.03] | [4.96, 5.06] | [4.92, 5.09] | [4.86, 5.15]", "Species [versicolor] | [0.90, 0.96] | [0.86, 1.00] | [0.81, 1.05] | [0.73, 1.13]", "Species [virginica] | [1.55, 1.61] | [1.51, 1.65] | [1.46, 1.70] | [1.38, 1.79]" ) ) }) parameters/tests/testthat/test-nestedLogit.R0000644000176200001440000000501214433114017020774 0ustar liggesusersskip_if_not_installed("nestedLogit") skip_if_not_installed("broom") skip_if_not_installed("car") skip_if_not_installed("carData") test_that("model_parameters.nestedLogit", { data(Womenlf, package = "carData") comparisons <- nestedLogit::logits( work = nestedLogit::dichotomy("not.work", working = c("parttime", "fulltime")), full = nestedLogit::dichotomy("parttime", "fulltime") ) mnl1 <- nestedLogit::nestedLogit( partic ~ hincome + children, dichotomies = comparisons, data = Womenlf ) out <- model_parameters(mnl1) expect_identical( 
out$Parameter, c( "(Intercept)", "hincome", "childrenpresent", "(Intercept)", "hincome", "childrenpresent" ) ) expect_equal( out$Coefficient, unname(c(coef(mnl1)[, 1], coef(mnl1)[, 2])), ignore_attr = TRUE, tolerance = 1e-3 ) expect_equal( out$SE, unname(do.call(rbind, lapply(summary(mnl1), coef))[, "Std. Error"]), ignore_attr = TRUE, tolerance = 1e-3 ) expect_equal( out$CI_low, c(0.60591, -0.08226, -2.16144, 2.11087, -0.18921, -3.80274), ignore_attr = TRUE, tolerance = 1e-3 ) out <- model_parameters(mnl1, ci_method = "wald") expect_equal( out$CI_low, c(0.58367, -0.08108, -2.14847, 1.97427, -0.184, -3.71194), ignore_attr = TRUE, tolerance = 1e-3 ) out <- model_parameters(mnl1, exponentiate = TRUE) expect_equal( out$Coefficient, exp(unname(c(coef(mnl1)[, 1], coef(mnl1)[, 2]))), ignore_attr = TRUE, tolerance = 1e-3 ) out <- model_parameters(mnl1, vcov = "HC3") expect_equal( out$SE, c(0.41738, 0.02256, 0.29565, 0.76467, 0.0373, 0.56165), ignore_attr = TRUE, tolerance = 1e-3 ) out <- model_parameters(mnl1, component = "work") expect_identical(nrow(out), 3L) }) test_that("simulate_parameters.nestedLogit", { skip_if(getRversion() < "4.2.0") skip_on_os(c("linux", "mac")) data(Womenlf, package = "carData") comparisons <- nestedLogit::logits( work = nestedLogit::dichotomy("not.work", working = c("parttime", "fulltime")), full = nestedLogit::dichotomy("parttime", "fulltime") ) mnl1 <- nestedLogit::nestedLogit( partic ~ hincome + children, dichotomies = comparisons, data = Womenlf ) set.seed(123) out <- simulate_parameters(mnl1, iterations = 100) expect_equal( out$Coefficient, c(1.35612, -0.04667, -1.59096, 3.45594, -0.10316, -2.69807), tolerance = 1e-3 ) }) parameters/tests/testthat/test-glmer.R0000644000176200001440000000506114736731407017643 0ustar liggesusersskip_on_cran() skip_if_not_installed("lme4") data("cbpp", package = "lme4") set.seed(123) model <- lme4::glmer( cbind(incidence, size - incidence) ~ period + (1 | herd), data = cbpp, family = binomial(), nAGQ = 0 ) params <- model_parameters(model, effects = "fixed") test_that("model_parameters.glmer", { expect_equal(params$SE, c(0.22758, 0.30329, 0.32351, 0.42445), tolerance = 1e-2) }) test_that("print model_parameters", { skip_if_not_installed("withr") skip_if_not_installed("merDeriv") withr::local_options( list( parameters_exponentiate = TRUE, parameters_warning_exponentiate = TRUE ) ) expect_snapshot(params) suppressMessages({ mp <- model_parameters(model, effects = "all", exponentiate = TRUE) }) expect_snapshot(mp) set.seed(123) model <- lme4::glmer( cbind(incidence, size - incidence) ~ period + (1 | herd), data = cbpp, family = binomial(), nAGQ = 2 ) mp <- model_parameters(model, effects = "all") expect_snapshot(mp) }) test_that("model_parameters.glmer ml1", { params <- model_parameters(model, ci_method = "ml1", effects = "fixed") expect_equal(params$SE, c(0.22758, 0.30329, 0.32351, 0.42445), tolerance = 1e-2) expect_equal(params$df, c(54, 54, 54, 54), tolerance = 1e-2) }) test_that("model_parameters.glmer betwithin", { params <- model_parameters(model, ci_method = "betwithin", effects = "fixed") expect_equal(params$SE, c(0.23009, 0.30433, 0.32476, 0.42632), tolerance = 1e-2) expect_equal(params$df, c(822, 822, 822, 822), tolerance = 1e-2) }) set.seed(123) cbpp$time <- runif(nrow(cbpp), 1, 4) model <- lme4::glmer( cbind(incidence, size - incidence) ~ period + time + (1 + time | herd), data = cbpp, family = binomial(), nAGQ = 0 ) test_that("model_parameters.glmer", { params <- model_parameters(model, effects = "fixed") 
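  # five fixed effects now: the intercept, three dummies for the four-level
  # factor `period`, plus the continuous `time` slope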
expect_equal(params$SE, c(0.66539, 0.36178, 0.36223, 0.45528, 0.2379), tolerance = 1e-2) }) test_that("model_parameters.glmer ml1", { params <- model_parameters(model, ci_method = "ml1", effects = "fixed") expect_equal(params$SE, c(0.66539, 0.36178, 0.36223, 0.45528, 0.2379), tolerance = 1e-2) expect_equal(params$df, c(53, 53, 53, 53, 53), tolerance = 1e-2) }) test_that("model_parameters.glmer betwithin", { params <- model_parameters(model, ci_method = "betwithin", effects = "fixed") expect_equal(params$SE, c(0.66539, 0.36178, 0.36223, 0.45528, 0.2379), tolerance = 1e-2) expect_equal(params$df, c(821, 821, 821, 821, 9), tolerance = 1e-2) }) parameters/tests/testthat/test-model_parameters.glmgee.R0000644000176200001440000000076114716604201023306 0ustar liggesusersskip_on_cran() skip_if_not_installed("glmtoolbox") skip_if_not_installed("withr") withr::with_options( list(parameters_exponentiate = FALSE), test_that("model_parameters.glmgee", { data(spruces, package = "glmtoolbox") fit1 <- glmtoolbox::glmgee( size ~ poly(days, 4) + treat, id = tree, family = Gamma("log"), corstr = "AR-M-dependent(1)", data = spruces ) out <- model_parameters(fit1) expect_snapshot(print(out)) }) ) parameters/tests/testthat/test-mira.R0000644000176200001440000000075214413515226017456 0ustar liggesusersskip_if_not_installed("mice") data("nhanes2", package = "mice") imp <- mice::mice(nhanes2, printFlag = FALSE) fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl)) mp1 <- model_parameters(fit) mp2 <- summary(mice::pool(fit)) test_that("param", { expect_equal(mp1$Parameter, as.vector(mp2$term)) }) test_that("coef", { expect_equal(mp1$Coefficient, mp2$estimate, tolerance = 1e-3) }) test_that("se", { expect_equal(mp1$SE, mp2$std.error, tolerance = 1e-3) }) parameters/tests/testthat/test-printing_reference_level.R0000644000176200001440000000410614716604201023560 0ustar liggesuserstest_that("print in pipe", { data(iris) model <- lm(Sepal.Length ~ Petal.Length + Species, data = iris) out <- model_parameters(model, include_reference = TRUE) expect_identical( out$Parameter, c( "(Intercept)", "Petal.Length", "Speciessetosa", "Speciesversicolor", "Speciesvirginica" ) ) expect_equal(out$Coefficient, c(3.68353, 0.90456, 0, -1.60097, -2.11767), tolerance = 1e-4) out <- model_parameters(model, include_reference = TRUE, pretty_names = FALSE) expect_identical( out$Parameter, c( "(Intercept)", "Petal.Length", "Speciessetosa", "Speciesversicolor", "Speciesvirginica" ) ) expect_equal(out$Coefficient, c(3.68353, 0.90456, 0, -1.60097, -2.11767), tolerance = 1e-4) }) # skip_if(getRversion() < "4.0.0") # test_that("simple reference level", { # data(PlantGrowth) # d <<- PlantGrowth # m <- lm(weight ~ group, data = d) # mp <- model_parameters(m) # expect_snapshot(print(mp, include_reference = TRUE)) # data(mtcars) # d <<- mtcars # d$cyl <- as.factor(d$cyl) # d$am <- as.factor(d$am) # m <- lm(mpg ~ hp + cyl + gear + am, data = d) # mp <- model_parameters(m) # expect_snapshot(print(mp, include_reference = TRUE)) # data(iris) # d <<- iris # m <- lm(Sepal.Length ~ Sepal.Width * Species, data = d) # mp <- model_parameters(m) # expect_snapshot(print(mp, include_reference = TRUE)) # data(mtcars) # d <<- mtcars # d$gear <- as.factor(d$gear) # m <- glm(vs ~ wt + gear, data = d, family = "binomial") # expect_snapshot(print(model_parameters(m, exponentiate = TRUE, drop = "(Intercept)"), include_reference = TRUE)) # }) # test_that("reference for models with multiple components", { # skip_on_cran() # skip_if_not_installed("glmmTMB") # data("fish") # 
m1 <- glmmTMB::glmmTMB( # count ~ child + camper + zg + (1 | ID), # ziformula = ~ child + camper + (1 | persons), # data = fish, # family = glmmTMB::truncated_poisson() # ) # print(model_parameters(m1), include_reference = TRUE) # }) parameters/tests/testthat/test-model_parameters.mle2.R0000644000176200001440000000107314413515226022704 0ustar liggesuserstest_that("model_parameters.mle2", { skip_if_not_installed("bbmle") x <- 0:10 y <- c(26, 17, 13, 12, 20, 5, 9, 8, 5, 4, 8) d <- data.frame(x, y) LL <- function(ymax = 15, xhalf = 6) { -sum(stats::dpois(y, lambda = ymax / (1 + x / xhalf), log = TRUE)) } model <- suppressWarnings(bbmle::mle2(LL)) params <- model_parameters(model) expect_equal(params$SE, c(4.224444, 1.034797), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p") ) }) parameters/tests/testthat/test-model_parameters.BFBayesFactor.R0000644000176200001440000001250114716604201024453 0ustar liggesusersskip_on_os("linux") test_that("model_parameters.BFBayesFactor", { skip_on_cran() skip("TODO") skip_if_not_installed("BayesFactor") model <- BayesFactor::ttestBF(iris$Sepal.Width, iris$Petal.Length, paired = TRUE) expect_equal(model_parameters(model)$BF, c(492.770567186302, NA), tolerance = 1e-2) }) # make sure BF is returned, even if NA # see https://github.com/easystats/correlation/issues/269 test_that("model_parameters.BFBayesFactor", { skip_if_not_installed("BayesFactor") var_x <- c( 12.1, 8.7, 10.1, 17.4, 12.5, 2.7, 6.2, 19.4, 11, 14.5, 15.8, 10.4, 13.5, 3.5, 5.6, 5.2, 6.3, 12.5, 9.8 ) var_y <- c( 11.9, 15.3, 13.9, 6.6, 11.5, 21.35, 17.8, 4.6, 13, 9.5, 8.2, 13.6, 10.5, 20.5, 18.45, 18.8, 17.7, 11.5, 14.2 ) expect_warning({ model <- BayesFactor::correlationBF(var_x, var_y, rscale = "medium") }) params <- model_parameters(model) expect_identical( colnames(params), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "Prior_Distribution", "Prior_Location", "Prior_Scale", "BF", "Method" ) ) expect_true(is.na(params$BF)) }) test_that("model_parameters.BFBayesFactor", { skip_if_not_installed("BayesFactor") model <- BayesFactor::correlationBF(iris$Sepal.Width, iris$Petal.Length) expect_equal(model_parameters(model)$BF, 348853.6, tolerance = 10) }) test_that("model_parameters.BFBayesFactor", { skip_if_not_installed("BayesFactor") set.seed(123) model <- BayesFactor::anovaBF(Sepal.Length ~ Species, data = iris, progress = FALSE) expect_equal( model_parameters(model, centrality = "median")$Median, c(5.8431, -0.8266, 0.092, 0.734, 0.2681, 2.0415), tolerance = 2 ) }) # test_that("model_parameters.BFBayesFactor", { # skip_on_cran() # model <- BayesFactor::ttestBF(formula = mpg ~ am, data = df) # expect_equal(model_parameters(model)$BF, c(86.58973, NA), tolerance = 1) # }) test_that("model_parameters.BFBayesFactor", { skip_if_not_installed("BayesFactor") df <- mtcars df$gear <- as.factor(df$gear) df$am <- as.factor(df$am) set.seed(123) model <- suppressMessages(BayesFactor::anovaBF(mpg ~ gear * am, data = df, progress = FALSE)) expect_equal( suppressMessages(model_parameters(model, centrality = "mean", verbose = FALSE))$Mean, c(20.7099, -3.24884, 3.24884, 26.51413, 5.30506, NA, NA, NA), tolerance = 1L ) }) test_that("model_parameters.BFBayesFactor", { skip_on_cran() skip_if_not_installed("BayesFactor") data(raceDolls, package = "BayesFactor") bf <- BayesFactor::contingencyTableBF(raceDolls, sampleType = "indepMulti", fixedMargin = "cols") mp <- suppressWarnings(model_parameters(bf, centrality = "mean", 
dispersion = TRUE, verbose = FALSE, es_type = "cramers_v", adjust = TRUE, include_proportions = TRUE )) mp2 <- suppressWarnings(model_parameters(bf, verbose = FALSE)) expect_identical( colnames(mp), c( "Parameter", "Mean", "CI", "CI_low", "CI_high", "SD", "Cramers_v_adjusted", "pd", "Prior_Distribution", "Prior_Location", "Prior_Scale", "BF", "Method" ) ) expect_identical(dim(mp), c(6L, 13L)) expect_identical( colnames(mp2), c( "Parameter", "Prior_Distribution", "Prior_Location", "Prior_Scale", "BF", "Method", "CI" ) ) expect_identical(dim(mp2), c(1L, 7L)) }) test_that("model_parameters.BFBayesFactor", { skip_on_cran() skip_if_not_installed("BayesFactor") data(puzzles, package = "BayesFactor") result <- BayesFactor::anovaBF(RT ~ shape * color + ID, data = puzzles, whichRandom = "ID", whichModels = "top", progress = FALSE ) mp <- suppressMessages(model_parameters( result, centrality = "median", dispersion = TRUE, verbose = FALSE )) expect_identical(colnames(mp), c( "Parameter", "Median", "MAD", "CI", "CI_low", "CI_high", "pd", "Prior_Distribution", "Prior_Location", "Prior_Scale", "Effects", "Component", "BF", "Method" )) expect_identical(mp$Effects, c( "fixed", "fixed", "fixed", "fixed", "fixed", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "random", "fixed", "fixed", "fixed", "fixed" )) }) # one-sample t-test test_that("model_parameters.BFBayesFactor, without effectsize", { skip_if_not_installed("BayesFactor") set.seed(123) df_t <- as.data.frame(parameters(BayesFactor::ttestBF(mtcars$wt, mu = 3))) expect_identical( colnames(df_t), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "Prior_Distribution", "Prior_Location", "Prior_Scale", "BF", "Method" ) ) expect_identical(dim(df_t), c(1L, 11L)) }) test_that("model_parameters.BFBayesFactor, with effectsize", { skip_if_not_installed("BayesFactor") set.seed(123) df_t_es <- as.data.frame( parameters(BayesFactor::ttestBF(mtcars$wt, mu = 3), es_type = "cohens_d") ) # TODO: fix column order expect_identical( colnames(df_t_es), c( "Parameter", "Median", "CI", "CI_low", "CI_high", "Cohens_d", "d_CI_low", "d_CI_high", "pd", "Prior_Distribution", "Prior_Location", "Prior_Scale", "BF", "Method" ) ) expect_identical(dim(df_t_es), c(1L, 14L)) }) parameters/tests/testthat/test-model_parameters_mixed_coeforder.R0000644000176200001440000000102614413515226025262 0ustar liggesuserstest_that("model_parameters.mixed.coeforder", { skip_if_not_installed("lme4") set.seed(1) dat <- data.frame( TST.diff = runif(100, 0, 100), Exposition = as.factor(sample(0:2, 100, TRUE)), Gruppe = as.factor(sample(0:1, 100, TRUE)), Kennung = as.factor(sample(1:5, 100, TRUE)) ) m <- lme4::lmer(TST.diff ~ Exposition + Gruppe + Gruppe:Exposition + (1 | Kennung), data = dat) cs <- coef(summary(m)) mp <- model_parameters(m, effects = "fixed") expect_equal(mp$Parameter, rownames(cs)) }) parameters/tests/testthat/test-model_parameters.aov_es_ci.R0000644000176200001440000003243415057525051024003 0ustar liggesusersiris$Cat1 <- rep_len(c("X", "X", "Y"), nrow(iris)) iris$Cat2 <- rep_len(c("A", "B"), nrow(iris)) # aov ---------------------------------- test_that("model_parameters.aov", { skip_if_not_installed("effectsize") model <- aov(Sepal.Width ~ Species, data = iris) mp <- suppressMessages(model_parameters( model, es_type = c("omega", "eta", "epsilon"), ci = 0.9, alternative = "greater" )) es <- suppressMessages(effectsize::omega_squared(model, partial = TRUE, ci = 0.9)) expect_equal(na.omit(mp$Omega2_CI_low), 
es$CI_low, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(mp$Omega2_CI_low, c(0.3122, NA), tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), 1, tolerance = 1e-3, ignore_attr = TRUE) expect_identical( colnames(mp), c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2", "Omega2_CI_low", "Omega2_CI_high", "Eta2", "Eta2_CI_low", "Eta2_CI_high", "Epsilon2", "Epsilon2_CI_low", "Epsilon2_CI_high" ) ) model <- aov(Sepal.Length ~ Species * Cat1 * Cat2, data = iris) mp <- model_parameters(model, es_type = "eta", ci = 0.9, partial = FALSE, alternative = "greater") es <- effectsize::eta_squared(model, partial = FALSE, ci = 0.9) expect_equal(na.omit(mp$Eta2_CI_low), es$CI_low, tolerance = 1e-3, ignore_attr = TRUE) expect_equal( mp$Eta2_CI_low, c(0.5572, 0, 0, 0, 0, 0, 0, NA), tolerance = 1e-3, ignore_attr = TRUE ) expect_equal(na.omit(mp$Eta2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Eta2_CI_high), rep(1, 7), tolerance = 1e-3, ignore_attr = TRUE) expect_identical( colnames(mp), c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Eta2", "Eta2_CI_low", "Eta2_CI_high" ) ) }) # anova --------------------- test_that("model_parameters.anova", { skip_if_not_installed("effectsize") model <- anova(lm(Sepal.Length ~ Species * Cat1 * Cat2, data = iris)) mp <- model_parameters( model, es_type = c("omega", "eta", "epsilon"), partial = TRUE, ci = 0.9, alternative = "greater" ) es <- effectsize::omega_squared(model, partial = TRUE, ci = 0.9) expect_equal(na.omit(mp$Omega2_CI_low), es$CI_low, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_identical(colnames(mp), c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2_partial", "Omega2_CI_low", "Omega2_CI_high", "Eta2_partial", "Eta2_CI_low", "Eta2_CI_high", "Epsilon2_partial", "Epsilon2_CI_low", "Epsilon2_CI_high" )) }) test_that("model_parameters.anova", { skip_if_not_installed("effectsize") model <- aov(wt ~ cyl + Error(gear), data = mtcars) suppressWarnings({ mp <- model_parameters(model, es_type = c("omega", "eta", "epsilon"), partial = TRUE, ci = 0.9) es <- effectsize::omega_squared(model, partial = TRUE, ci = 0.9, verbose = FALSE) }) expect_equal(na.omit(mp$Omega2_CI_low), es$CI_low[2], tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_identical(colnames(mp), c( "Group", "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2_partial", "Omega2_CI_low", "Omega2_CI_high", "Eta2_partial", "Eta2_CI_low", "Eta2_CI_high", "Epsilon2_partial", "Epsilon2_CI_low", "Epsilon2_CI_high" )) }) # car anova --------------------------------- test_that("model_parameters.car-anova", { skip_if_not_installed("car") skip_if_not_installed("carData") skip_if_not_installed("effectsize") data(Moore, package = "carData") set.seed(123) model <- car::Anova(stats::lm( formula = conformity ~ fcategory * partner.status, data = Moore, contrasts = list(fcategory = contr.sum, partner.status = contr.sum) )) mp <- model_parameters(model, es_type = c("omega", "eta", "epsilon"), partial = TRUE, ci = 0.9) es <- effectsize::omega_squared(model, partial = TRUE, ci = 0.9) expect_equal(na.omit(mp$Omega2_CI_low), es$CI_low, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(mp$Omega2_CI_low, c(0, 0.05110, 0.00666, NA), 
tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), rep(1, 3), tolerance = 1e-3, ignore_attr = TRUE) expect_identical(colnames(mp), c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2_partial", "Omega2_CI_low", "Omega2_CI_high", "Eta2_partial", "Eta2_CI_low", "Eta2_CI_high", "Epsilon2_partial", "Epsilon2_CI_low", "Epsilon2_CI_high" )) }) # maov ---------------------------------- test_that("model_parameters.maov", { skip_if_not_installed("effectsize") set.seed(123) fit <- lm(cbind(mpg, disp, hp) ~ factor(cyl), data = mtcars) model <- aov(fit) mp <- suppressMessages(model_parameters( model, es_type = c("omega", "eta", "epsilon"), partial = TRUE, ci = 0.9 )) es <- suppressMessages(effectsize::omega_squared(model, partial = TRUE, ci = 0.9)) expect_equal(na.omit(mp$Omega2_CI_low), es$CI_low, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(mp$Omega2_CI_low, c(0.58067, NA, 0.74092, NA, 0.55331, NA), tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), es$CI_high, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(na.omit(mp$Omega2_CI_high), rep(1, 3), tolerance = 1e-3, ignore_attr = TRUE) expect_identical(colnames(mp), c( "Response", "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p", "Omega2", "Omega2_CI_low", "Omega2_CI_high", "Eta2", "Eta2_CI_low", "Eta2_CI_high", "Epsilon2", "Epsilon2_CI_low", "Epsilon2_CI_high" )) }) # stricter tests --------------------------------------------------------- test_that("works with aov", { skip_on_cran() skip_if_not_installed("effectsize") set.seed(123) npk.aov <- aov(yield ~ block + N * P, npk) set.seed(123) df_aov <- as.data.frame(parameters::model_parameters(npk.aov, ci = 0.95, es_type = c("eta", "omega"), partial = FALSE )) expect_equal( df_aov, structure( list( Parameter = c("block", "N", "P", "N:P", "Residuals"), Sum_Squares = c(343.295, 189.28167, 8.40167, 21.28167, 314.105), df = c(5, 1, 1, 1, 15), Mean_Square = c(68.659, 189.28167, 8.40167, 21.28167, 20.94033), F = c(3.27879, 9.0391, 0.40122, 1.0163, NA), p = c(0.03371, 0.00885, 0.536, 0.32938, NA), Eta2 = c(0.39173, 0.21598, 0.00959, 0.02428, NA), Eta2_CI_low = c(0, 0, 0, 0, NA), Eta2_CI_high = c(1, 1, 1, 1, NA), Omega2 = c(0.2659, 0.18761, -0.01397, 0.00038, NA), Omega2_CI_low = c(0, 0, 0, 0, NA), Omega2_CI_high = c(1, 1, 1, 1, NA) ), row.names = c(NA, 5L), ci = 0.95, model_class = c("aov", "lm"), anova_type = 1, title = "", digits = 2, ci_digits = 2, p_digits = 3, object_name = "npk.aov", class = "data.frame" ), tolerance = 0.1, ignore_attr = TRUE ) }) # aovlist ------------------------------------------------ # test_that("works with aovlist", { # skip_on_cran() # # set.seed(123) # npk.aovE <- aov(yield ~ N * P * K + Error(block), npk) # # set.seed(123) # df_aovE <- # as.data.frame(model_parameters(npk.aovE, # ci = 0.90, # eta_squared = "raw", # omega_squared = "partial" # )) # # expect_equal( # df_aovE, # structure( # list( # Group = c( # "block", # "block", # "Within", # "Within", # "Within", # "Within", # "Within", # "Within", # "Within" # ), # Parameter = c( # "N:P:K", # "Residuals", # "N", # "P", # "K", # "N:P", # "N:K", # "P:K", # "Residuals" # ), # Sum_Squares = c(37, 306.29, 189.28, 8.4, 95.2, 21.28, 33.14, 0.48, 185.29), # df = c(1, 4, 1, 1, 1, 1, 1, 1, 12), # Mean_Square = c(37, 76.57, 189.28, 8.4, 95.2, 21.28, 33.14, 0.48, 15.44), # `F` = c(0.48, NA, 12.26, 0.54, 6.17, 1.38, 2.15, 0.03, NA), # p = c(0.53, NA, 0, 0.47, 
0.03, 0.26, 0.17, 0.86, NA), # Omega2_partial = c(-0.09, NA, 0.23, -0.01, 0.12, 0.01, 0.03, -0.03, NA), # Omega2_CI_low = c(0, NA, 0, 0, 0, 0, 0, 0, NA), # Omega2_CI_high = c(0, NA, 0.52, 0, 0.42, 0.22, 0.29, 0, NA), # Eta2 = c(0.04, NA, 0.22, 0.01, 0.11, 0.02, 0.04, 0, NA), # Eta2_CI_low = c(0, NA, 0, 0, 0, 0, 0, 0, NA), # Eta2_CI_high = c(0.49, NA, 0.51, 0.23, 0.41, 0.28, 0.31, 0.04, NA) # ), # row.names = c(NA, 9L), # class = "data.frame", # ci = 0.9, # model_class = c("aovlist", "listof"), # digits = 2, # ci_digits = 2, # p_digits = 3 # ), # tolerance = 0.1, # ignore_attr = TRUE # ) # }) # manova ------------------------------------------------ test_that("works with manova", { skip_on_cran() skip_if_not_installed("effectsize") set.seed(123) # fake a 2nd response variable foo <- rnorm(24) npk2 <- within(npk, foo) # model m <- manova(cbind(yield, foo) ~ block + N * P * K, npk2) set.seed(123) df_manova <- as.data.frame(model_parameters(m, ci = 0.99, es_type = c("epsilon", "omega"), partial = TRUE )) expect_identical( df_manova$Parameter, c("block", "N", "P", "K", "N:P", "N:K", "P:K", "Residuals") ) expect_identical( colnames(df_manova), c( "Parameter", "Statistic", "df", "df_num", "df_error", "F", "p", "Epsilon2_partial", "Epsilon2_CI_low", "Epsilon2_CI_high", "Omega2_partial", "Omega2_CI_low", "Omega2_CI_high" ) ) expect_equal( df_manova$Statistic, c(0.88, 0.61, 0.07, 0.39, 0.11, 0.17, 0, NA), tolerance = 0.1 ) expect_equal( df_manova$Omega2_CI_low, c(0, 0, 0, 0, 0, 0, 0, NA), tolerance = 0.1 ) expect_equal( df_manova$Omega2_partial, c(0.204, 0.518, 0, 0.262, 0, 0.022, 0, NA), tolerance = 0.1 ) }) # Gam ------------------------------------------------ test_that("works with Gam", { skip_on_cran() skip_if_not_installed("gam") skip_if_not_installed("effectsize") # setup set.seed(123) # model set.seed(123) g <- gam::gam( formula = mpg ~ gam::s(hp, 4) + am + qsec, data = mtcars ) set.seed(123) df_Gam <- as.data.frame(model_parameters(g, ci = 0.50, es_type = "omega", partial = TRUE )) expect_equal( df_Gam, structure( list( Parameter = c("gam::s(hp, 4)", "am", "qsec", "Residuals"), Sum_Squares = c(678.37287, 202.23503, 6.87905, 238.56023), df = c(1, 1, 1, 28), Mean_Square = c(678.37287, 202.23503, 6.87905, 8.52001), `F` = c(79.62115, 23.73648, 0.8074, NA), # nolint p = c(0, 4e-05, 0.37655, NA), Omega2_partial = c(0.71072, 0.41538, -0.00606, NA), Omega2_CI_low = c(0.70634, 0.41067, 0, NA), Omega2_CI_high = c(1, 1, 1, NA) ), row.names = c(NA, 4L), class = "data.frame", ci = 0.5, model_class = c("anova", "data.frame"), digits = 2, ci_digits = 2, p_digits = 3 ), tolerance = 0.1, ignore_attr = TRUE ) }) # anova ------------------------------------------------ test_that("works with anova", { skip_on_cran() skip_if_not_installed("car") skip_if_not_installed("effectsize") set.seed(123) mod <- car::Anova(stats::lm( formula = conformity ~ fcategory * partner.status, data = Moore, contrasts = list(fcategory = contr.sum, partner.status = contr.sum) )) set.seed(123) df_car <- as.data.frame(model_parameters(mod, ci = 0.89, es_type = c("eta", "epsilon"), partial = FALSE )) expect_equal( df_car, structure( list( Parameter = c( "fcategory", "partner.status", "fcategory:partner.status", "Residuals" ), Sum_Squares = c(11.61, 212.21, 175.49, 817.76), df = c(2, 1, 2, 39), Mean_Square = c(5.81, 212.21, 87.74, 20.97), F = c(0.28, 10.12, 4.18, NA), p = c(0.76, 0, 0.02, NA), Eta2 = c(0.01, 0.17, 0.14, NA), Eta2_CI_low = c(0, 0.03, 0, NA), Eta2_CI_high = c(1, 1, 1, NA), Epsilon2 = c(-0.02, 0.16, 0.11, NA), 
Epsilon2_CI_low = c(0, 0.03, 0, NA), Epsilon2_CI_high = c(1, 1, 1, NA) ), row.names = c(NA, 4L), class = "data.frame", ci = 0.89, model_class = c("anova", "data.frame"), digits = 2, ci_digits = 2, p_digits = 3 ), tolerance = 0.1, ignore_attr = TRUE ) }) parameters/tests/testthat/test-efa.R0000644000176200001440000000074614413515226017264 0ustar liggesusersskip_on_cran() test_that("predict.parameters_efa works with verbose", { skip_if_not_installed("psych") d <- psych::bfi[, 1:25] d <- na.omit(d) efa <- psych::fa(d, nfactors = 5) out <- model_parameters(efa, sort = TRUE, threshold = "max") predictions <- predict( out, names = c("Neuroticism", "Conscientiousness", "Extraversion", "Agreeableness", "Opennness"), verbose = FALSE ) expect_identical(dim(predictions), as.integer(c(2436, 5))) }) parameters/tests/testthat/test-quantreg.R0000644000176200001440000000515714736731407020371 0ustar liggesusersskip_on_cran() skip_if(getRversion() < "4.2.0") # rqss --------- # data("CobarOre") # set.seed(123) # CobarOre$w <- rnorm(nrow(CobarOre)) # m1 <- rqss(z ~ w + qss(cbind(x, y), lambda = .08), data = CobarOre) # mp <- suppressWarnings(model_parameters(m1)) # test_that("mp_rqss", { # expect_identical(mp$Parameter, c("(Intercept)", "w", "cbind(x, y)")) # expect_equal(mp$Coefficient, c(17.63057, 1.12506, NA), tolerance = 1e-3) # expect_equal(mp$df_error, c(15, 15, NA), tolerance = 1e-3) # expect_equal(mp[["df"]], c(NA, NA, 70), tolerance = 1e-3) # }) # rq --------- test_that("mp_rq", { skip_if_not_installed("quantreg") data(stackloss) m1 <- quantreg::rq(stack.loss ~ Air.Flow + Water.Temp, data = stackloss, tau = 0.25) mp <- suppressWarnings(model_parameters(m1)) expect_identical(mp$Parameter, c("(Intercept)", "Air.Flow", "Water.Temp")) expect_equal(mp$Coefficient, c(-36, 0.5, 1), tolerance = 1e-3) }) # rqs --------- test_that("mp_rqs", { skip_if_not_installed("quantreg") set.seed(123) data("engel", package = "quantreg") m1 <- quantreg::rq(foodexp ~ income, data = engel, tau = 1:9 / 10) mp <- suppressWarnings(model_parameters(m1)) expect_identical(mp$Parameter, c( "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income", "(Intercept)", "income" )) expect_equal(mp$Coefficient, c( 110.14157, 0.40177, 102.31388, 0.4469, 99.11058, 0.48124, 101.95988, 0.5099, 81.48225, 0.56018, 79.70227, 0.58585, 79.28362, 0.60885, 58.00666, 0.65951, 67.35087, 0.6863 ), tolerance = 1e-3) expect_equal(mp$SE, c( 29.39768, 0.04024, 21.42836, 0.02997, 22.18115, 0.02987, 22.06032, 0.02936, 19.25066, 0.02828, 17.61762, 0.02506, 14.25039, 0.02176, 19.21719, 0.02635, 22.39538, 0.02849 ), tolerance = 1e-3) }) # crq --------- test_that("mp_rq", { skip_if_not_installed("quantreg") skip_if_not_installed("survival") set.seed(123) n <- 200 x <- rnorm(n) y <- 5 + x + rnorm(n) c <- 4 + x + rnorm(n) d <- (y > c) dat <- data.frame(y, x, c, d) m1 <- quantreg::crq(survival::Surv(pmax(y, c), d, type = "left") ~ x, method = "Portnoy", data = dat) mp <- model_parameters(m1) expect_identical( mp$Parameter, c("(Intercept)", "x", "(Intercept)", "x", "(Intercept)", "x", "(Intercept)", "x") ) expect_equal( mp$Coefficient, c(4.26724, 0.97534, 4.84961, 0.92638, 5.21843, 0.98038, 5.91301, 0.97382), tolerance = 1e-3 ) }) parameters/tests/testthat/test-include_reference.R0000644000176200001440000000745715053035103022170 0ustar liggesusersskip_if_not_installed("tinytable") skip_if_not_installed("knitr") 
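# Illustrative sketch (added note, not an original test): with treatment
# contrasts, `include_reference = TRUE` appends the omitted reference level
# of each factor as an extra row, so printed tables list all levels.
# Object names here are made up for illustration:
# d_demo <- transform(mtcars, gear = as.factor(gear))
# m_demo <- lm(mpg ~ gear + hp, data = d_demo)
# model_parameters(m_demo, include_reference = TRUE) # adds a "gear [3]" row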
test_that("include_reference, on-the-fly factors", { data(mtcars) d <- as.data.frame(mtcars) d$gear <- as.factor(d$gear) d$am <- as.factor(d$am) m1 <- lm(mpg ~ as.factor(gear) + factor(am) + hp, data = mtcars) m2 <- lm(mpg ~ gear + am + hp, data = d) out1 <- model_parameters(m1, include_reference = TRUE) out2 <- model_parameters(m2, include_reference = TRUE) expect_snapshot(print(out1)) expect_snapshot(print(out2)) expect_equal(attributes(out1)$pretty_names, attributes(out2)$pretty_names, ignore_attr = TRUE) expect_equal(out1$Coefficient, out2$Coefficient, tolerance = 1e-4) out <- compare_parameters(m1, m2, include_reference = TRUE) expect_snapshot(print_md(out)) expect_snapshot(display(out, format = "tt")) }) skip_if(getRversion() < "4.3.3") skip_if_not_installed("datawizard") test_that("include_reference, on-the-fly factors", { data(mtcars) d <- as.data.frame(mtcars) d$gear <- as.factor(d$gear) d$am <- as.factor(d$am) m1 <- lm(mpg ~ as.factor(gear) + factor(am) + hp, data = mtcars) m2 <- lm(mpg ~ gear + am + hp, data = d) out1 <- model_parameters(m1, include_reference = TRUE) out3 <- mtcars |> datawizard::data_modify(gear = factor(gear), am = as.factor(am)) |> lm(formula = mpg ~ gear + am + hp) |> model_parameters(include_reference = TRUE) expect_equal(attributes(out1)$pretty_names, attributes(out3)$pretty_names, ignore_attr = TRUE) }) test_that("include_reference, with pretty formatted cut", { data(mtcars) mtcars$mpg_cut <- cut(mtcars$mpg, breaks = c(0, 20, 30, 100)) m <- lm(wt ~ mpg_cut, data = mtcars) out <- parameters(m, include_reference = TRUE) expect_identical( attributes(out)$pretty_names, c( `(Intercept)` = "(Intercept)", `mpg_cut(0,20]` = "mpg cut [>0-20]", `mpg_cut(20,30]` = "mpg cut [>20-30]", `mpg_cut(30,100]` = "mpg cut [>30-100]" ) ) }) test_that("include_reference, different contrasts", { data("mtcars") mtcars$cyl <- factor(mtcars$cyl) mtcars$gear <- factor(mtcars$gear) m <- lm(mpg ~ cyl + gear, data = mtcars, contrasts = list(cyl = datawizard::contr.deviation)) out <- model_parameters(m, include_reference = TRUE) expect_snapshot(print(out)) m <- lm(mpg ~ cyl + gear, data = mtcars) out <- model_parameters(m, include_reference = TRUE) expect_snapshot(print(out)) m <- lm( mpg ~ cyl + gear, data = mtcars, contrasts = list( cyl = datawizard::contr.deviation, gear = contr.sum ) ) out <- model_parameters(m, include_reference = TRUE) expect_snapshot(print(out)) m <- lm( mpg ~ cyl + gear, data = mtcars, contrasts = list( cyl = contr.SAS, gear = contr.sum ) ) out <- model_parameters(m, include_reference = TRUE) expect_snapshot(print(out)) m <- lm( mpg ~ cyl + gear, data = mtcars, contrasts = list( cyl = contr.SAS, gear = contr.treatment ) ) out <- model_parameters(m, include_reference = TRUE) expect_snapshot(print(out)) }) test_that("include_reference, random effects models", { skip_if_not_installed("glmmTMB") skip_if(getRversion() < "4.5") data(penguins, package = "datasets") fit_penguins <- glmmTMB::glmmTMB( sex ~ flipper_len + island + (1 | year), family = binomial(), data = penguins ) out <- model_parameters(fit_penguins, exponentiate = TRUE, include_reference = TRUE) expect_snapshot(print(out)) out <- model_parameters(fit_penguins, exponentiate = TRUE) expect_snapshot(print(out, include_reference = TRUE)) out <- model_parameters(fit_penguins, effects = "fixed", exponentiate = TRUE, include_reference = TRUE) expect_snapshot(print(out)) }) parameters/tests/testthat/test-betareg.R0000644000176200001440000000353314413515226020137 0ustar liggesusersskip_if_not_installed("betareg") 
data("GasolineYield", package = "betareg") data("FoodExpenditure", package = "betareg") m1 <- betareg::betareg(yield ~ batch + temp, data = GasolineYield) m2 <- betareg::betareg(I(food / income) ~ income + persons, data = FoodExpenditure) test_that("ci", { expect_equal( ci(m1)$CI_low, as.vector(confint(m1)[, 1]), tolerance = 1e-4 ) expect_equal( ci(m2)$CI_low, as.vector(confint(m2)[, 1]), tolerance = 1e-4 ) }) test_that("se", { s <- summary(m1) expect_equal( standard_error(m1)$SE, as.vector(c(s$coefficients$mean[, 2], s$coefficients$precision[, 2])), tolerance = 1e-4 ) s <- summary(m2) expect_equal( standard_error(m2)$SE, as.vector(c(s$coefficients$mean[, 2], s$coefficients$precision[, 2])), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0, 0, 0, 0, 0, 0, 0, 1e-05, 0.00114, 0, 6e-05), tolerance = 1e-3 ) expect_equal( p_value(m2)$p, c(0.00542, 5e-05, 8e-04, 1e-05), tolerance = 1e-3 ) }) # check vcov args test_that("model_parameters", { expect_message({ out <- model_parameters(m1, vcov = "vcovHAC") }) expect_equal(out$SE, unname(coef(summary(m1))[[1]][, 2]), tolerance = 1e-3) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, as.vector(coef(m1))[1:11], tolerance = 1e-4 ) expect_equal( model_parameters(m1, component = "all")$Coefficient, as.vector(coef(m1)), tolerance = 1e-4 ) expect_equal( model_parameters(m2)$Coefficient, c(-0.62255, -0.0123, 0.11846), tolerance = 1e-4 ) expect_equal( model_parameters(m2, component = "all")$Coefficient, c(-0.62255, -0.0123, 0.11846, 35.60975033), tolerance = 1e-4 ) }) parameters/tests/testthat/test-group_level_total.R0000644000176200001440000000436614761570351022267 0ustar liggesusersskip_on_os("mac") skip_on_cran() skip_if_not_installed("glmmTMB") skip_if_not_installed("lme4") test_that("group_level_total", { data("fish", package = "insight") m1 <- suppressWarnings(glmmTMB::glmmTMB( count ~ child + camper + (1 | ID), data = fish, family = poisson() )) m2 <- suppressWarnings(glmmTMB::glmmTMB( count ~ child + camper + (1 + xb | persons) + (1 + zg | ID), ziformula = ~ child + livebait + (1 + zg + nofish | ID), dispformula = ~xb, data = fish, family = glmmTMB::truncated_poisson() )) m3 <- suppressWarnings(glmmTMB::glmmTMB( count ~ child + camper, ziformula = ~ child + livebait + (1 | ID), data = fish, family = glmmTMB::truncated_poisson() )) m4 <- suppressWarnings(glmmTMB::glmmTMB( count ~ child + camper + (1 + xb | persons), ziformula = ~ child + livebait, dispformula = ~xb, data = fish, family = glmmTMB::truncated_poisson() )) m5 <- suppressWarnings(lme4::glmer( count ~ child + camper + (1 | ID), data = fish, family = poisson() )) m6 <- suppressWarnings(lme4::lmer( Reaction ~ Days + (1 + Days | Subject), data = lme4::sleepstudy )) out <- model_parameters(m1, effects = "total") expect_identical(dim(out), c(4L, 6L)) out <- model_parameters(m2, effects = "total") expect_identical(dim(out), c(28L, 6L)) out <- model_parameters(m3, effects = "total") expect_identical(dim(out), c(4L, 6L)) out <- model_parameters(m4, effects = "total") expect_identical(dim(out), c(8L, 6L)) out <- model_parameters(m5, effects = "total") expect_identical(dim(out), c(4L, 5L)) out <- model_parameters(m6, effects = "total") expect_identical(dim(out), c(36L, 5L)) }) test_that("group_level_total, brms", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_installed("brms") m1 <- insight::download_model("brms_zi_4") m2 <- insight::download_model("brms_sigma_3") skip_if(is.null(m1) || 
is.null(m2)) out <- model_parameters(m1, effects = "total") expect_identical(dim(out), c(28L, 10L)) out <- model_parameters(m2, effects = "total") expect_identical(dim(out), c(12L, 10L)) }) parameters/tests/testthat/test-ci.R0000644000176200001440000000372714413515226017126 0ustar liggesuserstest_that("ci", { skip_if_not_installed("lme4") model <- lm(mpg ~ wt, data = mtcars) expect_equal(suppressMessages(ci(model))[1, 3], 33.4505, tolerance = 0.01) expect_equal(suppressMessages(ci(model, ci = c(0.7, 0.8)))[1, 3], 35.30486, tolerance = 0.01) model <- glm(vs ~ wt, family = "binomial", data = mtcars) expect_equal(suppressMessages(ci(model))[1, 3], 1.934013, tolerance = 0.01) model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars) expect_equal(suppressMessages(ci(model, method = "normal"))[1, 3], -0.335063, tolerance = 0.01) model <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars) expect_equal(ci(model)[1, 3], -0.3795646, tolerance = 0.01) set.seed(1) val <- ci(model, method = "boot")[1, 3] expect_equal(val, -0.555424, tolerance = 0.01) model <- lme4::glmer(vs ~ cyl + (1 | gear), data = mtcars, family = "binomial") expect_equal(ci(model)[1, 3], -0.7876679, tolerance = 0.01) model <- lme4::glmer(vs ~ drat + cyl + (1 | gear), data = mtcars, family = "binomial") expect_equal(ci(model)[1, 3], -48.14195, tolerance = 0.01) }) test_that("vs. sandwich & lmtest", { skip_if_not_installed("sandwich") skip_if_not_installed("lmtest") model <- lm(mpg ~ wt, data = mtcars) known <- lmtest::coefci(model, vcov = sandwich::vcovHC) unknown <- ci(model, vcov = sandwich::vcovHC) expect_equal(unknown[["CI_low"]], known[, "2.5 %"], ignore_attr = TRUE) expect_equal(unknown[["CI_high"]], known[, "97.5 %"], ignore_attr = TRUE) model <- glm(am ~ wt, data = mtcars, family = binomial) known <- lmtest::coefci(model, vcov = sandwich::vcovHC) unknown <- ci(model, vcov = sandwich::vcovHC, method = "wald") expect_equal(unknown[["CI_low"]], known[, "2.5 %"], ignore_attr = TRUE) expect_equal(unknown[["CI_high"]], known[, "97.5 %"], ignore_attr = TRUE) suppressMessages( expect_message(ci(model, vcov = sandwich::vcovHC), regexp = "vcov.*are not available with.*profile") ) }) parameters/tests/testthat/test-model_parameters.lme.R0000644000176200001440000000171714413515226022627 0ustar liggesusersskip_if_not_installed("nlme") skip_if_not_installed("lme4") data("sleepstudy", package = "lme4") model <- nlme::lme(Reaction ~ Days, random = ~ 1 + Days | Subject, data = sleepstudy ) test_that("model_parameters.lme", { params <- model_parameters(model, effects = "fixed") expect_equal(params$SE, c(6.8245, 1.5458), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Effects") ) }) test_that("model_parameters.lme", { params <- model_parameters(model, effects = "all") expect_equal(params$Coefficient, c(251.4051, 10.46729, 24.74024, 5.9221, 0.066, 25.59184), tolerance = 1e-3) expect_equal(params$SE, c(6.82452, 1.54578, NA, NA, NA, NA), tolerance = 1e-3) expect_equal( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Effects", "Group" ) ) }) parameters/tests/testthat/test-pretty_names.R0000644000176200001440000000560715033425412021240 0ustar liggesuserstest_that("pretty_names", { data(mtcars) attr(mtcars$hp, "label") <- "Gross horsepower" mod <- lm(mpg ~ hp + factor(cyl), mtcars) p <- parameters::parameters(mod, pretty_names = "labels", include_reference = TRUE) expect_identical( attr(p, "pretty_labels"), c( 
`(Intercept)` = "(Intercept)", hp = "Gross horsepower", `factor(cyl)4` = "cyl [4]", `factor(cyl)6` = "cyl [6]", `factor(cyl)8` = "cyl [8]" ) ) p <- parameters::parameters(mod, pretty_names = "labels") expect_identical( attr(p, "pretty_labels"), c( `(Intercept)` = "(Intercept)", hp = "Gross horsepower", `factor(cyl)6` = "cyl [6]", `factor(cyl)8` = "cyl [8]" ) ) mtcars2 <- transform(mtcars, cyl = as.factor(cyl)) attr(mtcars2$cyl, "label") <- "Cylinders" model <- lm(mpg ~ wt + cyl, data = mtcars2) p <- model_parameters(model, pretty_names = "labels", include_reference = TRUE) expect_identical( attr(p, "pretty_labels"), c( `(Intercept)` = "(Intercept)", wt = "wt", cyl4 = "Cylinders [4]", cyl6 = "Cylinders [6]", cyl8 = "Cylinders [8]" ) ) }) skip_if_not_installed("withr") # make sure we have the correct interaction mark for tests withr::with_options( list(parameters_interaction = "*", parameters_warning_exponentiate = TRUE), test_that("pretty_labels", { set.seed(1024) N <- 5000 X <- rbinom(N, 1, 0.5) M <- sample(c("a", "b", "c"), N, replace = TRUE) b <- runif(8, -1, 1) Y <- rbinom(N, 1, prob = plogis( b[1] + b[2] * X + b[3] * (M == "b") + b[4] * (M == "b") + b[5] * (M == "c") + b[6] * X * (M == "a") + b[7] * X + (M == "b") + b[8] * X * (M == "c") )) dat <- data.frame(Y, X, M, stringsAsFactors = FALSE) mod <- glm(Y ~ X * M, data = dat, family = binomial) p <- parameters(mod) expect_identical( attr(p, "pretty_labels"), c( `(Intercept)` = "(Intercept)", X = "X", Mb = "M [b]", Mc = "M [c]", `X:Mb` = "X * M [b]", `X:Mc` = "X * M [c]" ) ) expect_snapshot(print(p)) }) ) withr::with_options( list(parameters_exponentiate = FALSE, parameters_warning_exponentiate = FALSE), { test_that("pretty_labels, pscl", { skip_if_not_installed("pscl") mydf <- pscl::bioChemists attr(mydf$art, "label") <- "MyCount" attr(mydf$fem, "label") <- "MyGender" attr(mydf$mar, "label") <- "MyMarried" model <- pscl::zeroinfl(art ~ fem + mar, data = mydf) expect_snapshot(print(model_parameters(model), pretty_names = "labels", zap_small = TRUE)) # doesn't crash if pretty-labels is NULL mydf <- pscl::bioChemists mydf$mar <- as.character(mydf$mar) model <- pscl::zeroinfl(art ~ fem + mar, data = mydf) out <- model_parameters(model) expect_identical(dim(out), c(6L, 10L)) }) } ) parameters/tests/testthat/test-model_parameters.vgam.R0000644000176200001440000000333114716604201022774 0ustar liggesusersskip_if_not_installed("VGAM") skip_on_cran() data("pneumo", package = "VGAM") data("hunua", package = "VGAM") set.seed(123) pneumo <- transform(pneumo, let = log(exposure.time)) m1 <- suppressWarnings(VGAM::vgam( cbind(normal, mild, severe) ~ VGAM::s(let) + exposure.time, VGAM::cumulative(parallel = TRUE), data = pneumo, trace = FALSE )) set.seed(123) hunua$x <- rnorm(nrow(hunua)) m2 <- VGAM::vgam( agaaus ~ VGAM::s(altitude, df = 2) + VGAM::s(x) + beitaw + corlae, VGAM::binomialff, data = hunua ) test_that("model_parameters.vgam", { skip("TODO: model_parameters doesn't work with 'VGAM::' in the formula") params <- suppressWarnings(model_parameters(m1)) expect_equal(params$Coefficient, as.vector(m1@coefficients[params$Parameter]), tolerance = 1e-3) expect_identical(params$Parameter, c("(Intercept):1", "(Intercept):2", "exposure.time", "s(let)")) expect_equal(params$df, c(NA, NA, NA, 2.6501), tolerance = 1e-3) expect_equal(as.vector(na.omit(params$df)), as.vector(m1@nl.df), tolerance = 1e-3) }) test_that("model_parameters.vgam", { skip("TODO: model_parameters doesn't work with 'VGAM::' in the formula") params <- 
suppressWarnings(model_parameters(m2)) expect_equal(params$Coefficient, as.vector(m2@coefficients[params$Parameter]), tolerance = 1e-3) expect_identical(params$Parameter, c("(Intercept)", "beitaw", "corlae", "s(altitude, df = 2)", "s(x)")) expect_equal(params$df, c(NA, NA, NA, 0.82686, 2.8054), tolerance = 1e-3) expect_equal(as.vector(na.omit(params$df)), as.vector(m2@nl.df), tolerance = 1e-3) expect_named(params, c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Chi2", "df_error", "p", "Component" )) }) parameters/tests/testthat/test-mmrm.R0000644000176200001440000000410314716604201017466 0ustar liggesusersskip_on_cran() skip_if_not_installed("mmrm") skip_if_not(packageVersion("insight") > "0.18.8") test_that("model_parameters", { data(fev_data, package = "mmrm") m1 <- mmrm::mmrm( formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), data = fev_data ) out1 <- coef(summary(m1)) out2 <- model_parameters(m1) expect_equal( as.vector(out1[, "Estimate"]), out2$Coefficient, tolerance = 1e-4, ignore_attr = TRUE ) expect_identical( rownames(out1), out2$Parameter ) expect_equal( as.vector(out1[, "df"]), out2$df_error, tolerance = 1e-4, ignore_attr = TRUE ) expect_equal( as.vector(out1[, "Pr(>|t|)"]), out2$p, tolerance = 1e-4, ignore_attr = TRUE ) expect_equal( as.vector(out1[, "t value"]), out2$t, tolerance = 1e-4, ignore_attr = TRUE ) expect_equal( as.vector(out1[, "Std. Error"]), out2$SE, tolerance = 1e-4, ignore_attr = TRUE ) expect_identical(attributes(out2)$ci_method, "Satterthwaite") }) test_that("model_parameters", { data(fev_data, package = "mmrm") m1 <- mmrm::mmrm( formula = FEV1 ~ RACE + SEX + ARMCD * AVISIT + us(AVISIT | USUBJID), data = fev_data, method = "Kenward-Roger" ) out1 <- coef(summary(m1)) out2 <- model_parameters(m1) expect_equal( as.vector(out1[, "Estimate"]), out2$Coefficient, tolerance = 1e-4, ignore_attr = TRUE ) expect_identical( rownames(out1), out2$Parameter ) expect_equal( as.vector(out1[, "df"]), out2$df_error, tolerance = 1e-4, ignore_attr = TRUE ) expect_equal( as.vector(out1[, "Pr(>|t|)"]), out2$p, tolerance = 1e-4, ignore_attr = TRUE ) expect_equal( as.vector(out1[, "t value"]), out2$t, tolerance = 1e-4, ignore_attr = TRUE ) expect_equal( as.vector(out1[, "Std. 
Error"]), out2$SE, tolerance = 1e-4, ignore_attr = TRUE ) expect_identical(attributes(out2)$ci_method, "Kenward") }) parameters/tests/testthat/test-bootstrap_emmeans.R0000644000176200001440000000500115002424150022226 0ustar liggesusersskip_on_cran() test_that("emmeans | lm", { skip_if_not_installed("emmeans") skip_if_not_installed("boot") skip_if_not_installed("coda") model <- lm(mpg ~ log(wt) + factor(cyl), data = mtcars) set.seed(7) b <- bootstrap_model(model, iterations = 1000) expect_equal(summary(emmeans::emmeans(b, ~cyl))$emmean, summary(emmeans::emmeans(model, ~cyl))$emmean, tolerance = 0.1 ) set.seed(7) b <- bootstrap_parameters(model, iterations = 1000) expect_equal(summary(emmeans::emmeans(b, ~cyl))$emmean, summary(emmeans::emmeans(model, ~cyl))$emmean, tolerance = 0.1 ) skip_on_ci() mp <- model_parameters(emmeans::emmeans(b, consec ~ cyl), verbose = FALSE) expect_identical( colnames(mp), c("Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "Component") ) expect_identical(nrow(mp), 5L) }) test_that("emmeans | lmer", { skip_if_not_installed("emmeans") skip_if_not_installed("boot") skip_if_not_installed("lme4") skip_if_not_installed("coda") model <- lme4::lmer(mpg ~ log(wt) + factor(cyl) + (1 | gear), data = mtcars) set.seed(7) b <- bootstrap_model(model, iterations = 1000) expect_equal(summary(emmeans::emmeans(b, ~cyl))$emmean, summary(emmeans::emmeans(model, ~cyl))$emmean, tolerance = 0.1 ) set.seed(7) b <- bootstrap_parameters(model, iterations = 1000) expect_equal(summary(emmeans::emmeans(b, ~cyl))$emmean, summary(emmeans::emmeans(model, ~cyl))$emmean, tolerance = 0.1 ) mp <- suppressWarnings(model_parameters(emmeans::emmeans(b, consec ~ cyl))) expect_identical( colnames(mp), c("Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "Component") ) expect_identical(nrow(mp), 5L) }) test_that("emmeans | glmmTMB", { skip_if_not_installed("coda") skip_if_not_installed("emmeans") skip_if_not_installed("boot") skip_if_not_installed("lme4") suppressWarnings(skip_if_not_installed("glmmTMB")) data(Salamanders, package = "glmmTMB") model <- glmmTMB::glmmTMB(count ~ spp + mined + (1 | site), family = glmmTMB::nbinom2, data = Salamanders) set.seed(7) b <- bootstrap_parameters(model, iterations = 10) out <- summary(emmeans::emmeans(b, ~spp, type = "response")) expect_equal( out$response, c(0.654, 0.1515, 0.8856, 0.261, 0.9775, 1.2909, 0.9031), tolerance = 0.1 ) expect_identical( colnames(out), c("spp", "response", "lower.HPD", "upper.HPD") ) expect_identical(nrow(out), 7L) }) parameters/tests/testthat/test-posterior.R0000644000176200001440000000432715111301621020542 0ustar liggesusersskip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("posterior") skip_if_not_installed("brms") skip_if_not_installed("httr2") model <- insight::download_model("brms_1") skip_if(is.null(model)) test_that("mp-posterior-draws", { x <- posterior::as_draws(model) mp <- model_parameters(x) expect_equal( mp$Median, c(39.68234, -3.19505, -1.4936, 2.62881, -79.73344), tolerance = 1e-2, ignore_attr = TRUE ) expect_identical(mp$Parameter, c("b_Intercept", "b_wt", "b_cyl", "sigma", "lp__")) expect_identical(colnames(mp), c("Parameter", "Median", "CI_low", "CI_high", "pd")) }) test_that("mp-posterior-draws_list", { x <- posterior::as_draws_list(model) mp <- model_parameters(x) expect_equal( mp$Median, c(39.68234, -3.19505, -1.4936, 2.62881, -79.73344), tolerance = 1e-2, ignore_attr = TRUE ) expect_identical(mp$Parameter, c("b_Intercept", "b_wt", "b_cyl", "sigma", "lp__")) }) 
test_that("mp-posterior-draws_df", { x <- posterior::as_draws_df(model) mp <- model_parameters(x) expect_equal( mp$Median, c(39.68234, -3.19505, -1.4936, 2.62881, -79.73344), tolerance = 1e-2, ignore_attr = TRUE ) expect_identical(mp$Parameter, c("b_Intercept", "b_wt", "b_cyl", "sigma", "lp__")) }) test_that("mp-posterior-draws_matrix", { x <- posterior::as_draws_matrix(model) mp <- model_parameters(x) expect_equal( mp$Median, c(39.68234, -3.19505, -1.4936, 2.62881, -79.73344), tolerance = 1e-2, ignore_attr = TRUE ) expect_identical(mp$Parameter, c("b_Intercept", "b_wt", "b_cyl", "sigma", "lp__")) }) test_that("mp-posterior-draws_array", { x <- posterior::as_draws_array(model) mp <- model_parameters(x) expect_equal( mp$Median, c(39.68234, -3.19505, -1.4936, 2.62881, -79.73344), tolerance = 1e-2, ignore_attr = TRUE ) expect_identical(mp$Parameter, c("b_Intercept", "b_wt", "b_cyl", "sigma", "lp__")) }) test_that("mp-posterior-draws standard error", { x <- posterior::as_draws(model) se1 <- standard_error(x) se2 <- standard_error(model) expect_equal(se1$SE[1:4], se2$SE[1:4], tolerance = 1e-2, ignore_attr = TRUE) }) parameters/tests/testthat/test-print_AER_labels.R0000644000176200001440000000061314716604201021665 0ustar liggesusersskip_if_not_installed("AER") skip_if_not_installed("datawizard") skip_if_not_installed("withr") withr::with_options( list(easystats_table_width = Inf), test_that("templates", { data(efc, package = "datawizard") model <- AER::tobit(neg_c_7 ~ e42dep + c172code, data = efc) mp <- model_parameters(model) expect_snapshot(print(mp, pretty_names = "labels")) }) ) parameters/tests/testthat/test-model_parameters.nnet.R0000644000176200001440000000534414506526355023026 0ustar liggesusersskip_if_not_installed("nnet") skip_if_not_installed("faraway") skip_if_not(packageVersion("insight") > "0.19.1") skip_on_cran() data("cns", package = "faraway") cns2 <- reshape(cns, direction = "long", timevar = "Type", times = names(cns)[3:5], varying = 3:5, v.names = "Freq" )[, 3:6] cns2$Type <- factor(cns2$Type, levels = unique(cns2$Type)) mnnet1 <- nnet::multinom(Type ~ Water + Work, data = cns2, weights = Freq, trace = FALSE) mnnet2 <- nnet::multinom(cbind(An, Sp, Other) ~ Water + Work, data = cns, trace = FALSE) ci1 <- confint(mnnet1) ci2 <- confint(mnnet2) test_that("model_parameters.multinom - long and wide", { mpnnet1 <- model_parameters(mnnet1) mpnnet2 <- model_parameters(mnnet2) expect_named( mpnnet1, c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p", "Response" ) ) expect_identical( mpnnet1$Parameter, c("(Intercept)", "Water", "WorkNonManual", "(Intercept)", "Water", "WorkNonManual") ) expect_identical( mpnnet1$Response, c("Sp", "Sp", "Sp", "Other", "Other", "Other") ) expect_equal( mpnnet1$Coefficient, c(0.3752, -0.0013, 0.11576, -1.12255, 0.00218, -0.27028), tolerance = 1e-4 ) expect_equal( mpnnet1$CI_low, as.vector(ci1[1:3, 1, 1:2]), tolerance = 1e-4 ) expect_named( mpnnet2, c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p", "Response" ) ) expect_identical( mpnnet2$Parameter, c("(Intercept)", "Water", "WorkNonManual", "(Intercept)", "Water", "WorkNonManual") ) expect_identical( mpnnet2$Response, c("Sp", "Sp", "Sp", "Other", "Other", "Other") ) expect_equal( mpnnet2$Coefficient, c(0.3752, -0.0013, 0.11576, -1.12255, 0.00218, -0.27028), tolerance = 1e-4 ) expect_equal( mpnnet2$CI_low, as.vector(ci2[1:3, 1, 1:2]), tolerance = 1e-4 ) }) test_that("ci.multinom - long and wide", { cinnet1 <- ci(mnnet1) cinnet2 <- 
ci(mnnet2) expect_identical( cinnet1$Parameter, c("(Intercept)", "Water", "WorkNonManual", "(Intercept)", "Water", "WorkNonManual") ) expect_identical( cinnet1$Response, c("Sp", "Sp", "Sp", "Other", "Other", "Other") ) expect_equal( cinnet1$CI_low, as.vector(ci1[1:3, 1, 1:2]), tolerance = 1e-4 ) expect_identical( cinnet2$Parameter, c("(Intercept)", "Water", "WorkNonManual", "(Intercept)", "Water", "WorkNonManual") ) expect_identical( cinnet2$Response, c("Sp", "Sp", "Sp", "Other", "Other", "Other") ) expect_equal( cinnet2$CI_low, as.vector(ci2[1:3, 1, 1:2]), tolerance = 1e-4 ) }) parameters/tests/testthat/test-coxph.R0000644000176200001440000000412014716604211017639 0ustar liggesusersskip_if_not_installed("survival") lung <- subset(survival::lung, subset = ph.ecog %in% 0:2) lung$sex <- factor(lung$sex, labels = c("male", "female")) lung$ph.ecog <- factor(lung$ph.ecog, labels = c("good", "ok", "limited")) m1 <- survival::coxph(survival::Surv(time, status) ~ sex + age + ph.ecog, data = lung) test_that("ci", { expect_equal( ci(m1)$CI_low, c(-0.87535, -0.00747, 0.01862, 0.45527), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.16823, 0.00931, 0.19961, 0.22809), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.00118, 0.24713, 0.04005, 8e-05), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(-0.54563, 0.01078, 0.40984, 0.90232), tolerance = 1e-4 ) }) test_that("model_parameters", { suppressPackageStartupMessages(library(survival, quietly = TRUE)) # Create the simplest test data set test1 <- list( time = c(4, 3, 1, 1, 2, 2, 3), status = c(1, 1, 1, 0, 1, 1, 0), x = c(0, 2, 1, 1, 1, 0, 0), sex = c(0, 0, 0, 0, 1, 1, 1) ) # Fit a stratified model m2 <- coxph(Surv(time, status) ~ x + strata(sex), test1) expect_equal(model_parameters(m2)$Coefficient, 0.8023179, tolerance = 1e-4) expect_equal(model_parameters(m2)$z, 0.9756088, tolerance = 1e-4) expect_equal(model_parameters(m2)$p, 0.3292583, tolerance = 1e-4) unloadNamespace("rms") unloadNamespace("quantreg") unloadNamespace("multcomp") unloadNamespace("TH.data") unloadNamespace("effects") unloadNamespace("survey") unloadNamespace("survival") }) skip_if_not_installed("withr") withr::with_package( "survival", test_that("model_parameters coxph-panel", { set.seed(123) # a time transform model mod <- survival::coxph( survival::Surv(time, status) ~ ph.ecog + tt(age), data = lung, tt = function(x, t, ...)
pspline(x + t / 365.25) ) expect_snapshot(print(model_parameters(mod))) }) ) parameters/tests/testthat/test-mlm.R0000644000176200001440000000507514413515226017316 0ustar liggesuserstest_that("model_parameters,mlm", { set.seed(123) mod <- lm(formula = cbind(mpg, disp) ~ wt, data = mtcars) mp <- model_parameters(mod) expect_equal( mp$Coefficient, c(37.28513, -5.34447, -131.14842, 112.47814), tolerance = 1e-3 ) expect_equal( colnames(mp), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Response" ) ) expect_equal(mp$Response, c("mpg", "mpg", "disp", "disp")) expect_equal(mp$Parameter, c("(Intercept)", "wt", "(Intercept)", "wt")) }) test_that("model_parameters,mlm", { model <- lm(cbind(mpg, hp) ~ cyl * disp, mtcars) mp <- model_parameters(model) expect_equal( mp$Coefficient, c(49.03721, -3.40524, -0.14553, 0.01585, 23.55, 17.43527, -0.36762, 0.06174), tolerance = 1e-3 ) expect_equal( colnames(mp), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Response" ) ) expect_equal(mp$Response, c("mpg", "mpg", "mpg", "mpg", "hp", "hp", "hp", "hp")) expect_equal(mp$Parameter, c("(Intercept)", "cyl", "disp", "cyl:disp", "(Intercept)", "cyl", "disp", "cyl:disp")) }) test_that("sandwich standard errors", { skip_if_not_installed("sandwich") skip_if_not_installed("lmtest") mod <- lm(formula = cbind(mpg, disp) ~ wt + factor(cyl) + am, data = mtcars) se1 <- standard_error(mod) se2 <- standard_error(mod, vcov = "HC3") se3 <- standard_error(mod, vcov = sandwich::vcovHC) se4 <- sqrt(diag(sandwich::vcovHC(mod))) expect_true(all(se1$SE != se2$SE)) expect_true(all(se2$SE == se3$SE)) expect_true(all(se2$SE == se4)) lab <- strsplit(names(se4), ":") expect_equal(se2$Parameter, sapply(lab, function(x) x[2])) expect_equal(se2$Response, sapply(lab, function(x) x[1])) p1 <- parameters(mod) p2 <- parameters(mod, vcov = "HC3") expect_true(all(p1$Coefficient == p2$Coefficient)) expect_true(all(p1$SE != p2$SE)) expect_true(all(p1$t != p2$t)) expect_true(all(p1$p != p2$p)) expect_true(all(p1$CI_low != p2$CI_low)) expect_true(all(p1$CI_high != p2$CI_high)) lt <- lmtest::coeftest(mod, vcov = sandwich::vcovHC) ci <- stats::confint(lt) expect_equal(p2$Coefficient, lt[, "Estimate"], ignore_attr = TRUE) expect_equal(p2$SE, lt[, "Std. Error"], ignore_attr = TRUE) expect_equal(p2$t, lt[, "t value"], ignore_attr = TRUE) expect_equal(p2$p, lt[, "Pr(>|t|)"], ignore_attr = TRUE) expect_equal(p2$CI_low, ci[, 1], ignore_attr = TRUE) expect_equal(p2$CI_high, ci[, 2], ignore_attr = TRUE) }) parameters/tests/testthat/test-plm.R0000644000176200001440000000715014716604201017313 0ustar liggesusersskip_if_not_installed("stats") skip_if_not_installed("plm") data(Crime, package = "plm") data("Produc", package = "plm") set.seed(123) Crime$year <- as.factor(Crime$year) m1 <- suppressWarnings(plm::plm(lcrmrte ~ lprbarr + year | . 
- lprbarr + lmix, data = Crime, model = "random")) m2 <- suppressWarnings(plm::plm( formula = log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state", "year") )) test3333 <- data.frame( ID = c("TOM", "TOM", "TOM", "TOM", "MARY", "MARY", "MARY", "JOHN", "JOHN"), Year = c(1992:1995, 1991:1993, 1993:1994), ret = rnorm(9), stringsAsFactors = FALSE ) test3333 <- plm::pdata.frame(test3333) test3333["lag"] <- lag(test3333$ret) test3333 <- na.omit(test3333) test3333model <- ret ~ lag m3 <- suppressWarnings(plm::plm( test3333model, data = test3333, model = "within", effect = "individual", index = c("ID", "Year") )) test_that("ci", { expect_equal( ci(m1)$CI_low, c(-3.73825, -0.12292, -0.05971, -0.13356, -0.18381, -0.17782, -0.11688, -0.03962), tolerance = 1e-3 ) expect_equal( ci(m2)$CI_low, c(-0.08308, 0.2427, 0.70909, -0.00724), tolerance = 1e-3 ) expect_equal(ci(m3)$CI_low, -2.60478, tolerance = 1e-3) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.13223, 0.09221, 0.02684, 0.02679, 0.02704, 0.02671, 0.02663, 0.02664), tolerance = 1e-3 ) expect_equal( standard_error(m2)$SE, c(0.029, 0.02512, 0.03009, 0.00099), tolerance = 1e-3 ) expect_equal(standard_error(m3)$SE, 0.5166726, tolerance = 1e-3) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0.5285, 0.79456, 0.00262, 0, 0, 0.01558, 0.63395), tolerance = 1e-3 ) expect_equal( p_value(m2)$p, c(0.36752, 0, 0, 0), tolerance = 1e-3 ) expect_equal(p_value(m3)$p, 0.53696, tolerance = 1e-3) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(-3.47857, 0.05815, -0.00699, -0.08095, -0.13071, -0.12537, -0.06458, 0.01269), tolerance = 1e-3 ) expect_equal( model_parameters(m2)$Coefficient, c(-0.02615, 0.29201, 0.76816, -0.0053), tolerance = 1e-3 ) expect_equal(model_parameters(m3)$Coefficient, -0.381721, tolerance = 1e-3) }) test_that("vcov standard errors", { skip_if_not_installed("sandwich") data("Grunfeld", package = "plm") ran <- suppressWarnings( plm::plm(value ~ capital + inv, data = Grunfeld, model = "random", effect = "twoways") ) out1 <- standard_error(ran) out2 <- standard_error(ran, vcov = "HC1") validate1 <- coef(summary(ran))[, 2] validate2 <- sqrt(diag(sandwich::vcovHC(ran, type = "HC1"))) expect_equal(out1$SE, validate1, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(out2$SE, validate2, tolerance = 1e-3, ignore_attr = TRUE) expect_snapshot(print(model_parameters(ran))) expect_snapshot(print(model_parameters(ran, vcov = "HC1"))) }) test_that("vcov standard errors, methods", { data("Produc", package = "plm") zz <- plm::plm(log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, model = "random" ) out1 <- standard_error(zz, vcov = "HC1") out2 <- standard_error(zz, vcov = "HC1", vcov_args = list(method = "white1")) validate1 <- sqrt(diag(plm::vcovHC(zz, method = "arellano", type = "HC1"))) validate2 <- sqrt(diag(plm::vcovHC(zz, method = "white1", type = "HC1"))) expect_equal(out1$SE, validate1, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(out2$SE, validate2, tolerance = 1e-3, ignore_attr = TRUE) }) parameters/tests/testthat/test-model_parameters.metafor.R0000644000176200001440000000212114716604201023473 0ustar liggesuserstest_that("model_parameters.metafor", { skip_if_not_installed("metafor") test <- data.frame( estimate = c(0.111, 0.245, 0.8, 1.1, 0.03), std.error = c(0.05, 0.111, 0.001, 0.2, 0.01) ) mydat <<- test model <- metafor::rma(yi = estimate, sei = std.error, data = mydat) params <- model_parameters(model) expect_identical( 
params$Parameter, c("Study 1", "Study 2", "Study 3", "Study 4", "Study 5", "Overall") ) expect_identical( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "p", "Weight", "Method") ) expect_equal(params$Coefficient, c(0.111, 0.245, 0.8, 1.1, 0.03, 0.43769), tolerance = 1e-3) expect_equal(params$Weight, c(400, 81.16224, 1e+06, 25, 10000, NA), tolerance = 1e-3) # test message on unsupported arguments expect_message(model_parameters(model, vcov = "vcovHC"), regex = "Following arguments") # test standardize params <- model_parameters(model, standardize = "refit") expect_equal(params$Coefficient, c(0.111, 0.245, 0.8, 1.1, 0.03, -0.5613041), tolerance = 1e-3) }) parameters/tests/testthat/test-geeglm.R0000644000176200001440000000130514736731407017772 0ustar liggesusersskip_if_not_installed("geepack") data(warpbreaks) m1 <- geepack::geeglm( breaks ~ tension, id = wool, data = warpbreaks, family = poisson, corstr = "ar1" ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(3.28294, -0.76741, -0.64708), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.15931, 0.22554, 0.06598), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 0.14913, 0), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(3.59517, -0.32536, -0.51776), tolerance = 1e-4 ) }) parameters/tests/testthat/test-model_parameters.efa_cfa.R0000644000176200001440000000461315062754403023417 0ustar liggesuserstest_that("principal_components", { skip_if_not_installed("psych") set.seed(333) x <- principal_components(mtcars[, 1:7], n = "all", threshold = 0.2) expect_identical(c(ncol(x), nrow(x)), c(8L, 7L)) x <- suppressMessages(principal_components( mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", sort = TRUE )) expect_identical(c(ncol(x), nrow(x)), c(6L, 7L)) pca <- principal_components(mtcars[, 1:5], n = 2) expect_identical(c(ncol(pca), nrow(pca)), c(4L, 5L)) x <- summary(pca) expect_identical(c(ncol(x), nrow(x)), c(3L, 4L)) x <- model_parameters(pca) expect_identical(c(ncol(x), nrow(x)), c(5L, 2L)) x <- predict(pca) expect_identical(c(ncol(x), nrow(x)), c(2L, 32L)) }) test_that("efa-cfa", { skip_if_not_installed("psych") skip_if_not_installed("lavaan") skip_on_cran() efa <- psych::fa(attitude, nfactors = 3) params <- parameters::model_parameters(efa) expect_identical(c(nrow(params), ncol(params)), c(7L, 6L)) model1 <- efa_to_cfa(efa) model2 <- efa_to_cfa(efa, threshold = 0.3) expect_identical(nchar(model1), 109L) m1 <- suppressWarnings(lavaan::cfa(model1, data = attitude)) params <- parameters::model_parameters(m1) expect_identical(c(nrow(params), ncol(params)), c(10L, 10L)) expect_message(parameters::model_parameters(m1, ci = c(0.8, 0.9))) params <- parameters::model_parameters(m1, standardize = TRUE, component = "all") expect_identical(c(nrow(params), ncol(params)), c(20L, 10L)) x <- lavaan::anova(m1, lavaan::cfa(model2, data = attitude)) params <- parameters::model_parameters(x) expect_identical(c(nrow(params), ncol(params)), c(2L, 6L)) }) test_that("FactoMineR", { skip_if_not_installed("FactoMineR") x <- suppressWarnings(model_parameters( FactoMineR::PCA(mtcars, ncp = 3, graph = FALSE), threshold = 0.2, sort = TRUE )) expect_identical(c(ncol(x), nrow(x)), c(5L, 11L)) # x <- suppressWarnings(model_parameters(FactoMineR::FAMD(iris, ncp = 3, graph = FALSE), threshold = 0.2, sort = TRUE)) # expect_identical(c(ncol(x), nrow(x)), c(5L, 5L)) }) test_that("BayesFM", { 
skip_if_not_installed("BayesFM") set.seed(333) befa <- BayesFM::befa(mtcars, iter = 1000, verbose = FALSE) params <- suppressWarnings(parameters::model_parameters(befa, sort = TRUE)) expect_identical(nrow(params), 11L) }) parameters/tests/testthat/test-averaging.R0000644000176200001440000000355714721362233020477 0ustar liggesusersskip_on_cran() skip_if_not_installed("MuMIn") skip_if_not_installed("withr") skip_if_not_installed("glmmTMB") skip_if_not_installed("betareg") withr::with_options( list(na.action = "na.fail"), test_that("MuMIn link functions", { library(MuMIn) # nolint set.seed(1234) dat <- data.frame( outcome = rbinom(n = 100, size = 1, prob = 0.35), var_binom = as.factor(rbinom(n = 100, size = 1, prob = 0.2)), var_cont = rnorm(n = 100, mean = 10, sd = 7), group = sample(letters[1:4], size = 100, replace = TRUE), stringsAsFactors = FALSE ) dat$var_cont <- as.vector(scale(dat$var_cont)) m1 <- glm( outcome ~ var_binom + var_cont, data = dat, family = binomial(link = "logit") ) out <- MuMIn::model.avg(MuMIn::dredge(m1), fit = TRUE) mp <- model_parameters(out) expect_snapshot(print(mp)) }) ) test_that("ggpredict, glmmTMB averaging", { library(MuMIn) # nolint data(FoodExpenditure, package = "betareg") m <- glmmTMB::glmmTMB( I(food / income) ~ income + (1 | persons), ziformula = ~1, data = FoodExpenditure, na.action = "na.fail", family = glmmTMB::beta_family() ) set.seed(123) dr <- MuMIn::dredge(m) avg <- MuMIn::model.avg(object = dr, fit = TRUE) mp <- model_parameters(avg) expect_snapshot(print(mp)) }) withr::with_options( list(na.action = "na.fail"), test_that("ggpredict, poly averaging", { library(MuMIn) data(mtcars) mtcars$am <- factor(mtcars$am) set.seed(123) m <- lm(disp ~ mpg + I(mpg^2) + am + gear, mtcars) dr <- MuMIn::dredge(m, subset = dc(mpg, I(mpg^2))) dr <- subset(dr, !(has(mpg) & !has(I(mpg^2)))) mod.avg.i <- MuMIn::model.avg(dr, fit = TRUE) mp <- model_parameters(mod.avg.i) expect_snapshot(print(mp)) }) ) unloadNamespace("MuMIn") parameters/tests/testthat/test-printing.R0000644000176200001440000001056715053035103020355 0ustar liggesusersskip_on_cran() skip_if_not_installed("withr") skip_if(getRversion() < "4.0.0") withr::with_options( list(parameters_interaction = "*", easystats_table_width = Inf), { # Splitting model components ---- test_that("print model with multiple components", { skip_if_not_installed("glmmTMB") data("Salamanders", package = "glmmTMB") model <- glmmTMB::glmmTMB(count ~ spp + mined + (1 | site), ziformula = ~ spp + mined, family = glmmTMB::nbinom2(), data = Salamanders ) out <- model_parameters(model, exponentiate = TRUE) expect_snapshot(print(out)) expect_snapshot(print(out, split_component = FALSE)) }) # Adding model summaries ----- test_that("adding model summaries", { # summary doesn't show the R2 if performance is not installed so the # snapshot breaks between R CMD check "classic" and "strict" skip_if_not_installed("performance") model <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) out <- model_parameters(model, include_info = TRUE) expect_snapshot(print(out)) }) # Group parameters ------ test_that("grouped parameters", { mtcars$cyl <- as.factor(mtcars$cyl) mtcars$gear <- as.factor(mtcars$gear) model <- lm(mpg ~ hp + gear * vs + cyl + drat, data = mtcars) # don't select "Intercept" parameter out <- model_parameters(model, drop = "^\\(Intercept") expect_snapshot( print(out, groups = list( Engine = c(5, 6, 4, 1), # c("cyl6", "cyl8", "vs", "hp"), Interactions = c(8, 9), # c("gear4:vs", "gear5:vs"), Controls = c(2, 3, 7) )) ) 
expect_snapshot( print(out, groups = list( Engine = c("cyl [6]", "cyl [8]", "vs", "hp"), Interactions = c("gear [4] * vs", "gear [5] * vs"), Controls = c(2, 3, 7) )) ) expect_snapshot( print(out, sep = " ", groups = list( Engine = c(5, 6, 4, 1), Interactions = c(8, 9), Controls = c(2, 3, 7) ) ) ) }) # Digits ------ test_that("digits and ci_digits", { mtcars$cyl <- as.factor(mtcars$cyl) mtcars$gear <- as.factor(mtcars$gear) model <- lm(mpg ~ hp + gear + vs + cyl + drat, data = mtcars) expect_snapshot(model_parameters(model, digits = 4)) expect_snapshot(model_parameters(model, digits = 4, ci_digits = 1)) out <- model_parameters(model) expect_snapshot(print(out, digits = 4)) expect_snapshot(print(out, digits = 4, ci_digits = 1)) }) # Table templates ------ test_that("select pattern", { mtcars$cyl <- as.factor(mtcars$cyl) mtcars$gear <- as.factor(mtcars$gear) model <- lm(mpg ~ hp + gear * vs + cyl + drat, data = mtcars) # don't select "Intercept" parameter out <- model_parameters(model, drop = "^\\(Intercept") expect_snapshot( print(out, groups = list( Engine = c(5, 6, 4, 1), Interactions = c(8, 9), Controls = c(2, 3, 7) )) ) expect_snapshot(print(out, select = "{coef} ({se})")) expect_snapshot(print(out, select = "{coef}{stars}|[{ci}]")) expect_snapshot( print(out, groups = list( Engine = c(5, 6, 4, 1), Interactions = c(8, 9), Controls = c(2, 3, 7) ), select = "{coef}{stars}|[{ci}]") ) expect_snapshot( print(out, sep = " ", groups = list( Engine = c(5, 6, 4, 1), Interactions = c(8, 9), Controls = c(2, 3, 7) ), select = "{coef}{stars}|[{ci}]" ) ) }) } ) withr::with_options( list(parameters_warning_exponentiate = TRUE), test_that("no more message about interpretation of log-resoponse", { data(mtcars) m <- lm(log(mpg) ~ gear, data = mtcars) out <- model_parameters(m, exponentiate = TRUE) expect_snapshot(print(out)) }) ) withr::with_options( list(parameters_warning_exponentiate = TRUE), test_that("no fail for mgcv-binomial", { skip_if_not_installed("mgcv") m <- mgcv::gam(vs ~ s(mpg), data = mtcars, family = "binomial") out <- model_parameters(m) expect_snapshot(print(out)) }) ) parameters/tests/testthat/test-lcmm.R0000644000176200001440000000240715066721002017451 0ustar liggesusersskip_on_cran() skip_on_os(c("mac", "linux", "solaris")) skip_if_not_installed("lcmm") skip_if_not_installed("datawizard") skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_installed("withr") withr::with_options( list(parameters_warning_exponentiate = TRUE), test_that("model_parameters lcmm", { out <- tryCatch( datawizard::data_read( "https://github.com/easystats/circus/raw/refs/heads/main/data/lcmm.rda" ), error = function(e) NULL ) skip_if(is.null(out)) expect_snapshot(print(model_parameters(out$mx_linear), zap_small = TRUE)) expect_snapshot(print(model_parameters(out$mx_beta), zap_small = TRUE)) expect_snapshot(print(model_parameters(out$mx_splines), zap_small = TRUE)) expect_snapshot(print(model_parameters(out$m1_linear), zap_small = TRUE)) expect_snapshot(print(model_parameters(out$m1_beta), zap_small = TRUE)) expect_snapshot(print(model_parameters(out$m1_splines), zap_small = TRUE)) expect_snapshot(print(model_parameters(out$m2_linear), zap_small = TRUE)) expect_snapshot(print(model_parameters(out$m2_beta), zap_small = TRUE)) expect_snapshot(print(model_parameters(out$m2_splines), zap_small = TRUE)) }) ) parameters/tests/testthat/test-lme.R0000644000176200001440000000556214415527674017323 0ustar liggesusersskip_if_not_installed("nlme") skip_if_not_installed("lme4") 
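# The vcov-based expectations below all follow one recipe: plug a
# cluster-robust covariance matrix into the usual Wald machinery. Sketch
# (hypothetical object names; clubSandwich assumed to be installed):
# V <- as.matrix(clubSandwich::vcovCR(model, type = "CR3"))
# se <- sqrt(diag(V))
# t_stat <- lme4::fixef(model) / se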
data("sleepstudy", package = "lme4") m1_lme <- nlme::lme(Reaction ~ Days, random = ~ 1 + Days | Subject, data = sleepstudy) data("Orthodont", package = "nlme") m2_lme <- nlme::lme(distance ~ age + Sex, random = ~ 1 | Subject, data = Orthodont, method = "ML") data(iris) set.seed(1234) iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE)) m3_lme <- nlme::lme( fixed = Sepal.Length ~ Species * Sepal.Width + Petal.Length, random = ~ 1 | grp, data = iris ) test_that("ci", { expect_equal( ci(m1_lme)$CI_low, c(237.927995380985, 7.4146616764556), tolerance = 1e-4 ) }) test_that("ci(vcov)", { # vcov changes results ci1 <- ci(m3_lme) ci2 <- suppressMessages(ci(m3_lme, vcov = "CR3")) expect_true(all(ci1$CI_low != ci2$CI_low)) # manual computation b <- lme4::fixef(m3_lme) se <- standard_error(m3_lme, vcov = "CR3")$SE tstat <- b / se critical_t <- abs(qt(0.025, df = dof(m3_lme))) ci_lo <- b - critical_t * se ci_hi <- b + critical_t * se expect_equal(ci2$CI_low, ci_lo, tolerance = 1e-3, ignore_attr = TRUE) expect_equal(ci2$CI_high, ci_hi, tolerance = 1e-3, ignore_attr = TRUE) }) test_that("se", { expect_equal( standard_error(m1_lme)$SE, c(6.82451602451407, 1.54578275017725), tolerance = 1e-4 ) }) test_that("se: vcov", { skip_if_not_installed("clubSandwich") se1 <- standard_error(m1_lme, vcov = "CR3")$SE se2 <- sqrt(diag(as.matrix(clubSandwich::vcovCR(m1_lme, type = "CR3")))) expect_equal(se1, se2, ignore_attr = TRUE) }) test_that("p_value", { expect_equal( p_value(m1_lme)$p, c(2.38350215912719e-80, 2.26328050057813e-10), tolerance = 1e-4 ) }) test_that("p: vcov", { skip_if_not_installed("clubSandwich") skip_if_not_installed("lmtest") # default p1 <- stats::coef(summary(m3_lme))[, 5] p2 <- p_value(m3_lme)$p expect_equal(p1, p2, ignore_attr = TRUE) # manual computation p1 <- p_value(m3_lme, vcov = "CR3")$p b2 <- lme4::fixef(m3_lme) se2 <- sqrt(diag(as.matrix(clubSandwich::vcovCR(m3_lme, type = "CR3")))) t2 <- b2 / se2 # same DF used in `nlme:::summary.lme` p2 <- 2 * pt(-abs(t2), df = m3_lme$fixDF[["X"]]) expect_equal(p1, p2, ignore_attr = TRUE) }) test_that("model_parameters", { expect_equal( model_parameters(m1_lme, effects = "fixed")$Coefficient, c(251.405104848485, 10.467285959596), tolerance = 1e-4 ) }) test_that("model_parameters", { params <- model_parameters(m2_lme, effects = "fixed") expect_equal(params$Coefficient, c(17.70671, 0.66019, -2.32102), tolerance = 1e-4) expect_equal(params$SE, c(0.83155, 0.06209, 0.74307), tolerance = 1e-4) # expect_equal(params$df, c(80, 80, 25), tolerance = 1e-4) expect_equal(params$CI_low, c(16.07503, 0.53834, -3.82999), tolerance = 1e-4) }) parameters/tests/testthat/test-car.R0000644000176200001440000000150714413515226017272 0ustar liggesusersskip_if_not_installed("car") mod <- lm(mpg ~ disp + hp, mtcars) x <- car::deltaMethod(mod, "disp + hp", rhs = 0) test_that("ci", { expect_equal(ci(x)$CI_low, x$`2.5 %`, tolerance = 1e-3) }) test_that("se", { expect_equal(standard_error(x)$SE, x$SE, tolerance = 1e-3) }) test_that("p", { expect_equal(p_value(x)$p, x$`Pr(>|z|)`, tolerance = 1e-3) }) mp <- model_parameters(x) test_that("model_parameters", { expect_equal(mp$Coefficient, x$Estimate, tolerance = 1e-3) expect_equal(mp$Parameter, row.names(x), tolerance = 1e-3) }) x <- car::deltaMethod(mod, "disp + hp", rhs = 0, level = 0.8) test_that("ci", { expect_equal(ci(x)$CI_low, x$`10 %`, tolerance = 1e-3) }) mp <- model_parameters(x) test_that("model_parameters", { expect_equal(attributes(mp)$ci, 0.8, tolerance = 1e-3) }) 
parameters/tests/testthat/test-wrs2.R0000644000176200001440000000465515002455357017434 0ustar liggesusersskip_if_not_installed("WRS2") data(viagra, package = "WRS2") data(WineTasting, package = "WRS2") data(spider, package = "WRS2") # model_parameters.t1way --------------------------------------------------- test_that("model_parameters.t1way", { set.seed(123) df_b <- model_parameters(WRS2::t1way(libido ~ dose, data = viagra)) expect_named( df_b, c( "F", "df", "df_error", "p", "Method", "Estimate", "CI", "CI_low", "CI_high", "Effectsize" ) ) set.seed(123) df_w <- model_parameters(WRS2::rmanova(WineTasting$Taste, WineTasting$Wine, WineTasting$Taster)) expect_named(df_w, c("F", "df", "df_error", "p", "Method")) }) # model_parameters.yuen --------------------------------------------------- test_that("model_parameters.yuen", { set.seed(123) df_b <- model_parameters(WRS2::yuen(Anxiety ~ Group, data = spider)) expect_named( df_b, c( "t", "df_error", "p", "Method", "Difference", "CI", "Difference_CI_low", "Difference_CI_high", "Estimate", "Effectsize" ) ) before <- c(190, 210, 300, 240, 280, 170, 280, 250, 240, 220) after <- c(210, 210, 340, 190, 260, 180, 200, 220, 230, 200) set.seed(123) df_w <- model_parameters(WRS2::yuend(before, after)) set.seed(123) df_bt <- model_parameters(WRS2::yuenbt(Anxiety ~ Group, data = spider)) expect_named( df_bt, c( "t", "df_error", "p", "Method", "Difference", "CI", "Difference_CI_low", "Difference_CI_high" ) ) }) # model_parameters.mcp and robtab --------------------------------------- test_that("model_parameters.mcp and robtab", { set.seed(123) df_b <- model_parameters(WRS2::lincon(libido ~ dose, data = viagra)) expect_snapshot(print(df_b, table_width = Inf)) set.seed(123) df_w <- model_parameters(WRS2::rmmcp(WineTasting$Taste, WineTasting$Wine, WineTasting$Taster)) set.seed(123) df <- model_parameters(WRS2::discmcp(libido ~ dose, viagra, nboot = 100)) }) # model_parameters.akp.effect ----------------------------------------------- test_that("model_parameters.AKP", { set.seed(123) mod <- WRS2::akp.effect( formula = wt ~ am, data = mtcars, EQVAR = FALSE ) }) # model_parameters.onesampb --------------------------------------------------- test_that("model_parameters.onesampb", { set.seed(123) x <- rnorm(30) set.seed(123) mod <- WRS2::onesampb(x, nboot = 100) }) parameters/tests/testthat/test-serp.R0000644000176200001440000000222114721362233017470 0ustar liggesusersskip_if_not_installed("serp") skip_if_not_installed("withr") # make sure we have the correct interaction mark for tests withr::with_options( list(parameters_interaction = "*"), test_that("model_parameters.serp", { data(wine, package = "serp") m1 <- serp::serp( rating ~ temp * contact, slope = "penalize", link = "logit", reverse = TRUE, tuneMethod = "user", lambda = 5, data = ordinal::wine ) mp <- model_parameters(m1, verbose = FALSE) expect_snapshot(suppressMessages(print(mp))) # validate against coef out <- coef(summary(m1)) expect_equal(mp$Coefficient, out[, 1], tolerance = 1e-4, ignore_attr = TRUE) expect_equal(mp$SE, out[, 2], tolerance = 1e-4, ignore_attr = TRUE) expect_equal(mp$z, out[, 3], tolerance = 1e-4, ignore_attr = TRUE) expect_equal(mp$p, out[, 4], tolerance = 1e-4, ignore_attr = TRUE) out <- confint(m1) expect_equal(mp$CI_low, out[, 1], tolerance = 1e-4, ignore_attr = TRUE) expect_equal(degrees_of_freedom(m1), Inf, tolerance = 1e-3) expect_equal(degrees_of_freedom(m1, "residual"), 279.5938, tolerance = 1e-3) }) ) 
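# serp fits penalized cumulative-link models; the snapshot above is checked
# against the model's own Wald summary. Sketch of that correspondence
# (`m1` only exists inside the test_that block, so illustration only):
# cbind(estimate = coef(summary(m1))[, 1], se = coef(summary(m1))[, 2])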
parameters/tests/testthat/test-model_parameters.cgam.R0000644000176200001440000000453414716604201022757 0ustar liggesusersskip_on_cran() test_that("model_parameters - cgam", { skip_if_not(getRversion() >= "4.0.0") skip_if_not_installed("cgam") # cgam ----------------------- data(cubic, package = "cgam") # model m_cgam <- cgam::cgam(formula = y ~ cgam::incr.conv(x), data = cubic) df_cgam <- model_parameters(m_cgam) expect_equal( df_cgam, data.frame( Parameter = "(Intercept)", Coefficient = 1.187, SE = 0.3054, CI = 0.95, CI_low = 0.569520101908619, CI_high = 1.80447989809138, t = 3.8868, df_error = 39.5, p = 4e-04, stringsAsFactors = FALSE ), tolerance = 0.01, ignore_attr = TRUE ) }) # cgamm ----------------------- test_that("model_parameters - cgamm", { skip_if_not(getRversion() >= "4.0.0") skip_if_not_installed("cgam") # setup set.seed(123) # simulate a balanced data set with 30 clusters # each cluster has 30 data points n <- 30 m <- 30 # the standard deviation of between cluster error terms is 1 # the standard deviation of within cluster error terms is 2 sige <- 1 siga <- 2 # generate a continuous predictor x <- 1:(m * n) for (i in 1:m) { x[(n * (i - 1) + 1):(n * i)] <- round(runif(n), 3) } # generate a group factor group <- trunc(0:((m * n) - 1) / n) + 1 # generate the fixed-effect term mu <- 10 * exp(10 * x - 5) / (1 + exp(10 * x - 5)) # generate the random-intercept term asscosiated with each group avals <- rnorm(m, 0, siga) # generate the response y <- 1:(m * n) for (i in 1:m) { y[group == i] <- mu[group == i] + avals[i] + rnorm(n, 0, sige) } # use REML method to fit the model ans <- cgam::cgamm(formula = y ~ cgam::s.incr(x) + (1 | group), reml = TRUE) df <- suppressWarnings(model_parameters(ans)) expect_equal( df, data.frame( Parameter = c("(Intercept)", "cgam::s.incr(x)"), Coefficient = c(5.5174, NA), SE = c(0.3631, NA), CI = c(0.95, NA), CI_low = c(4.80476838465533, NA), CI_high = c(6.23003161534467, NA), `t / F` = c(15.1954, NA), df = c(NA, 8.4), df_error = c(890.4, NA), p = c(0, 0), Component = c("conditional", "smooth_terms"), stringsAsFactors = FALSE ), tolerance = 0.01, ignore_attr = TRUE ) }) parameters/tests/testthat/test-visualisation_recipe.R0000644000176200001440000000042014716604201022735 0ustar liggesuserstest_that("vis_recipe.cluster_analysis", { data(iris) result <- cluster_analysis(iris[, 1:4], n = 4) out <- visualisation_recipe(result) expect_named(out, c("l1", "l2", "l3")) expect_s3_class(out, "visualisation_recipe") expect_snapshot(print(out)) }) parameters/tests/testthat/test-model_parameters.mclogit.R0000644000176200001440000000164215111301621023471 0ustar liggesusersskip_on_cran() skip_if_not_installed("mclogit") skip_if_not_installed("withr") skip_if_not(packageVersion("insight") > "0.19.1") withr::with_options(list(parameters_exponentiate = FALSE), { data(Transport, package = "mclogit") invisible(capture.output({ m1 <- mclogit::mclogit(cbind(resp, suburb) ~ distance + cost, data = Transport) })) data(housing, package = "MASS") invisible(capture.output({ m2 <- mclogit::mblogit(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) })) test_that("model_parameters.mclogit", { params <- model_parameters(m1) expect_snapshot(params) }) test_that("model_parameters.mblogit", { params <- model_parameters(m2) expect_snapshot(params) }) skip_on_os(c("mac", "linux")) test_that("simulate_parameters.mblogit", { set.seed(1234) params <- simulate_parameters(m2) expect_snapshot(params) }) }) 
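# simulate_parameters() summarizes draws from a multivariate normal centred
# at the estimated coefficients with the model's covariance matrix. A rough
# base-R sketch of the idea (simplified, not the exact internals):
# draws <- MASS::mvrnorm(1000, mu = coef(m1), Sigma = vcov(m1))
# apply(draws, 2, median) # close to the "Coefficient" column in the snapshot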
parameters/tests/testthat/test-glmmTMB.R0000644000176200001440000007137715111301621020024 0ustar liggesusersskip_on_cran() skip_if_not_installed("withr") skip_if_not_installed("glmmTMB") skip_if_not(getRversion() >= "4.0.0") data("fish", package = "parameters") data("Salamanders", package = "glmmTMB") withr::with_options(list(parameters_exponentiate = FALSE), { m1 <- suppressWarnings(glmmTMB::glmmTMB( count ~ child + camper + (1 | persons), ziformula = ~ child + camper + (1 | persons), data = fish, family = glmmTMB::truncated_poisson() )) m2 <- suppressWarnings(glmmTMB::glmmTMB( count ~ child + camper + (1 | persons), data = fish, family = poisson() )) m3 <- suppressWarnings(glmmTMB::glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~ spp + mined, family = glmmTMB::nbinom2, data = Salamanders )) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.33067, -1.32402, 0.55037, -1.66786, 1.44667, -1.64177), tolerance = 1e-3 ) expect_equal( ci(m1, component = "cond")$CI_low, c(0.33067, -1.32402, 0.55037), tolerance = 1e-3 ) expect_equal( ci(m1, component = "zi")$CI_low, c(-1.66786, 1.44667, -1.64177), tolerance = 1e-3 ) expect_equal(ci(m2)$CI_low, c(-0.47982, -1.85096, 0.76044), tolerance = 1e-3) expect_equal( ci(m2, component = "cond")$CI_low, c(-0.47982, -1.85096, 0.76044), tolerance = 1e-3 ) expect_message(expect_null(ci(m2, component = "zi")), "no zero-inflation component") }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.47559, 0.09305, 0.09346, 0.65229, 0.3099, 0.32324), tolerance = 1e-3 ) expect_equal( standard_error(m1, effects = "random")$persons$`(Intercept)`, c(0.69856, 0.68935, 0.68749, 0.68596), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = "cond")$SE, c(0.47559, 0.09305, 0.09346), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = "zi")$SE, c(0.65229, 0.3099, 0.32324), tolerance = 1e-3 ) expect_equal(standard_error(m2)$SE, c(0.62127, 0.08128, 0.08915), tolerance = 1e-3) expect_equal( standard_error(m2, component = "cond")$SE, c(0.62127, 0.08128, 0.08915), tolerance = 1e-3 ) expect_message( expect_null(standard_error(m2, component = "zi")), "no zero-inflation component" ) }) test_that("p_value", { expect_equal(p_value(m1)$p, c(0.00792, 0, 0, 0.55054, 0, 0.00181), tolerance = 1e-3) expect_equal(p_value(m1, component = "cond")$p, c(0.00792, 0, 0), tolerance = 1e-3) expect_equal( p_value(m1, component = "zi")$p, c(0.55054, 0, 0.00181), tolerance = 1e-3 ) expect_equal(p_value(m2)$p, c(0.23497, 0, 0), tolerance = 1e-3) expect_equal(p_value(m2, component = "cond")$p, c(0.23497, 0, 0), tolerance = 1e-3) expect_message( expect_null(p_value(m2, component = "zi")), "no zero-inflation component" ) }) test_that("model_parameters", { expect_equal( model_parameters(m1, effects = "fixed")$Coefficient, c(1.2628, -1.14165, 0.73354, -0.38939, 2.05407, -1.00823), tolerance = 1e-3 ) expect_equal( model_parameters(m1, effects = "all")$Coefficient, c(1.2628, -1.14165, 0.73354, -0.38939, 2.05407, -1.00823, 0.9312, 1.17399), tolerance = 1e-3 ) expect_equal( model_parameters(m2, effects = "fixed")$Coefficient, c(0.73785, -1.69166, 0.93516), tolerance = 1e-3 ) expect_equal( model_parameters(m3, effects = "fixed")$Coefficient, c( -0.61038, -0.9637, 0.17068, -0.38706, 0.48795, 0.58949, -0.11327, 1.42935, 0.91004, 1.16141, -0.93932, 1.04243, -0.56231, -0.893, -2.53981, -2.56303, 1.51165 ), tolerance = 1e-2 ) expect_identical( model_parameters(m1)$Component, c( "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", 
"conditional", "zero_inflated" ) ) expect_null(model_parameters(m2, effects = "fixed")$Component) expect_identical( model_parameters(m2)$Component, c("conditional", "conditional", "conditional", "conditional") ) expect_identical( model_parameters(m3, effects = "fixed")$Component, c( "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated", "dispersion" ) ) expect_equal( model_parameters(m3, effects = "fixed")$SE, c( 0.4052, 0.6436, 0.2353, 0.3424, 0.2383, 0.2278, 0.2439, 0.3666, 0.6279, 1.3346, 0.8005, 0.714, 0.7263, 0.7535, 2.1817, 0.6045, NA ), tolerance = 1e-2 ) }) test_that("model_parameters.mixed-random", { params <- model_parameters(m1, effects = "random", group_level = TRUE) expect_identical(c(nrow(params), ncol(params)), c(8L, 10L)) expect_named( params, c( "Parameter", "Level", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Component", "Effects", "Group" ) ) expect_identical( as.vector(params$Parameter), c( "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)" ) ) expect_identical( params$Component, c( "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c(-1.24, -0.3456, 0.3617, 1.2553, 1.5719, 0.3013, -0.3176, -1.5665), tolerance = 1e-2 ) }) test_that("model_parameters.mixed-random grouplevel", { params <- model_parameters(m1, effects = "grouplevel") expect_identical(c(nrow(params), ncol(params)), c(8L, 10L)) expect_named( params, c( "Parameter", "Level", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Component", "Effects", "Group" ) ) expect_identical( as.vector(params$Parameter), c( "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)" ) ) expect_identical( params$Component, c( "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c(-1.24, -0.3456, 0.3617, 1.2553, 1.5719, 0.3013, -0.3176, -1.5665), tolerance = 1e-2 ) }) test_that("model_parameters.mixed-random total", { params <- model_parameters(m1, effects = "total") expect_identical(c(nrow(params), ncol(params)), c(8L, 6L)) expect_named( params, c("Group", "Level", "Parameter", "Coefficient", "Component", "Effects") ) expect_identical( as.vector(params$Parameter), c( "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)" ) ) expect_identical( params$Component, c( "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c(0.02278, 0.91717, 1.62455, 2.51811, 1.18248, -0.08808, -0.70703, -1.95584), tolerance = 1e-2 ) }) test_that("model_parameters.mixed-ran_pars", { params <- model_parameters(m1, effects = "random") expect_identical(c(nrow(params), ncol(params)), c(2L, 9L)) expect_named( params, c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Effects", "Group", "Component" ) ) expect_identical(params$Parameter, c("SD (Intercept)", "SD (Intercept)")) expect_identical(params$Component, c("conditional", "zero_inflated")) expect_equal(params$Coefficient, c(0.9312, 1.17399), tolerance = 1e-2) }) 
test_that("model_parameters.mixed-all_pars", { params <- model_parameters(m1, effects = "all") expect_identical(c(nrow(params), ncol(params)), c(8L, 12L)) expect_named( params, c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p", "Effects", "Group", "Component" ) ) expect_identical( params$Parameter, c( "(Intercept)", "child", "camper1", "(Intercept)", "child", "camper1", "SD (Intercept)", "SD (Intercept)" ) ) expect_identical( params$Component, c( "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "conditional", "zero_inflated" ) ) expect_equal( params$Coefficient, c(1.2628, -1.14165, 0.73354, -0.38939, 2.05407, -1.00823, 0.9312, 1.17399), tolerance = 1e-2 ) }) test_that("model_parameters.mixed-all", { params <- model_parameters(m1, effects = "all", group_level = TRUE) expect_identical(c(nrow(params), ncol(params)), c(14L, 13L)) expect_named( params, c( "Parameter", "Level", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p", "Component", "Effects", "Group" ) ) expect_identical( params$Parameter, c( "(Intercept)", "child", "camper1", "(Intercept)", "child", "camper1", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)", "(Intercept)" ) ) expect_identical( params$Component, c( "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c( 1.2628, -1.1417, 0.7335, -0.3894, 2.0541, -1.0082, -1.24, -0.3456, 0.3617, 1.2553, 1.5719, 0.3013, -0.3176, -1.5665 ), tolerance = 1e-2 ) }) data(mtcars) mdisp <- glmmTMB::glmmTMB(hp ~ 0 + wt / mpg, mtcars) test_that("model_parameters, dispersion", { mp <- model_parameters(mdisp) expect_equal(mp$Coefficient, c(59.50992, -0.80396, 48.97731), tolerance = 1e-2) expect_identical(mp$Parameter, c("wt", "wt:mpg", "(Intercept)")) expect_identical(mp$Component, c("conditional", "conditional", "dispersion")) }) mdisp <- glmmTMB::glmmTMB(hp ~ 0 + wt / mpg + (1 | gear), mtcars) test_that("model_parameters, dispersion", { mp <- model_parameters(mdisp) expect_equal( mp$Coefficient, c(58.25869, -0.87868, 47.01676, 36.99492), tolerance = 1e-2 ) expect_identical( mp$Parameter, c("wt", "wt:mpg", "SD (Intercept)", "SD (Observations)") ) expect_identical( mp$Component, c("conditional", "conditional", "conditional", "conditional") ) }) m4 <- suppressWarnings(glmmTMB::glmmTMB( count ~ child + camper + (1 + xb | persons), ziformula = ~ child + camper + (1 + zg | persons), data = fish, family = glmmTMB::truncated_poisson() )) test_that("model_parameters.mixed-ran_pars", { expect_message( { params <- model_parameters(m4, effects = "random") }, regex = "Your model may" ) expect_identical(c(nrow(params), ncol(params)), c(6L, 9L)) expect_named( params, c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Effects", "Group", "Component" ) ) expect_identical( params$Parameter, c( "SD (Intercept)", "SD (xb)", "Cor (Intercept~xb)", "SD (Intercept)", "SD (zg)", "Cor (Intercept~zg)" ) ) expect_identical( params$Component, c( "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c(3.40563, 1.21316, -1, 2.73583, 1.56833, 1), tolerance = 1e-2 ) }) # exponentiate for dispersion = sigma parameters ----------------------- set.seed(101) ## rbeta() function 
parameterized by mean and shape my_rbeta <- function(n, mu, shape0) { rbeta(n, shape1 = mu * shape0, shape2 = (1 - mu) * shape0) } n <- 100 ng <- 10 dd <- data.frame(x = rnorm(n), f = factor(rep(1:(n / ng), ng))) dd <- transform(dd, y = my_rbeta(n, mu = plogis(-1 + 2 * x + rnorm(ng)[f]), shape0 = 5)) m_exp <- glmmTMB::glmmTMB(y ~ x + (1 | f), family = glmmTMB::beta_family(), dd) test_that("model_parameters, exp, glmmTMB", { mp1 <- model_parameters(m_exp, exponentiate = TRUE) mp2 <- model_parameters(m_exp, exponentiate = FALSE) expect_equal(mp1$Coefficient, c(0.49271, 6.75824, 5.56294, 1.14541), tolerance = 1e-3) expect_equal(mp1$Coefficient[3:4], mp2$Coefficient[3:4], tolerance = 1e-3) }) test_that("model_parameters, no dispersion, glmmTMB", { mp1 <- model_parameters( m_exp, effects = "fixed", component = "conditional", exponentiate = TRUE ) mp2 <- model_parameters( m_exp, effects = "fixed", component = "conditional", exponentiate = FALSE ) expect_equal( mp1$Coefficient, unname(exp(unlist(glmmTMB::fixef(m_exp)$cond))), tolerance = 1e-3 ) expect_equal( mp2$Coefficient, unname(unlist(glmmTMB::fixef(m_exp)$cond)), tolerance = 1e-3 ) }) # proper printing --------------------- test_that("print-model_parameters glmmTMB", { skip_on_os(c("mac", "linux", "solaris")) skip_if_not(getRversion() >= "4.3.3") mp <- model_parameters(m4, effects = "fixed", component = "conditional") out <- utils::capture.output(print(mp)) expect_snapshot(out[-5]) mp <- model_parameters( m4, ci_random = TRUE, effects = "random", component = "conditional", verbose = FALSE ) out <- utils::capture.output(print(mp)) expect_identical( attributes(mp)$pretty_labels, c( `SD (Intercept)` = "SD (Intercept)", `SD (xb)` = "SD (xb)", `Cor (Intercept~xb)` = "Cor (Intercept~xb)" ) ) expect_identical( substr(out, 1, 30), c( "# Random Effects", "", "Parameter | ", "------------------------------", "SD (Intercept: persons) | ", "SD (xb: persons) | ", "Cor (Intercept~xb: persons) | " ) ) expect_equal(mp$Coefficient, c(3.40563, 1.21316, -1), tolerance = 1e-3) expect_equal(mp$CI_low, c(1.64567, 0.5919, -1), tolerance = 1e-3) mp <- model_parameters( m4, ci_random = TRUE, effects = "fixed", component = "zero_inflated" ) out <- utils::capture.output(print(mp)) expect_identical( attributes(mp)$pretty_labels, c(`(Intercept)` = "(Intercept)", child = "child", camper1 = "camper [1]") ) expect_identical( substr(out, 1, 12), c( "# Fixed Effe", "", "Parameter ", "------------", "(Intercept) ", "child ", "camper [1] " ) ) expect_equal(mp$Coefficient, c(1.88964, 0.15712, -0.17007), tolerance = 1e-3) expect_equal(mp$CI_low, c(0.5878, -0.78781, -0.92836), tolerance = 1e-3) mp <- model_parameters( m4, ci_random = TRUE, effects = "random", component = "zero_inflated", verbose = FALSE ) out <- utils::capture.output(print(mp)) expect_identical( attributes(mp)$pretty_labels, c( `SD (Intercept)` = "SD (Intercept)", `SD (zg)` = "SD (zg)", `Cor (Intercept~zg)` = "Cor (Intercept~zg)" ) ) expect_identical( substr(out, 1, 30), c( "# Random Effects (Zero-Inflati", "", "Parameter | ", "------------------------------", "SD (Intercept: persons) | ", "SD (zg: persons) | ", "Cor (Intercept~zg: persons) | " ) ) expect_equal(mp$Coefficient, c(2.73583, 1.56833, 1), tolerance = 1e-3) expect_equal(mp$CI_low, c(1.16329, 0.64246, -1), tolerance = 1e-3) mp <- model_parameters( m4, ci_random = TRUE, effects = "all", component = "conditional", verbose = FALSE ) out <- utils::capture.output(print(mp)) expect_identical( attributes(mp)$pretty_labels, c( `(Intercept)` = "(Intercept)", 
child = "child", camper1 = "camper [1]", `SD (Intercept)` = "SD (Intercept)", `SD (xb)` = "SD (xb)", `Cor (Intercept~xb)` = "Cor (Intercept~xb)" ) ) expect_identical( substr(out, 1, 30), c( "# Fixed Effects", "", "Parameter | Log-Mean | SE ", "------------------------------", "(Intercept) | 2.55 | 0.25 ", "child | -1.09 | 0.10 ", "camper [1] | 0.27 | 0.10 ", "", "# Random Effects", "", "Parameter | ", "------------------------------", "SD (Intercept: persons) | ", "SD (xb: persons) | ", "Cor (Intercept~xb: persons) | " ) ) expect_equal( mp$Coefficient, c(2.54713, -1.08747, 0.2723, 3.40563, 1.21316, -1), tolerance = 1e-3 ) expect_equal( mp$CI_low, c(2.06032, -1.27967, 0.07461, 1.64567, 0.5919, -1), tolerance = 1e-3 ) mp <- model_parameters( m4, effects = "all", ci_random = TRUE, component = "zero_inflated", verbose = FALSE ) out <- utils::capture.output(print(mp)) expect_identical( attributes(mp)$pretty_labels, c( `(Intercept)` = "(Intercept)", child = "child", camper1 = "camper [1]", `SD (Intercept)` = "SD (Intercept)", `SD (zg)` = "SD (zg)", `Cor (Intercept~zg)` = "Cor (Intercept~zg)" ) ) expect_identical( substr(out, 1, 30), c( "# Fixed Effects (Zero-Inflatio", "", "Parameter | Log-Mean | SE ", "------------------------------", "(Intercept) | 1.89 | 0.66 ", "child | 0.16 | 0.48 ", "camper [1] | -0.17 | 0.39 ", "", "# Random Effects (Zero-Inflati", "", "Parameter | ", "------------------------------", "SD (Intercept: persons) | ", "SD (zg: persons) | ", "Cor (Intercept~zg: persons) | " ) ) expect_equal( mp$Coefficient, c(1.88964, 0.15712, -0.17007, 2.73583, 1.56833, 1), tolerance = 1e-3 ) expect_equal( mp$CI_low, c(0.5878, -0.78781, -0.92836, 1.16329, 0.64246, -1), tolerance = 1e-3 ) mp <- model_parameters( m4, effects = "all", component = "all", ci_random = TRUE, verbose = FALSE ) out <- utils::capture.output(print(mp)) expect_identical( attributes(mp)$pretty_labels, c( `(Intercept)` = "(Intercept)", child = "child", camper1 = "camper [1]", `(Intercept)` = "(Intercept)", child = "child", camper1 = "camper1", # nolint `SD (Intercept)` = "SD (Intercept)", `SD (xb)` = "SD (xb)", `Cor (Intercept~xb)` = "Cor (Intercept~xb)", `SD (Intercept)` = "SD (Intercept)", `SD (zg)` = "SD (zg)", # nolint `Cor (Intercept~zg)` = "Cor (Intercept~zg)" ) ) expect_identical( substr(out, 1, 30), c( "# Fixed Effects (Count Model)", "", "Parameter | Log-Mean | SE ", "------------------------------", "(Intercept) | 2.55 | 0.25 ", "child | -1.09 | 0.10 ", "camper [1] | 0.27 | 0.10 ", "", "# Fixed Effects (Zero-Inflatio", "", "Parameter | Log-Odds | SE ", "------------------------------", "(Intercept) | 1.89 | 0.66 ", "child | 0.16 | 0.48 ", "camper [1] | -0.17 | 0.39 ", "", "# Random Effects Variances", "", "Parameter | ", "------------------------------", "SD (Intercept: persons) | ", "SD (xb: persons) | ", "Cor (Intercept~xb: persons) | ", "", "# Random Effects (Zero-Inflati", "", "Parameter | ", "------------------------------", "SD (Intercept: persons) | ", "SD (zg: persons) | ", "Cor (Intercept~zg: persons) | " ) ) expect_equal( mp$Coefficient, c( 2.54713, -1.08747, 0.2723, 1.88964, 0.15712, -0.17007, 3.40563, 1.21316, -1, 2.73583, 1.56833, 1 ), tolerance = 1e-3 ) expect_equal( mp$CI_low, c( 2.06032, -1.27967, 0.07461, 0.5878, -0.78781, -0.92836, 1.64567, 0.5919, -1, 1.16329, 0.64246, -1 ), tolerance = 1e-3 ) }) # proper printing of digits --------------------- test_that("print-model_parameters glmmTMB digits", { skip_on_os(c("mac", "linux", "solaris")) skip_if_not(getRversion() >= "4.3.3") mp <- 
model_parameters(m4, ci_random = TRUE, effects = "all", component = "all") out <- utils::capture.output(print(mp, digits = 4, ci_digits = 5)) expect_snapshot(out[-c(5, 14)]) mp <- model_parameters( m4, effects = "all", component = "all", ci_random = TRUE, digits = 4, ci_digits = 5 ) out <- utils::capture.output(print(mp)) expect_snapshot(out[-c(5, 14)]) }) # proper alignment of CIs --------------------- test_that("print-model_parameters glmmTMB CI alignment", { skip_if_not_installed("curl") skip_if_offline() skip_on_os(c("mac", "linux", "solaris")) skip_if_not(getRversion() >= "4.3.3") model_pr <- tryCatch( { load(url( "https://github.com/d-morrison/parameters/raw/glmmTMB/data/pressure_durations.RData" )) glmmTMB::glmmTMB( formula = n_samples ~ Surface + Side + Jaw + (1 | Participant / Session), ziformula = ~ Surface + Side + Jaw + (1 | Participant / Session), dispformula = ~1, family = glmmTMB::nbinom2(), data = pressure_durations ) }, error = function(e) { NULL } ) mp <- model_parameters( model_pr, effects = "random", component = "all", ci_random = TRUE ) expect_snapshot(print(mp)) mp <- model_parameters(model_pr, effects = "fixed", component = "all") expect_snapshot(print(mp)) }) test_that("model_parameters.mixed-all", { skip_on_os(c("mac", "linux", "solaris")) skip_if_not(getRversion() >= "4.3.3") params <- model_parameters(m4, effects = "all") expect_identical(c(nrow(params), ncol(params)), c(12L, 12L)) expect_identical( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p", "Effects", "Group", "Component" ) ) expect_identical( params$Parameter, c( "(Intercept)", "child", "camper1", "(Intercept)", "child", "camper1", "SD (Intercept)", "SD (xb)", "Cor (Intercept~xb)", "SD (Intercept)", "SD (zg)", "Cor (Intercept~zg)" ) ) expect_identical( params$Component, c( "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c( 2.54713, -1.08747, 0.2723, 1.88964, 0.15712, -0.17007, 3.40563, 1.21316, -1, 2.73583, 1.56833, 1 ), tolerance = 1e-2 ) }) test_that("print-model_parameters", { skip_on_os(c("mac", "linux", "solaris")) skip_if_not(getRversion() >= "4.3.3") mp <- model_parameters(m1, effects = "fixed", verbose = FALSE) expect_snapshot(mp) mp <- model_parameters(m1, effects = "fixed", exponentiate = TRUE, verbose = FALSE) expect_snapshot(mp) mp <- model_parameters(m1, effects = "all", verbose = FALSE) expect_snapshot(mp) }) test_that("print-model_parameters, random dispersion", { data(Salamanders, package = "glmmTMB") m <- glmmTMB::glmmTMB( count ~ spp + cover + mined + (1 | site), ziformula = ~ spp + mined, dispformula = ~ DOY + (1 | site), data = Salamanders, family = glmmTMB::nbinom1() ) out <- as.data.frame(model_parameters(m, effects = "fixed", component = "all")) expect_identical(nrow(out), 19L) out <- as.data.frame(model_parameters(m, effects = "random", component = "all")) expect_identical(nrow(out), 1L) out <- as.data.frame(model_parameters( m, effects = "random", component = "all", group_level = TRUE )) expect_identical(nrow(out), 46L) expect_equal( out$Coefficient, unlist(glmmTMB::ranef(m)), ignore_attr = TRUE, tolerance = 1e-4 ) }) test_that("robust SE/VCOV", { skip_if_not_installed("sandwich") skip_if(packageVersion("insight") <= "1.4.0") out1 <- standard_error(m1) out2 <- sqrt(diag(insight::get_varcov(m1, component = "all"))) expect_equal(out1$SE, out2[1:6], 
ignore_attr = TRUE, tolerance = 1e-4) out1 <- standard_error(m1, vcov = "HC0") out2 <- sqrt(diag(insight::get_varcov(m1, vcov = "HC0", component = "all"))) expect_equal(out1$SE, out2[1:6], ignore_attr = TRUE, tolerance = 1e-4) out <- model_parameters(m1, vcov = "HC0") expect_snapshot(print(out, table_width = Inf)) }) }) parameters/tests/testthat/test-ivreg.R0000644000176200001440000000232214413515226017635 0ustar liggesusersskip_if_not_installed("AER") data(CigarettesSW, package = "AER") CigarettesSW$rprice <- with(CigarettesSW, price / cpi) CigarettesSW$rincome <- with(CigarettesSW, income / population / cpi) CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax) / cpi) m1 <- AER::ivreg( log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax / cpi), data = CigarettesSW, subset = year == "1995" ) test_that("ci", { expect_equal( ci(m1, method = "normal")$CI_low, c(7.82022, -1.79328, -0.18717), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(1.05856, 0.2632, 0.23857), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 1e-05, 0.24602), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(9.89496, -1.27742, 0.2804), tolerance = 1e-4 ) }) test_that("print-model_parameters", { skip_if_not_installed("withr") withr::local_options( list( parameters_exponentiate = TRUE, parameters_warning_exponentiate = TRUE ) ) tmp <- model_parameters(m1) expect_snapshot(tmp) }) parameters/tests/testthat/test-lavaan.R0000644000176200001440000000364314413515226017772 0ustar liggesusersskip_if_not_installed("lavaan") data(PoliticalDemocracy, package = "lavaan") model <- " # measurement model ind60 =~ x1 + x2 + x3 dem60 =~ y1 + y2 + y3 + y4 dem65 =~ y5 + y6 + y7 + y8 # regressions dem60 ~ ind60 dem65 ~ ind60 + dem60 # residual correlations y1 ~~ y5 y2 ~~ y4 + y6 y3 ~~ y7 y4 ~~ y8 y6 ~~ y8 " m <- lavaan::sem(model, data = PoliticalDemocracy, test = "Satorra-Bentler") test_that("unstandardized", { mp <- model_parameters(m, eta_squared = "raw") ml <- lavaan::parameterEstimates(m, se = TRUE) ml <- ml[(ml$lhs != ml$rhs) & (ml$op != "~1"), ] expect_equal(mp$Coefficient, ml$est, tolerance = 1e-3) expect_equal(mp$SE, ml$se, tolerance = 1e-3) }) test_that("standardized", { mp <- model_parameters(m, standardize = TRUE) ml <- lavaan::standardizedSolution(m, type = "std.all", se = TRUE) ml <- ml[(ml$lhs != ml$rhs) & (ml$op != "~1"), ] expect_equal(mp$Coefficient, ml$est, tolerance = 1e-3) expect_equal(mp$SE, ml$se, tolerance = 1e-3) }) test_that("standardized-lv", { mp <- model_parameters(m, standardize = "latent") ml <- lavaan::standardizedSolution(m, type = "std.lv", se = TRUE) ml <- ml[(ml$lhs != ml$rhs) & (ml$op != "~1"), ] expect_equal(mp$Coefficient, ml$est, tolerance = 1e-3) expect_equal(mp$SE, ml$se, tolerance = 1e-3) }) test_that("standardized-nox", { mp <- model_parameters(m, standardize = "no_exogenous") ml <- lavaan::standardizedSolution(m, type = "std.nox", se = TRUE) ml <- ml[(ml$lhs != ml$rhs) & (ml$op != "~1"), ] expect_equal(mp$Coefficient, ml$est, tolerance = 1e-3) expect_equal(mp$SE, ml$se, tolerance = 1e-3) }) test_that("standardized no CI", { mod <- lavaan::cfa("ind60 =~ x1 + x2 + x3", data = PoliticalDemocracy) p <- parameters(mod, standardize = "all", ci = NULL) expect_s3_class(p, "parameters_sem") }) parameters/tests/testthat/test-weightit.R0000644000176200001440000000240115057525051020345 0ustar liggesusersskip_on_os("mac") skip_if_not_installed("WeightIt", minimum_version = 
"1.2.0") skip_if_not_installed("cobalt") skip_if_not_installed("insight") test_that("weightit, multinom", { data("lalonde", package = "cobalt") set.seed(1234) # Logistic regression ATT weights w.out <- WeightIt::weightit( treat ~ age + educ + married + re74, data = lalonde, method = "glm", estimand = "ATT" ) lalonde$re78_3 <- factor(findInterval(lalonde$re78, c(0, 5e3, 1e4))) fit4 <- WeightIt::multinom_weightit( re78_3 ~ treat + age + educ, data = lalonde, weightit = w.out ) expect_snapshot(print(model_parameters(fit4, exponentiate = TRUE), zap_small = TRUE)) }) test_that("weightit, ordinal", { data("lalonde", package = "cobalt") set.seed(1234) # Logistic regression ATT weights w.out <- WeightIt::weightit( treat ~ age + educ + married + re74, data = lalonde, method = "glm", estimand = "ATT" ) lalonde$re78_3 <- factor(findInterval(lalonde$re78, c(0, 5e3, 1e4))) fit5 <- WeightIt::ordinal_weightit( ordered(re78_3) ~ treat + age + educ, data = lalonde, weightit = w.out ) expect_snapshot(print(model_parameters(fit5, exponentiate = TRUE), zap_small = TRUE)) }) parameters/tests/testthat/test-format_parameters.R0000644000176200001440000004624014716604201022241 0ustar liggesusersskip_if_not_installed("splines") # make sure we have the correct interaction mark for tests withr::with_options( list(parameters_interaction = "*", easystats_table_width = Inf), { # define here because messes up the expected output bs <- splines::bs ns <- splines::ns set.seed(123) iris$cat <- sample(LETTERS[1:4], nrow(iris), replace = TRUE) test_that("format_parameters-1", { model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", Sepal.Width = "Sepal Width", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width" )) }) test_that("format_parameters-2", { model <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Sepal.Width = "Sepal Width", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Sepal.Width:Speciesversicolor` = "Sepal Width * Species [versicolor]", `Sepal.Width:Speciesvirginica` = "Sepal Width * Species [virginica]" )) }) test_that("format_parameters-3", { model <- lm(Sepal.Length ~ Species * Sepal.Width * Petal.Length, data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", Sepal.Width = "Sepal Width", Petal.Length = "Petal Length", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal Length", `Sepal.Width:Petal.Length` = "Sepal Width * Petal Length", `Speciesversicolor:Sepal.Width:Petal.Length` = "(Species [versicolor] * Sepal Width) * Petal Length", `Speciesvirginica:Sepal.Width:Petal.Length` = "(Species [virginica] * Sepal Width) * Petal Length" )) }) test_that("format_parameters-4", { model <- lm(Sepal.Length ~ Species * cat * Petal.Length, data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species 
[versicolor]", Speciesvirginica = "Species [virginica]", catB = "cat [B]", catC = "cat [C]", catD = "cat [D]", Petal.Length = "Petal Length", `Speciesversicolor:catB` = "Species [versicolor] * cat [B]", `Speciesvirginica:catB` = "Species [virginica] * cat [B]", `Speciesversicolor:catC` = "Species [versicolor] * cat [C]", `Speciesvirginica:catC` = "Species [virginica] * cat [C]", `Speciesversicolor:catD` = "Species [versicolor] * cat [D]", `Speciesvirginica:catD` = "Species [virginica] * cat [D]", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal Length", `catB:Petal.Length` = "cat [B] * Petal Length", `catC:Petal.Length` = "cat [C] * Petal Length", `catD:Petal.Length` = "cat [D] * Petal Length", `Speciesversicolor:catB:Petal.Length` = "(Species [versicolor] * cat [B]) * Petal Length", `Speciesvirginica:catB:Petal.Length` = "(Species [virginica] * cat [B]) * Petal Length", `Speciesversicolor:catC:Petal.Length` = "(Species [versicolor] * cat [C]) * Petal Length", `Speciesvirginica:catC:Petal.Length` = "(Species [virginica] * cat [C]) * Petal Length", `Speciesversicolor:catD:Petal.Length` = "(Species [versicolor] * cat [D]) * Petal Length", `Speciesvirginica:catD:Petal.Length` = "(Species [virginica] * cat [D]) * Petal Length" )) }) test_that("format_parameters-5", { model <- lm(Sepal.Length ~ Species / Petal.Length, data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Petal.Length` = "Species [setosa] * Petal Length", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal Length" )) }) test_that("format_parameters-6", { model <- lm(Sepal.Length ~ Petal.Length + (Species / Sepal.Width), data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Sepal.Width` = "Species [setosa] * Sepal Width", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width" )) }) test_that("format_parameters-7", { model <- lm(Sepal.Length ~ Species / Petal.Length * Sepal.Width, data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", Sepal.Width = "Sepal Width", `Speciessetosa:Petal.Length` = "Species [setosa] * Petal Length", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal Length", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width", `Speciessetosa:Petal.Length:Sepal.Width` = "Species [setosa] * Petal Length * Sepal Width", `Speciesversicolor:Petal.Length:Sepal.Width` = "Species [versicolor] * Petal Length * Sepal Width", `Speciesvirginica:Petal.Length:Sepal.Width` = "Species [virginica] * Petal Length * Sepal Width" )) }) test_that("format_parameters-8", { model <- lm(Sepal.Length ~ Species / (Petal.Length * Sepal.Width), data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species 
[versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Petal.Length` = "Species [setosa] * Petal Length", `Speciesversicolor:Petal.Length` = "Species [versicolor] * Petal Length", `Speciesvirginica:Petal.Length` = "Species [virginica] * Petal Length", `Speciessetosa:Sepal.Width` = "Species [setosa] * Sepal Width", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width", `Speciessetosa:Petal.Length:Sepal.Width` = "Species [setosa] * Petal Length * Sepal Width", `Speciesversicolor:Petal.Length:Sepal.Width` = "Species [versicolor] * Petal Length * Sepal Width", `Speciesvirginica:Petal.Length:Sepal.Width` = "Species [virginica] * Petal Length * Sepal Width" )) }) test_that("format_parameters-9", { model <- lm(Sepal.Length ~ Petal.Length + (Species / (Sepal.Width * Petal.Width)), data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `Speciessetosa:Sepal.Width` = "Species [setosa] * Sepal Width", `Speciesversicolor:Sepal.Width` = "Species [versicolor] * Sepal Width", `Speciesvirginica:Sepal.Width` = "Species [virginica] * Sepal Width", `Speciessetosa:Petal.Width` = "Species [setosa] * Petal Width", `Speciesversicolor:Petal.Width` = "Species [versicolor] * Petal Width", `Speciesvirginica:Petal.Width` = "Species [virginica] * Petal Width", `Speciessetosa:Sepal.Width:Petal.Width` = "Species [setosa] * Sepal Width * Petal Width", `Speciesversicolor:Sepal.Width:Petal.Width` = "Species [versicolor] * Sepal Width * Petal Width", `Speciesvirginica:Sepal.Width:Petal.Width` = "Species [virginica] * Sepal Width * Petal Width" )) }) test_that("format_parameters-10", { model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `poly(Sepal.Width, 2)1` = "Sepal Width [1st degree]", `poly(Sepal.Width, 2)2` = "Sepal Width [2nd degree]" )) }) test_that("format_parameters-11", { model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Speciesversicolor = "Species [versicolor]", Speciesvirginica = "Species [virginica]", `poly(Sepal.Width, 2, raw = TRUE)1` = "Sepal Width [1st degree]", `poly(Sepal.Width, 2, raw = TRUE)2` = "Sepal Width [2nd degree]" )) }) test_that("format_parameters-12", { model <- lm(Sepal.Length ~ Petal.Length * bs(Petal.Width), data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", `bs(Petal.Width)1` = "Petal Width [1st degree]", `bs(Petal.Width)2` = "Petal Width [2nd degree]", `bs(Petal.Width)3` = "Petal Width [3rd degree]", `Petal.Length:bs(Petal.Width)1` = "Petal Length * Petal Width [1st degree]", `Petal.Length:bs(Petal.Width)2` = "Petal Length * Petal Width [2nd degree]", `Petal.Length:bs(Petal.Width)3` = "Petal Length * Petal Width [3rd degree]" )) }) test_that("format_parameters-13", { model <- lm(Sepal.Length ~ Petal.Length * bs(Petal.Width, degree = 4), data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", `bs(Petal.Width, degree = 4)1` = "Petal Width [1st degree]", `bs(Petal.Width, 
degree = 4)2` = "Petal Width [2nd degree]", `bs(Petal.Width, degree = 4)3` = "Petal Width [3rd degree]", `bs(Petal.Width, degree = 4)4` = "Petal Width [4th degree]", `Petal.Length:bs(Petal.Width, degree = 4)1` = "Petal Length * Petal Width [1st degree]", `Petal.Length:bs(Petal.Width, degree = 4)2` = "Petal Length * Petal Width [2nd degree]", `Petal.Length:bs(Petal.Width, degree = 4)3` = "Petal Length * Petal Width [3rd degree]", `Petal.Length:bs(Petal.Width, degree = 4)4` = "Petal Length * Petal Width [4th degree]" )) }) test_that("format_parameters-14", { model <- lm(Sepal.Length ~ Petal.Length * ns(Petal.Width, df = 3), data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", `ns(Petal.Width, df = 3)1` = "Petal Width [1st degree]", `ns(Petal.Width, df = 3)2` = "Petal Width [2nd degree]", `ns(Petal.Width, df = 3)3` = "Petal Width [3rd degree]", `Petal.Length:ns(Petal.Width, df = 3)1` = "Petal Length * Petal Width [1st degree]", `Petal.Length:ns(Petal.Width, df = 3)2` = "Petal Length * Petal Width [2nd degree]", `Petal.Length:ns(Petal.Width, df = 3)3` = "Petal Length * Petal Width [3rd degree]" )) }) test_that("format_parameters-15", { model <- lm(Sepal.Length ~ Petal.Length * I(Petal.Width^2), data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", `I(Petal.Width^2)` = "Petal Width^2", `Petal.Length:I(Petal.Width^2)` = "Petal Length * Petal Width^2" )) }) test_that("format_parameters-16", { model <- lm(Sepal.Length ~ Petal.Length * as.factor(Species), data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", Petal.Length = "Petal Length", `as.factor(Species)versicolor` = "Species [versicolor]", `as.factor(Species)virginica` = "Species [virginica]", `Petal.Length:as.factor(Species)versicolor` = "Petal Length * Species [versicolor]", `Petal.Length:as.factor(Species)virginica` = "Petal Length * Species [virginica]" )) }) test_that("format_parameters-17", { skip_if_not_installed("pscl") data("bioChemists", package = "pscl") model <- pscl::zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) fp <- format_parameters(model) expect_identical(fp, c( `count_(Intercept)` = "(Intercept)", count_femWomen = "fem [Women]", count_marMarried = "mar [Married]", count_kid5 = "kid5", count_ment = "ment", `zero_(Intercept)` = "(Intercept)", zero_kid5 = "kid5", zero_phd = "phd" )) }) test_that("format_parameters-18", { data(iris) levels(iris$Species) <- c("Species verti", "No Specieses", "Yes (Species)") model <- lm(Sepal.Length ~ Species * Petal.Width, data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", `SpeciesNo Specieses` = "Species [No Specieses]", `SpeciesYes (Species)` = "Species [Yes (Species)]", Petal.Width = "Petal Width", `SpeciesNo Specieses:Petal.Width` = "Species [No Specieses] * Petal Width", `SpeciesYes (Species):Petal.Width` = "Species [Yes (Species)] * Petal Width" )) }) test_that("format_parameters-19", { data(mtcars) m1 <- lm(mpg ~ qsec:wt + wt:drat, data = mtcars) m2 <- lm(mpg ~ qsec:wt + wt / drat, data = mtcars) m3 <- lm(mpg ~ qsec:wt + wt:drat + wt, data = mtcars) m4 <- lm(mpg ~ qsec:wt + wt / drat + wt, data = mtcars) m5 <- lm(mpg ~ qsec * wt + wt:drat + wt, data = mtcars) m6 <- lm(mpg ~ wt + qsec + wt:qsec, data = mtcars) expect_identical( format_parameters(m1), c(`(Intercept)` = "(Intercept)", `qsec:wt` = "qsec * wt", `wt:drat` = "wt 
* drat") ) expect_identical( format_parameters(m2), c(`(Intercept)` = "(Intercept)", wt = "wt", `qsec:wt` = "qsec * wt", `wt:drat` = "wt * drat") ) expect_identical( format_parameters(m3), c(`(Intercept)` = "(Intercept)", wt = "wt", `qsec:wt` = "qsec * wt", `wt:drat` = "wt * drat") ) expect_identical( format_parameters(m4), c(`(Intercept)` = "(Intercept)", wt = "wt", `qsec:wt` = "qsec * wt", `wt:drat` = "wt * drat") ) expect_identical( format_parameters(m5), c(`(Intercept)` = "(Intercept)", qsec = "qsec", wt = "wt", `qsec:wt` = "qsec * wt", `wt:drat` = "wt * drat") ) expect_identical( format_parameters(m6), c(`(Intercept)` = "(Intercept)", wt = "wt", qsec = "qsec", `wt:qsec` = "wt * qsec") ) }) test_that("format_parameters-20", { data(iris) levels(iris$Species) <- c("Yes (Species)", "Species.verti", "No_Specieses") model <- lm(Sepal.Length ~ Species + Petal.Width, data = iris) fp <- format_parameters(model) expect_identical(fp, c( `(Intercept)` = "(Intercept)", SpeciesSpecies.verti = "Species [Species.verti]", SpeciesNo_Specieses = "Species [No_Specieses]", Petal.Width = "Petal Width" )) }) test_that("format_parameters-labelled data-1", { data(efc, package = "datawizard", envir = globalenv()) m <- lm(neg_c_7 ~ e42dep + c172code, data = efc) mp <- model_parameters(m, verbose = FALSE) out <- utils::capture.output(print(mp, pretty_names = "labels")) expect_identical( insight::trim_ws(unlist(strsplit(out[4], "|", fixed = TRUE))[1]), "elder's dependency [slightly dependent]" ) expect_identical( insight::trim_ws(unlist(strsplit(out[5], "|", fixed = TRUE))[1]), "elder's dependency [moderately dependent]" ) out <- utils::capture.output(print(mp)) expect_identical( insight::trim_ws(unlist(strsplit(out[4], "|", fixed = TRUE))[1]), "e42dep [2]" ) expect_identical( insight::trim_ws(unlist(strsplit(out[5], "|", fixed = TRUE))[1]), "e42dep [3]" ) }) test_that("format_parameters-labelled data-2", { data(iris) m <- lm(Sepal.Width ~ Species + Sepal.Length, data = iris) mp <- model_parameters(m, verbose = FALSE) out <- utils::capture.output(print(mp, pretty_names = "labels")) expect_identical( insight::trim_ws(unlist(strsplit(out[4], "|", fixed = TRUE))[1]), "Species [versicolor]" ) expect_identical( insight::trim_ws(unlist(strsplit(out[5], "|", fixed = TRUE))[1]), "Species [virginica]" ) out <- utils::capture.output(print(mp)) expect_identical( insight::trim_ws(unlist(strsplit(out[4], "|", fixed = TRUE))[1]), "Species [versicolor]" ) expect_identical( insight::trim_ws(unlist(strsplit(out[5], "|", fixed = TRUE))[1]), "Species [virginica]" ) }) test_that("format_parameters-labelled data-3", { data(efc, package = "datawizard", envir = globalenv()) m <- lm(neg_c_7 ~ e42dep * c12hour, data = efc) mp <- model_parameters(m, verbose = FALSE) out <- utils::capture.output(print(mp, pretty_names = "labels")) expect_identical( insight::trim_ws(unlist(strsplit(out[4], "|", fixed = TRUE))[1]), "elder's dependency [slightly dependent]" ) expect_identical( insight::trim_ws(unlist(strsplit(out[8], "|", fixed = TRUE))[1]), "elder's dependency [slightly dependent] * average number of hours of care per week" ) out <- utils::capture.output(print(mp)) expect_identical( insight::trim_ws(unlist(strsplit(out[4], "|", fixed = TRUE))[1]), "e42dep [2]" ) expect_identical( insight::trim_ws(unlist(strsplit(out[8], "|", fixed = TRUE))[1]), "e42dep [2] * c12hour" ) }) test_that("format_parameters, cut", { data(mtcars) mtcars$grp <- cut(mtcars$mpg, breaks = c(0, 15, 20, 50)) out <- model_parameters(lm(wt ~ grp, data = mtcars)) 
expect_equal( attributes(out)$pretty_names, c( `(Intercept)` = "(Intercept)", `grp(15,20]` = "grp [>15-20]", `grp(20,50]` = "grp [>20-50]" ), ignore_attr = TRUE ) expect_identical(out$Parameter, c("(Intercept)", "grp(15,20]", "grp(20,50]")) }) } ) parameters/tests/testthat/test-GLMMadaptive.R0000644000176200001440000001016414736731407021007 0ustar liggesusersskip_on_cran() skip_if_not_installed("lme4") skip_if_not_installed("GLMMadaptive") skip_if_not_installed("glmmTMB") data("fish") data("cbpp", package = "lme4") m1 <- GLMMadaptive::mixed_model( count ~ child + camper, random = ~ 1 | persons, zi_fixed = ~ child + livebait, data = fish, family = GLMMadaptive::zi.poisson() ) m2 <- GLMMadaptive::mixed_model( cbind(incidence, size - incidence) ~ period, random = ~ 1 | herd, data = cbpp, family = binomial ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.08708, -1.35715, 0.58599, -0.99993, 0.75543, -2.1166), tolerance = 1e-3 ) expect_equal( ci(m1, component = "cond")$CI_low, c(0.08708, -1.35715, 0.58599), tolerance = 1e-3 ) expect_equal( ci(m1, component = "zi")$CI_low, c(-0.99993, 0.75543, -2.1166), tolerance = 1e-3 ) expect_equal( ci(m2)$CI_low, c(-1.8572, -1.59265, -1.76827, -2.41754), tolerance = 1e-3 ) expect_equal( ci(m2, component = "cond")$CI_low, c(-1.8572, -1.59265, -1.76827, -2.41754), tolerance = 1e-3 ) expect_null(suppressMessages(ci(m2, component = "zi"))) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.540016, 0.094847, 0.09356, 0.468122, 0.29416, 0.507634), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = "cond")$SE, c(0.540016, 0.094847, 0.09356), tolerance = 1e-3 ) expect_equal( standard_error(m1, component = "zi")$SE, c(0.468122, 0.29416, 0.507634), tolerance = 1e-3 ) expect_equal( standard_error(m2)$SE, c(0.233543, 0.306776, 0.326777, 0.427606), tolerance = 1e-3 ) expect_equal( standard_error(m2, component = "cond")$SE, c(0.233543, 0.306776, 0.326777, 0.427606), tolerance = 1e-3 ) expect_null(suppressMessages(standard_error(m2, component = "zi"))) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.0339, 0, 0, 0.86023, 1e-05, 0.02713), tolerance = 1e-3 ) expect_equal( p_value(m1, component = "cond")$p, c(0.0339, 0, 0), tolerance = 1e-3 ) expect_equal( p_value(m1, component = "zi")$p, c(0.86023, 1e-05, 0.02713), tolerance = 1e-3 ) expect_equal( p_value(m2)$p, c(0, 0.00123, 0.00056, 0.00022), tolerance = 1e-3 ) expect_equal( p_value(m2, component = "cond")$p, c(0, 0.00123, 0.00056, 0.00022), tolerance = 1e-3 ) expect_null(suppressMessages(p_value(m2, component = "zi"))) }) test_that("model_parameters", { expect_equal( model_parameters(m1, effects = "fixed")$Coefficient, c(1.14549, -1.17125, 0.76937, -0.08243, 1.33197, -1.12165), tolerance = 1e-3 ) expect_equal( model_parameters(m2, effects = "fixed")$Coefficient, c(-1.39946, -0.99138, -1.1278, -1.57945), tolerance = 1e-3 ) }) test_that("model_parameters.mixed-ran_pars", { skip_on_os(c("mac", "linux", "solaris")) skip_if_not_installed("glmmTMB") data("Salamanders", package = "glmmTMB") model <- GLMMadaptive::mixed_model( count ~ spp + mined, random = ~ DOY | site, zi_fixed = ~ spp + mined, zi_random = ~ DOP | site, family = GLMMadaptive::zi.negative.binomial(), data = Salamanders, control = list(nAGQ = 1) ) params <- model_parameters(model, effects = "random") expect_identical(c(nrow(params), ncol(params)), c(7L, 9L)) expect_identical( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Effects", "Group", "Component") ) expect_identical( params$Parameter, c( 
"SD (Intercept)", "SD (DOY)", "Cor (Intercept~DOY)", "SD (Observations)", "SD (Intercept)", "SD (DOP)", "Cor (Intercept~DOP)" ) ) expect_identical( params$Component, c( "conditional", "conditional", "conditional", "conditional", "zero_inflated", "zero_inflated", "zero_inflated" ) ) expect_equal( params$Coefficient, c(0.56552, 0.29951, 0.06307, 1.61936, 1.02233, 0.38209, -0.17162), tolerance = 1e-2 ) }) parameters/tests/testthat/test-tobit.R0000644000176200001440000000146414413515226017650 0ustar liggesusersskip_if_not_installed("AER") data("Affairs", package = "AER") m1 <- AER::tobit( affairs ~ age + yearsmarried + religiousness + occupation + rating, data = Affairs ) test_that("ci", { expect_equal( ci(m1)$CI_low, c(2.80106, -0.33435, 0.29049, -2.47756, -0.17261, -3.0843), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(2.74145, 0.07909, 0.13452, 0.40375, 0.25442, 0.40783), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0.00287, 0.02337, 4e-05, 3e-05, 0.20001, 0), tolerance = 1e-4 ) }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, c(8.1742, -0.17933, 0.55414, -1.68622, 0.32605, -2.28497), tolerance = 1e-4 ) }) parameters/tests/testthat/test-robust.R0000644000176200001440000003356415111301621020037 0ustar liggesusersskip_on_cran() skip_if_not_installed("sandwich") # standard errors ------------------------------------- test_that("robust-se glm warn with profile-CI", { mglm <- glm(mpg ~ wt, data = mtcars) expect_message(ci(mglm, vcov = "HC3"), regex = "available") expect_message( model_parameters(mglm, vcov = "HC3", ci_method = "profile"), regex = "modifies" ) }) # standard errors ------------------------------------- test_that("robust-se lm", { m <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) se1 <- standard_error(m, vcov = "HC") se2 <- sqrt(diag(sandwich::vcovHC(m))) expect_equal(se1$SE, se2, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-se polr", { skip_if_not_installed("MASS") data(housing, package = "MASS") m <- MASS::polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) se1 <- standard_error(m, vcov = "vcovCL") se2 <- sqrt(diag(sandwich::vcovCL(m))) expect_equal(se1$SE, se2, tolerance = 1e-4, ignore_attr = TRUE) se1 <- standard_error(m, vcov = "vcovOPG") se2 <- sqrt(diag(sandwich::vcovOPG(m))) expect_equal(se1$SE, se2, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-se zeroinfl", { skip_if_not_installed("pscl") skip_if_not_installed("clubSandwich") data("bioChemists", package = "pscl") m <- pscl::zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) se1 <- standard_error(m, vcov = "vcovCL") se2 <- sqrt(diag(sandwich::vcovCL(m))) expect_equal(se1$SE, se2, tolerance = 1e-4, ignore_attr = TRUE) se1 <- standard_error(m, vcov = "vcovOPG") se2 <- sqrt(diag(sandwich::vcovOPG(m))) expect_equal(se1$SE, se2, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-se ivreg", { skip_if_not_installed("AER") skip_if_not_installed("clubSandwich") data(CigarettesSW, package = "AER") CigarettesSW$rprice <- with(CigarettesSW, price / cpi) CigarettesSW$rincome <- with(CigarettesSW, income / population / cpi) CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax) / cpi) m <- AER::ivreg( log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax / cpi), data = CigarettesSW, subset = year == "1995" ) se1 <- standard_error(m, vcov = "vcovCL") se2 <- sqrt(diag(sandwich::vcovCL(m))) expect_equal(se1$SE, se2, tolerance = 1e-4, ignore_attr = 
TRUE) se1 <- standard_error(m, vcov = "vcovOPG") se2 <- sqrt(diag(sandwich::vcovOPG(m))) expect_equal(se1$SE, se2, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-se survival", { skip_if_not_installed("survival") set.seed(123) m <- survival::survreg( formula = survival::Surv(futime, fustat) ~ ecog.ps + rx, data = survival::ovarian, dist = "logistic" ) se1 <- standard_error(m, vcov = "vcovOPG") se2 <- sqrt(diag(sandwich::vcovOPG(m))) expect_equal(se1$SE, se2, tolerance = 1e-4, ignore_attr = TRUE) }) # p-values ------------------------------------- test_that("robust-p lm", { m <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) p1 <- p_value(m, vcov = "HC") # robust p manually se <- sqrt(diag(sandwich::vcovHC(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) stat <- coef(m) / se p2 <- 2 * pt(abs(stat), df = dof, lower.tail = FALSE) expect_equal(p1$p, p2, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-p polr", { skip_if_not_installed("MASS") data(housing, package = "MASS") m <- MASS::polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) p1 <- p_value(m, vcov = "vcovCL") # robust p manually se <- sqrt(diag(sandwich::vcovCL(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) stat <- c(m$coefficients, m$zeta) / se p2 <- 2 * pt(abs(stat), df = dof, lower.tail = FALSE) expect_equal(p1$p, p2, tolerance = 1e-4, ignore_attr = TRUE) p1 <- p_value(m, vcov = "vcovOPG") # robust p manually se <- sqrt(diag(sandwich::vcovOPG(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) stat <- c(m$coefficients, m$zeta) / se p2 <- 2 * pt(abs(stat), df = dof, lower.tail = FALSE) expect_equal(p1$p, p2, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-p ivreg", { skip_if_not_installed("AER") data(CigarettesSW, package = "AER") CigarettesSW$rprice <- with(CigarettesSW, price / cpi) CigarettesSW$rincome <- with(CigarettesSW, income / population / cpi) CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax) / cpi) m <- AER::ivreg( log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax / cpi), data = CigarettesSW, subset = year == "1995" ) p1 <- p_value(m, vcov = "vcovCL") # robust p manually se <- sqrt(diag(sandwich::vcovCL(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) stat <- coef(m) / se p2 <- 2 * pt(abs(stat), df = dof, lower.tail = FALSE) expect_equal(p1$p, p2, tolerance = 1e-4, ignore_attr = TRUE) p1 <- p_value(m, vcov = "vcovOPG") # robust p manually se <- sqrt(diag(sandwich::vcovOPG(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) stat <- coef(m) / se p2 <- 2 * pt(abs(stat), df = dof, lower.tail = FALSE) expect_equal(p1$p, p2, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-p zeroinfl", { skip_if_not_installed("pscl") data("bioChemists", package = "pscl") m <- pscl::zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) p1 <- p_value(m, vcov = "vcovCL") # robust p manually se <- sqrt(diag(sandwich::vcovCL(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) stat <- coef(m) / se p2 <- 2 * pt(abs(stat), df = dof, lower.tail = FALSE) expect_equal(p1$p, p2, tolerance = 1e-4, ignore_attr = TRUE) p1 <- p_value(m, vcov = "vcovOPG") # robust p manually se <- sqrt(diag(sandwich::vcovOPG(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) stat <- coef(m) / se p2 <- 2 * pt(abs(stat), df = dof, lower.tail = FALSE) expect_equal(p1$p, p2, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-p survival", { 
skip_if_not_installed("survival") set.seed(123) m <- survival::survreg( formula = survival::Surv(futime, fustat) ~ ecog.ps + rx, data = survival::ovarian, dist = "logistic" ) p1 <- p_value(m, vcov = "vcovOPG") # robust p manually se <- sqrt(diag(sandwich::vcovOPG(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) stat <- insight::get_parameters(m)$Estimate / se p2 <- 2 * pt(abs(stat), df = dof, lower.tail = FALSE) expect_equal(p1$p, p2, tolerance = 1e-4, ignore_attr = TRUE) }) # CI ------------------------------------- test_that("robust-ci lm", { data(iris) m <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) ci1 <- ci(m, vcov = "HC") # robust CI manually params <- insight::get_parameters(m) se <- sqrt(diag(sandwich::vcovHC(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) fac <- suppressWarnings(stats::qt(0.975, df = dof)) ci2 <- as.data.frame(cbind( CI_low = params$Estimate - se * fac, CI_high = params$Estimate + se * fac )) expect_equal(ci1$CI_low, ci2$CI_low, tolerance = 1e-4, ignore_attr = TRUE) expect_equal(ci1$CI_high, ci2$CI_high, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-ci polr", { skip_if_not_installed("MASS") data(housing, package = "MASS") m <- MASS::polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) ci1 <- ci(m, vcov = "vcovCL") # robust CI manually se <- sqrt(diag(sandwich::vcovCL(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) fac <- suppressWarnings(stats::qt(0.975, df = dof)) ci2 <- as.data.frame(cbind( CI_low = c(m$coefficients, m$zeta) - se * fac, CI_high = c(m$coefficients, m$zeta) + se * fac )) expect_equal(ci1$CI_low, ci2$CI_low, tolerance = 1e-4, ignore_attr = TRUE) expect_equal(ci1$CI_high, ci2$CI_high, tolerance = 1e-4, ignore_attr = TRUE) ci1 <- ci(m, vcov = "vcovOPG") # robust CI manually se <- sqrt(diag(sandwich::vcovOPG(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) fac <- suppressWarnings(stats::qt(0.975, df = dof)) ci2 <- as.data.frame(cbind( CI_low = c(m$coefficients, m$zeta) - se * fac, CI_high = c(m$coefficients, m$zeta) + se * fac )) expect_equal(ci1$CI_low, ci2$CI_low, tolerance = 1e-4, ignore_attr = TRUE) expect_equal(ci1$CI_high, ci2$CI_high, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-ci ivreg", { skip_if_not_installed("AER") data(CigarettesSW, package = "AER") CigarettesSW$rprice <- with(CigarettesSW, price / cpi) CigarettesSW$rincome <- with(CigarettesSW, income / population / cpi) CigarettesSW$tdiff <- with(CigarettesSW, (taxs - tax) / cpi) m <- AER::ivreg( log(packs) ~ log(rprice) + log(rincome) | log(rincome) + tdiff + I(tax / cpi), data = CigarettesSW, subset = year == "1995" ) ci1 <- ci(m, vcov = "vcovCL") # robust CI manually se <- sqrt(diag(sandwich::vcovCL(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) fac <- suppressWarnings(stats::qt(0.975, df = dof)) ci2 <- as.data.frame(cbind(CI_low = coef(m) - se * fac, CI_high = coef(m) + se * fac)) expect_equal(ci1$CI_low, ci2$CI_low, tolerance = 1e-4, ignore_attr = TRUE) expect_equal(ci1$CI_high, ci2$CI_high, tolerance = 1e-4, ignore_attr = TRUE) ci1 <- ci(m, vcov = "vcovOPG") # robust CI manually se <- sqrt(diag(sandwich::vcovOPG(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) fac <- suppressWarnings(stats::qt(0.975, df = dof)) ci2 <- as.data.frame(cbind(CI_low = coef(m) - se * fac, CI_high = coef(m) + se * fac)) expect_equal(ci1$CI_low, ci2$CI_low, tolerance = 1e-4, ignore_attr = TRUE) expect_equal(ci1$CI_high, ci2$CI_high, 
tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-ci zeroinfl", { skip_if_not_installed("pscl") data("bioChemists", package = "pscl") m <- pscl::zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) ci1 <- ci(m, vcov = "vcovCL") # robust CI manually se <- sqrt(diag(sandwich::vcovCL(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) fac <- suppressWarnings(stats::qt(0.975, df = dof)) ci2 <- as.data.frame(cbind(CI_low = coef(m) - se * fac, CI_high = coef(m) + se * fac)) expect_equal(ci1$CI_low, ci2$CI_low, tolerance = 1e-4, ignore_attr = TRUE) expect_equal(ci1$CI_high, ci2$CI_high, tolerance = 1e-4, ignore_attr = TRUE) ci1 <- ci(m, vcov = "vcovOPG") # robust CI manually se <- sqrt(diag(sandwich::vcovOPG(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) fac <- suppressWarnings(stats::qt(0.975, df = dof)) ci2 <- as.data.frame(cbind(CI_low = coef(m) - se * fac, CI_high = coef(m) + se * fac)) expect_equal(ci1$CI_low, ci2$CI_low, tolerance = 1e-4, ignore_attr = TRUE) expect_equal(ci1$CI_high, ci2$CI_high, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-ci survival", { skip_if_not_installed("survival") set.seed(123) m <- survival::survreg( formula = survival::Surv(futime, fustat) ~ ecog.ps + rx, data = survival::ovarian, dist = "logistic" ) ci1 <- ci(m, vcov = "vcovOPG") # robust CI manually se <- sqrt(diag(sandwich::vcovOPG(m))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) fac <- suppressWarnings(stats::qt(0.975, df = dof)) ci2 <- as.data.frame(cbind( CI_low = insight::get_parameters(m)$Estimate - se * fac, CI_high = insight::get_parameters(m)$Estimate + se * fac )) expect_equal(ci1$CI_low, ci2$CI_low, tolerance = 1e-4, ignore_attr = TRUE) expect_equal(ci1$CI_high, ci2$CI_high, tolerance = 1e-4, ignore_attr = TRUE) }) # mixed models ---------------------- skip_if_not_installed("clubSandwich") skip_if_not_installed("lme4") test_that("robust-se lmer", { data(iris) set.seed(1234) iris$grp <- as.factor(sample.int(3, nrow(iris), replace = TRUE)) m <- lme4::lmer( Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp), data = iris ) se1 <- standard_error( m, vcov = "vcovCR", vcov_args = list(type = "CR1", cluster = iris$grp) ) se2 <- sqrt(diag(clubSandwich::vcovCR(m, type = "CR1", cluster = iris$grp))) expect_equal(se1$SE, se2, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-p lmer", { data(iris) set.seed(1234) iris$grp <- as.factor(sample.int(3, nrow(iris), replace = TRUE)) m <- lme4::lmer( Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp), data = iris ) p1 <- p_value(m, vcov = "vcovCR", vcov_args = list(type = "CR1", cluster = iris$grp)) se <- sqrt(diag(clubSandwich::vcovCR(m, type = "CR1", cluster = iris$grp))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) stat <- lme4::fixef(m) / se p2 <- 2 * pt(abs(stat), df = dof, lower.tail = FALSE) expect_equal(p1$p, p2, tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust-ci lmer", { data(iris) set.seed(1234) iris$grp <- as.factor(sample.int(3, nrow(iris), replace = TRUE)) m <- lme4::lmer( Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp), data = iris ) ci1 <- ci(m, vcov = "vcovCR", vcov_args = list(type = "CR1", cluster = iris$grp)) # robust CI manually params <- insight::get_parameters(m) se <- sqrt(diag(clubSandwich::vcovCR(m, type = "CR1", cluster = iris$grp))) dof <- degrees_of_freedom(m, method = "wald", verbose = FALSE) fac <- suppressWarnings(stats::qt(0.975, df = dof)) ci2 <- 
as.data.frame(cbind( CI_low = params$Estimate - se * fac, CI_high = params$Estimate + se * fac )) expect_equal(ci1$CI_low, ci2$CI_low, tolerance = 1e-4, ignore_attr = TRUE) expect_equal(ci1$CI_high, ci2$CI_high, tolerance = 1e-4, ignore_attr = TRUE) }) parameters/tests/testthat/test-model_parameters.blmerMod.R0000644000176200001440000000223114716604201023601 0ustar liggesusersskip_if_not_installed("blme") skip_if_not_installed("lme4") data(sleepstudy, package = "lme4") set.seed(123) model <- blme::blmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy, cov.prior = NULL) test_that("model_parameters.blmerMod", { params <- model_parameters(model, effects = "fixed") expect_equal(params$SE, c(6.8246, 1.54579), tolerance = 1e-3) expect_named( params, c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Effects") ) }) test_that("model_parameters.blmerMod-all", { skip_if_not_installed("merDeriv") params <- model_parameters(model, effects = "all") expect_equal(params$SE, c(6.8246, 1.54579, 5.83626, 1.24804, 0.31859, 1.50801), tolerance = 1e-3) expect_equal(params$Coefficient, c(251.4051, 10.46729, 24.74066, 5.92214, 0.06555, 25.5918), tolerance = 1e-3) expect_named( params, c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Effects", "Group") ) expect_identical( params$Parameter, c("(Intercept)", "Days", "SD (Intercept)", "SD (Days)", "Cor (Intercept~Days)", "SD (Observations)") ) }) parameters/tests/testthat/test-random_effects_ci-glmmTMB.R0000644000176200001440000002606715111301621023450 0ustar liggesusers# Test Setup -------------------------------- skip_on_cran() skip_on_os(c("mac", "linux", "solaris")) skip_if_not_installed("glmmTMB", minimum_version = "1.1.12") skip_if_not_installed("lme4") # tests -------------------------------- ## TODO also check messages for profiled CI data(sleepstudy, package = "lme4") data(cake, package = "lme4") set.seed(123) sleepstudy$Months <- sample.int(4, nrow(sleepstudy), TRUE) set.seed(123) m1 <- suppressWarnings(glmmTMB::glmmTMB( angle ~ temperature + (temperature | recipe) + (temperature | replicate), data = cake )) m2 <- glmmTMB::glmmTMB(Reaction ~ Days + (Days | Subject), data = sleepstudy) m3 <- suppressWarnings(glmmTMB::glmmTMB( angle ~ temperature + (temperature | recipe), data = cake )) m4 <- suppressWarnings(glmmTMB::glmmTMB( angle ~ temperature + (temperature | replicate), data = cake )) m5 <- suppressWarnings(glmmTMB::glmmTMB( Reaction ~ Days + (Days + Months | Subject), data = sleepstudy )) set.seed(123) expect_message( { mp1 <- model_parameters(m1, ci_random = TRUE) }, "singularity" ) mp2 <- model_parameters(m2, ci_random = TRUE) # works expect_message( { mp3 <- model_parameters(m3, ci_random = TRUE) }, "singularity" ) # no SE/CI mp4 <- model_parameters(m4, ci_random = TRUE) expect_message( { mp5 <- model_parameters(m5, ci_random = TRUE) }, "singularity" ) # no SE/CI test_that("random effects CIs, two slopes, categorical", { ## FIXME: Results differ across R versions, no idea why... 
# expect_equal( # mp1$CI_low, # c( # 28.91277, 5.03129, -1.87302, -2.42033, -3.2702, -2.57721, 0.2157, # 4.20738, NaN, NaN, 0.26244, 0.34083, 0.02479, 0.66487, 0.40589, # 0.15295, 0.01405, 0.62939, -0.99996, -0.41209, NaN, NaN, NaN, # -0.40223, NaN, NaN, NaN, NaN, NA, NA, NA, NA, NA, NA, NA, NA, # NA, NaN, NA, NA, NA, NA, NA, NA, NA, NA, NA, NaN, 4.12596 # ), # tolerance = 1e-2, # ignore_attr = TRUE # ) expect_identical( mp1$Parameter, c( "(Intercept)", "temperature.L", "temperature.Q", "temperature.C", "temperature^4", "temperature^5", "SD (Intercept)", "SD (Intercept)", "SD (temperature.L)", "SD (temperature.Q)", "SD (temperature.C)", "SD (temperature^4)", "SD (temperature^5)", "SD (temperature.L)", "SD (temperature.Q)", "SD (temperature.C)", "SD (temperature^4)", "SD (temperature^5)", "Cor (Intercept~temperature.L)", "Cor (Intercept~temperature.Q)", "Cor (Intercept~temperature.C)", "Cor (Intercept~temperature^4)", "Cor (Intercept~temperature^5)", "Cor (Intercept~temperature.L)", "Cor (Intercept~temperature.Q)", "Cor (Intercept~temperature.C)", "Cor (Intercept~temperature^4)", "Cor (Intercept~temperature^5)", "Cor (temperature.L~temperature.Q)", "Cor (temperature.L~temperature.C)", "Cor (temperature.L~temperature^4)", "Cor (temperature.L~temperature^5)", "Cor (temperature.Q~temperature.C)", "Cor (temperature.Q~temperature^4)", "Cor (temperature.Q~temperature^5)", "Cor (temperature.C~temperature^4)", "Cor (temperature.C~temperature^5)", "Cor (temperature^4~temperature^5)", "Cor (temperature.L~temperature.Q)", "Cor (temperature.L~temperature.C)", "Cor (temperature.L~temperature^4)", "Cor (temperature.L~temperature^5)", "Cor (temperature.Q~temperature.C)", "Cor (temperature.Q~temperature^4)", "Cor (temperature.Q~temperature^5)", "Cor (temperature.C~temperature^4)", "Cor (temperature.C~temperature^5)", "Cor (temperature^4~temperature^5)", "SD (Observations)" ) ) expect_identical( mp1$Group, c( "", "", "", "", "", "", "recipe", "replicate", "recipe", "recipe", "recipe", "recipe", "recipe", "replicate", "replicate", "replicate", "replicate", "replicate", "recipe", "recipe", "recipe", "recipe", "recipe", "replicate", "replicate", "replicate", "replicate", "replicate", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "Residual" ) ) }) test_that("random effects CIs, simple slope", { expect_equal( mp2$CI_low, c(238.40611, 7.52295, 15.01709, 3.80546, -0.48781, 22.80047), tolerance = 1e-2, ignore_attr = TRUE ) expect_identical( mp2$Parameter, c( "(Intercept)", "Days", "SD (Intercept)", "SD (Days)", "Cor (Intercept~Days)", "SD (Observations)" ) ) }) test_that("random effects CIs, categorical slope-1", { ## FIXME: Results differ across R versions, no idea why... 
# expect_equal( # mp3$CI_low, # c( # 31.20278, 4.35879, -2.63767, -2.80041, -3.54983, -3.16627, # 0, 0, NaN, NaN, 0, 0, -1, NaN, NaN, NaN, NaN, NA, NA, NA, NA, # NA, NA, NA, NA, NA, NaN, 7.08478 # ), # tolerance = 1e-2, # ignore_attr = TRUE # ) expect_identical( mp3$Parameter, c( "(Intercept)", "temperature.L", "temperature.Q", "temperature.C", "temperature^4", "temperature^5", "SD (Intercept)", "SD (temperature.L)", "SD (temperature.Q)", "SD (temperature.C)", "SD (temperature^4)", "SD (temperature^5)", "Cor (Intercept~temperature.L)", "Cor (Intercept~temperature.Q)", "Cor (Intercept~temperature.C)", "Cor (Intercept~temperature^4)", "Cor (Intercept~temperature^5)", "Cor (temperature.L~temperature.Q)", "Cor (temperature.L~temperature.C)", "Cor (temperature.L~temperature^4)", "Cor (temperature.L~temperature^5)", "Cor (temperature.Q~temperature.C)", "Cor (temperature.Q~temperature^4)", "Cor (temperature.Q~temperature^5)", "Cor (temperature.C~temperature^4)", "Cor (temperature.C~temperature^5)", "Cor (temperature^4~temperature^5)", "SD (Observations)" ) ) expect_identical( mp3$Group, c( "", "", "", "", "", "", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "recipe", "Residual" ) ) }) test_that("random effects CIs, categorical slope-2", { ## FIXME: Results differ across R versions, no idea why... # expect_equal( # mp4$CI_low, # c( # 29.01106, 5.01248, -1.89447, -1.96271, -2.66795, -2.50896, # 4.23401, 0.62943, 0.36949, 0.13979, 0.01129, 0.6074, 0.50155, # -0.30497, -0.94063, -0.13156, -0.32484, NA, NA, NA, NA, NA, NA, # NA, NA, NA, 0.42465, 4.2358 # ) # , # tolerance = 1e-2, # ignore_attr = TRUE # ) expect_identical( mp4$Parameter, c( "(Intercept)", "temperature.L", "temperature.Q", "temperature.C", "temperature^4", "temperature^5", "SD (Intercept)", "SD (temperature.L)", "SD (temperature.Q)", "SD (temperature.C)", "SD (temperature^4)", "SD (temperature^5)", "Cor (Intercept~temperature.L)", "Cor (Intercept~temperature.Q)", "Cor (Intercept~temperature.C)", "Cor (Intercept~temperature^4)", "Cor (Intercept~temperature^5)", "Cor (temperature.L~temperature.Q)", "Cor (temperature.L~temperature.C)", "Cor (temperature.L~temperature^4)", "Cor (temperature.L~temperature^5)", "Cor (temperature.Q~temperature.C)", "Cor (temperature.Q~temperature^4)", "Cor (temperature.Q~temperature^5)", "Cor (temperature.C~temperature^4)", "Cor (temperature.C~temperature^5)", "Cor (temperature^4~temperature^5)", "SD (Observations)" ) ) expect_identical( mp4$Group, c( "", "", "", "", "", "", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "replicate", "Residual" ) ) }) test_that("random effects CIs, double slope", { expect_equal( mp5$CI_low, c(238.40606, 7.52296, 15.0171, 3.80547, 0, -0.48781, NaN, NaN, 22.80045), tolerance = 1e-2, ignore_attr = TRUE ) expect_identical( mp5$Parameter, c( "(Intercept)", "Days", "SD (Intercept)", "SD (Days)", "SD (Months)", "Cor (Intercept~Days)", "Cor (Intercept~Months)", "Cor (Days~Months)", "SD (Observations)" ) ) }) test_that("random effects CIs, simple slope", { data(sleepstudy, package = "lme4") set.seed(123) sleepstudy$Months <- sample.int(4, nrow(sleepstudy), TRUE) set.seed(123) m2 <- glmmTMB::glmmTMB(Reaction ~ Days + (0 + 
Days | Subject), data = sleepstudy) m5 <- suppressWarnings(glmmTMB::glmmTMB( Reaction ~ Days + (0 + Days + Months | Subject), data = sleepstudy )) set.seed(123) mp2 <- model_parameters(m2, ci_random = TRUE) expect_message( { mp5 <- model_parameters(m5, ci_random = TRUE) }, "singularity" ) # no SE/CI expect_equal( mp2$CI_low, c(243.55046, 6.89554, 4.98429, 25.94359), tolerance = 1e-2, ignore_attr = TRUE ) expect_identical( mp2$Parameter, c("(Intercept)", "Days", "SD (Days)", "SD (Observations)") ) ## FIXME: Results differ across R versions, no idea why... # expect_equal( # mp5$CI_low, # c(237.03695, 9.04139, NaN, 8.95755, NaN, 30.67054), # tolerance = 1e-2, # ignore_attr = TRUE # ) expect_identical( mp5$Parameter, c( "(Intercept)", "Days", "SD (Days)", "SD (Months)", "Cor (Days~Months)", "SD (Observations)" ) ) }) # messages for profiled CI test_that("profiled CI messages", { mp2 <- model_parameters(m2, ci_method = "profile") expect_message( utils::capture.output(print(mp2)), regexp = "(.*)profile-likelihood(.*)z-distribution(.*)" ) }) parameters/tests/testthat/test-factor_analysis.R0000644000176200001440000000661415030725674021714 0ustar liggesuserstest_that("factor_analysis", { skip_on_cran() skip_if_not_installed("GPArotation") skip_if_not_installed("psych") skip_if_not_installed("discovr") skip_if_not_installed("knitr") set.seed(333) raq_items <- as.data.frame(discovr::raq) raq_items$id <- NULL out <- factor_analysis( raq_items, n = 4, scores = "tenBerge", cor = "poly", rotation = "oblimin", threshold = 0.4, standardize = FALSE ) raq_fa <- psych::fa(r = raq_items, nfactors = 4, scores = "tenBerge", cor = "poly") expect_equal( out$MR1, raq_fa$loadings[, "MR1"], tolerance = 1e-3, ignore_attr = TRUE ) s <- summary(out) expect_equal( as.matrix(as.data.frame(s)[2, -1]), raq_fa$Vaccounted[2, ], tolerance = 1e-3, ignore_attr = TRUE ) # include factor correlations out <- factor_analysis( mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", standardize = TRUE, sort = TRUE ) expect_snapshot(print(summary(out))) expect_snapshot(print_md(summary(out))) # check factor scores fc <- factor_scores(out) expect_identical(dim(fc), c(32L, 2L)) # works with correlation matrix skip_if_not_installed("correlation") skip_on_cran() # takes too long on CRAN raq_poly <- correlation::correlation(raq_items, method = "polychoric") raq_poly_mtx <- as.matrix(raq_poly) # store correlation matrix # needs n_obs expect_error( factor_analysis(raq_poly_mtx, n = 4), regex = "You provided a square matrix" ) out1 <- factor_analysis(raq_poly_mtx, n = 4, n_obs = 2571) expect_identical(dim(out1), c(23L, 7L)) expect_named( out1, c("Variable", "MR1", "MR2", "MR4", "MR3", "Complexity", "Uniqueness") ) out2 <- factor_analysis(as.matrix(raq_items), n = 4) expect_identical(dim(out2), c(23L, 7L)) expect_named( out2, c("Variable", "MR1", "MR2", "MR4", "MR3", "Complexity", "Uniqueness") ) # roughly equal results expect_equal(out1$MR1, out2$MR1, tolerance = 1e-1) # test n_obs provided as matrix williams <- as.data.frame(discovr::williams) williams$ID <- NULL n <- 28 r <- correlation::correlation(williams[1:n]) # create r-matrix r_mat <- matrix(0, nrow = n, ncol = n) diag(r_mat) <- 1 r_mat[lower.tri(r_mat)] <- r$r r_mat[upper.tri(r_mat)] <- r$r # create n-matrix n_mat <- matrix(0, nrow = n, ncol = n) diag(n_mat) <- 1 n_mat[lower.tri(n_mat)] <- r$n_Obs n_mat[upper.tri(n_mat)] <- r$n_Obs out <- suppressWarnings(factor_analysis(r_mat, n_obs = n_mat, n = 2)) expect_identical(dim(out), c(28L, 5L)) expect_named( out, c("Variable", "MR1", "MR2", 
"Complexity", "Uniqueness") ) n_mat <- matrix(0, nrow = n - 2, ncol = n - 2) diag(n_mat) <- 1 n_mat[lower.tri(n_mat)] <- r$n_Obs[1:325] n_mat[upper.tri(n_mat)] <- r$n_Obs[1:325] # matrix dimensions do not match expect_error( suppressWarnings(factor_analysis(r_mat, n_obs = n_mat, n = 2)), regex = "The provided" ) }) test_that("omega", { skip_on_cran() skip_if_not_installed("GPArotation") skip_if_not_installed("psych") model <- psych::omega(mtcars, nfactors = 3, plot = FALSE) out <- model_parameters(model) expect_snapshot(print(out)) expect_snapshot(print_md(out)) expect_snapshot(print(summary(out))) expect_snapshot(print_md(summary(out))) }) parameters/tests/testthat/test-n_factors.R0000644000176200001440000000502314716604201020476 0ustar liggesuserstest_that("n_factors, default", { skip_if_not_installed("nFactors") skip_if_not_installed("psych") set.seed(333) x <- n_factors(mtcars[, 1:4]) expect_identical(ncol(x), 3L) }) test_that("n_factors, EGAnet", { skip_on_cran() skip_if_not_installed("EGAnet") set.seed(333) x <- n_factors(mtcars, package = "EGAnet") expect_identical(ncol(x), 3L) expect_identical( print(capture.output(x)), c( "# Method Agreement Procedure:", "", "The choice of 3 dimensions is supported by 2 (100.00%) methods out of 2 (EGA (glasso), EGA (TMFG))." ) ) }) test_that("n_factors, EGAnet does not fail", { skip_on_cran() skip_if_not_installed("EGAnet") set.seed(333) x <- n_factors(mtcars[, 1:4], package = "EGAnet") expect_identical(ncol(x), 3L) expect_identical(nrow(x), 1L) expect_identical( print(capture.output(x)), c( "# Method Agreement Procedure:", "", "The choice of 1 dimensions is supported by 1 (100.00%) methods out of 1 (EGA (glasso))." ) ) }) test_that("n_factors, oblimin rotation", { skip_if_not_installed("nFactors") skip_if_not_installed("psych") skip_if_not_installed("GPArotation") set.seed(333) x <- n_factors(mtcars[, 1:4], type = "PCA", rotation = "oblimin") expect_identical(ncol(x), 3L) expect_identical( print(capture.output(x)), c( "# Method Agreement Procedure:", "", "The choice of 1 dimensions is supported by 11 (84.62%) methods out of 13 (Bartlett, Anderson, Lawley, Optimal coordinates, Acceleration factor, Parallel analysis, Kaiser criterion, Scree (SE), Scree (R2), VSS complexity 1, Velicer's MAP)." # nolint ) ) }) test_that("n_factors, no rotation, psych only", { skip_if_not_installed("nFactors") skip_if_not_installed("psych") set.seed(333) x <- n_factors(mtcars[, 1:4], rotation = "none", package = "psych") expect_identical(ncol(x), 3L) expect_identical( print(capture.output(x)), c( "# Method Agreement Procedure:", "", "The choice of 1 dimensions is supported by 3 (60.00%) methods out of 5 (Velicer's MAP, BIC, BIC (adjusted))." 
) ) }) test_that("n_factors, variance explained", { skip_on_cran() skip_if_not_installed("nFactors") skip_if_not_installed("psych") set.seed(333) x <- n_factors(mtcars[, 1:4], type = "PCA") expect_equal( attributes(x)$Variance_Explained$Variance_Cumulative, c(0.84126, 0.85088, 0.85859, 0.85859), tolerance = 1e-4 ) }) parameters/tests/testthat/test-standardize_parameters.R0000644000176200001440000004105415111054715023256 0ustar liggesusersdata("iris") dat <<- iris # simple ------------------------------------------------------------------ test_that("standardize_parameters (simple)", { r <- as.numeric(cor.test(dat$Sepal.Length, dat$Petal.Length)$estimate) model <- lm(Sepal.Length ~ Petal.Length, data = dat) es <- standardize_parameters(model) expect_equal(es[2, 2], r, tolerance = 0.01) expect_error(standardize_parameters(model, robust = TRUE), NA) }) # Robust ------------------------------------------------------------------ test_that("Robust post hoc", { model <- lm(mpg ~ hp, weights = gear, data = mtcars) expect_error(standardize_parameters(model, method = "basic", robust = TRUE), NA) expect_error(standardize_parameters(model, method = "basic", robust = TRUE, two_sd = TRUE), NA) model <- lm(mpg ~ hp, data = mtcars) expect_error(standardize_parameters(model, method = "basic", robust = TRUE), NA) expect_error(standardize_parameters(model, method = "basic", robust = TRUE, two_sd = TRUE), NA) }) # Labels ------------------------------------------------------------------ test_that("Preserve labels", { fit <- lm(Sepal.Length ~ Species, data = iris) out <- standardize_parameters(fit) expect_snapshot(print(out)) }) # model_parameters ------------------------------- test_that("standardize_parameters (model_parameters)", { skip_on_cran() model <<- lm(mpg ~ cyl + am, data = mtcars) mp <<- model_parameters(model, effects = "fixed") s1 <- standardize_parameters(model, method = "basic") s2 <- standardize_parameters(mp, method = "basic") expect_equal(s1$Parameter, s2$Parameter, tolerance = 1e-4) expect_equal(s1$Std_Coefficient, s2$Std_Coefficient, tolerance = 1e-4) expect_equal(s1$CI_low, s2$CI_low, tolerance = 1e-4) expect_equal(s1$CI_high, s2$CI_high, tolerance = 1e-4) mp_exp <<- model_parameters(model, exponentiate = TRUE, effects = "fixed") se1 <- standardize_parameters(model, method = "basic", exponentiate = TRUE) se2 <- standardize_parameters(mp_exp, method = "basic", exponentiate = TRUE) expect_equal(se1$Parameter, se2$Parameter, tolerance = 1e-4) expect_equal(se1$Std_Coefficient, se2$Std_Coefficient, tolerance = 1e-4) expect_equal(se1$CI_low, se2$CI_low, tolerance = 1e-4) expect_equal(se1$CI_high, se2$CI_high, tolerance = 1e-4) }) # bootstrap_model --------------------------------------------------------- test_that("standardize_parameters (bootstrap_model)", { skip_on_cran() skip_if_not_installed("boot") m <- lm(mpg ~ factor(cyl) + hp, mtcars) set.seed(1) bm_draws <- bootstrap_model(m, iterations = 599) set.seed(1) bm_tab <- bootstrap_parameters(m, iterations = 599) out_true <- standardize_parameters(m, method = "basic") out_boot1 <- standardize_parameters(bm_draws, method = "basic") out_boot2 <- standardize_parameters(bm_tab, method = "basic") expect_equal(out_boot1$Std_Coefficient, out_true$Std_Coefficient, tolerance = 0.05 ) expect_equal(out_boot1, out_boot2, ignore_attr = TRUE) expect_error(standardize_parameters(bm_draws, method = "refit")) expect_error(standardize_parameters(bm_tab, method = "refit")) }) # lm with ci ----------------------------------- test_that("standardize_parameters (lm 
with ci)", { data("iris") model <- lm(Sepal.Length ~ Species + Petal.Width, data = iris) expect_equal( standardize_parameters(model, method = "refit")$Std_Coefficient, c(0.044, -0.072, -0.060, 0.844), tolerance = 0.01 ) expect_equal( standardize_parameters(model, method = "posthoc")$Std_Coefficient, c(0, -0.072, -0.060, 0.844), tolerance = 0.01 ) expect_equal( standardize_parameters(model, method = "smart")$Std_Coefficient, c(0, -0.170, -0.142, 0.844), tolerance = 0.01 ) z_basic <- standardize_parameters(model, method = "basic") expect_equal( z_basic$Std_Coefficient, c(0, -0.034, -0.028, 0.844), tolerance = 0.01 ) ## CI expect_equal( z_basic$CI_low, c(0, -0.294, -0.433, 0.491), tolerance = 0.01 ) expect_equal( z_basic$CI_high, c(0, 0.225, 0.375, 1.196), tolerance = 0.01 ) z_basic.0.80 <- standardize_parameters(model, ci = 0.8, method = "basic") expect_equal( z_basic.0.80$CI_low, c(0, -0.203, -0.292, 0.614), tolerance = 0.01 ) expect_equal( z_basic.0.80$CI_high, c(0, 0.135, 0.234, 1.073), tolerance = 0.01 ) data("mtcars") m0 <- lm(mpg ~ cyl + factor(am), mtcars) expect_equal( standardize_parameters(m0, method = "refit")[[2]][-1], standardize_parameters(m0, method = "smart")[[2]][-1], tolerance = 0.01 ) expect_equal( standardize_parameters(m0, method = "refit", two_sd = TRUE)[[2]][-1], standardize_parameters(m0, method = "smart", two_sd = TRUE)[[2]][-1], tolerance = 0.01 ) }) # aov --------------------------------------------------------------------- test_that("standardize_parameters (aov)", { dat2 <- iris dat2$Cat1 <- rep_len(c("A", "B"), nrow(dat2)) dat3 <<- dat2 m_aov <- aov(Sepal.Length ~ Species * Cat1, data = dat3) m_lm <- lm(Sepal.Length ~ Species * Cat1, data = dat3) expect_equal(standardize_parameters(m_aov), standardize_parameters(m_lm), ignore_attr = TRUE ) }) # with function interactions" ------------------- test_that("standardize_parameters (with functions / interactions)", { skip_on_cran() X <- scale(rnorm(100), TRUE, FALSE) Z <- scale(rnorm(100), TRUE, FALSE) Y <- scale(Z + X * Z + rnorm(100), TRUE, FALSE) m1 <- lm(Y ~ X * Z) m2 <- lm(Y ~ X * scale(Z)) m3 <- lm(Y ~ scale(X) * Z) m4 <- lm(scale(Y) ~ scale(X) + scale(Z) + scale(scale(X) * scale(Z))) # ground truth expect_equal( standardize_parameters(m1, method = "basic")$Std_Coefficient, model_parameters(m4)$Coefficient, ignore_attr = TRUE ) expect_equal( standardize_parameters(m2, method = "basic")$Std_Coefficient, model_parameters(m4)$Coefficient, ignore_attr = TRUE ) expect_equal( standardize_parameters(m3, method = "basic")$Std_Coefficient, model_parameters(m4)$Coefficient ) # transformed resp or pred should not affect mtcars$cyl_exp <- exp(mtcars$cyl) mtcars$mpg_sqrt <- sqrt(mtcars$mpg) m1 <- lm(exp(cyl) ~ am + sqrt(mpg), mtcars) m2 <- lm(cyl_exp ~ am + mpg_sqrt, mtcars) expect_message({ stdX <- standardize_parameters(m1, method = "refit") }) expect_false(isTRUE(all.equal( stdX[[2]], standardize_parameters(m2, method = "refit")[[2]] ))) expect_equal( standardize_parameters(m1, method = "basic")[[2]], standardize_parameters(m2, method = "basic")[[2]], ignore_attr = TRUE ) # posthoc / smart don't support data transformation expect_message(standardize_parameters(m1, method = "smart")) expect_message(standardize_parameters(m1, method = "posthoc")) }) # exponentiate ------------------------------------------------------------ test_that("standardize_parameters (exponentiate)", { mod_b <- glm(am ~ mpg + cyl + hp, data = mtcars, family = poisson() ) mod_refit <- standardize_parameters(mod_b, method = "refit", exponentiate = TRUE) 
expect_equal( mod_refit[[2]][-1], standardize_parameters(mod_b, method = "basic", exponentiate = TRUE)[[2]][-1], ignore_attr = TRUE ) expect_equal( mod_refit[[2]][-1], standardize_parameters(mod_b, method = "posthoc", exponentiate = TRUE)[[2]][-1], ignore_attr = TRUE ) expect_equal( mod_refit[[2]][-1], exp(standardize_parameters(mod_b, method = "basic")[[2]])[-1], ignore_attr = TRUE ) mod_b <- glm(am ~ mpg + cyl, data = mtcars, family = binomial() ) mod_refit <- standardize_parameters(mod_b, method = "refit", exponentiate = TRUE) expect_equal( mod_refit[[2]][-1], standardize_parameters(mod_b, method = "basic", exponentiate = TRUE)[[2]][-1], ignore_attr = TRUE ) expect_equal( mod_refit[[2]][-1], standardize_parameters(mod_b, method = "posthoc", exponentiate = TRUE)[[2]][-1], ignore_attr = TRUE ) expect_equal( mod_refit[[2]][-1], exp(standardize_parameters(mod_b, method = "basic")[[2]])[-1], tolerance = 1e-5 ) mod_b <- glm(am ~ mpg + cyl + hp, data = mtcars, family = stats::gaussian() ) mod_refit <- standardize_parameters(mod_b, method = "refit", exponentiate = TRUE) expect_equal( mod_refit[[2]][-1], standardize_parameters(mod_b, method = "basic", exponentiate = TRUE)[[2]][-1], tolerance = 1e-5 ) expect_equal( mod_refit[[2]][-1], standardize_parameters(mod_b, method = "posthoc", exponentiate = TRUE)[[2]][-1], tolerance = 1e-5 ) expect_equal( mod_refit[[2]][-1], exp(standardize_parameters(mod_b, method = "basic")[[2]])[-1], tolerance = 1e-5 ) }) # Bayes ---------------------------------------- test_that("standardize_parameters (Bayes)", { skip_on_cran() skip_if_not_installed("rstanarm") set.seed(1234) suppressWarnings({ model <- rstanarm::stan_glm(Sepal.Length ~ Species + Petal.Width, data = iris, iter = 500, refresh = 0 ) }) set.seed(1234) expect_equal( suppressWarnings(standardize_parameters(model, method = "refit")$Std_Median[1:4]), c(0.03787, -0.06088, -0.04807, 0.84067), tolerance = 0.1 ) set.seed(1234) expect_equal( suppressWarnings(standardize_parameters(model, method = "posthoc")$Std_Median[1:4]), c(0, -0.0586, -0.05258, 0.83883), tolerance = 0.01 ) posts <- standardize_posteriors(model, method = "posthoc") expect_identical(dim(posts), c(1000L, 4L)) expect_s3_class(posts, "data.frame") }) # Pseudo - GLMM -------------------------------- test_that("standardize_parameters (Pseudo - GLMM)", { skip_if_not_installed("datawizard") skip_if_not_installed("performance", minimum_version = "0.14.0") skip_on_cran() skip_if_not_installed("lme4") set.seed(1) dat <- data.frame( X = rnorm(1000), Z = rnorm(1000), C = sample(letters[1:3], size = 1000, replace = TRUE), ID = sort(rep_len(letters, 1000)) ) dat <- transform(dat, Y = X + Z + rnorm(1000)) dat <- cbind(dat, datawizard::demean(dat, c("X", "Z"), "ID")) m <- lme4::lmer(Y ~ scale(X_within) * X_between + C + (scale(X_within) | ID), data = dat ) ## No robust methods... 
## (yet)
expect_message(standardize_parameters(m, method = "pseudo", robust = TRUE, verbose = FALSE), regexp = "robust") ## Correctly identify within and between terms dev_resp <- standardize_info(m, include_pseudo = TRUE)$Deviation_Response_Pseudo expect_identical(insight::n_unique(dev_resp[c(2, 4, 5, 6)]), 1L) expect_true(dev_resp[2] != dev_resp[3]) ## Calculate pseudo-standardized coefficients manually b <- lme4::fixef(m)[-1] mm <- model.matrix(m)[, -1] SD_x <- numeric(ncol(mm)) SD_x[c(1, 3, 4, 5)] <- apply(mm[, c(1, 3, 4, 5)], 2, sd) SD_x[2] <- sd(tapply(mm[, 2], dat$ID, mean)) m0 <- lme4::lmer(Y ~ 1 + (1 | ID), data = dat) m0v <- insight::get_variance(m0) SD_y <- sqrt(c(m0v$var.residual, m0v$var.intercept)) SD_y <- SD_y[c(1, 2, 1, 1, 1)] expect_equal( data.frame(Deviation_Response_Pseudo = c(SD_y[2], SD_y), Deviation_Pseudo = c(0, SD_x)), standardize_info(m, include_pseudo = TRUE)[, c("Deviation_Response_Pseudo", "Deviation_Pseudo")], tolerance = 1e-5 ) expect_equal( standardize_parameters(m, method = "pseudo")$Std_Coefficient[-1], unname(b * SD_x / SD_y), tolerance = 1e-5 ) ## scaling should not affect pseudo-standardized coefficients m1 <- lme4::lmer(Y ~ X_within + X_between + C + (X_within | ID), data = dat ) m2 <- lme4::lmer(scale(Y) ~ X_within + X_between + C + (X_within | ID), data = dat ) m3 <- lme4::lmer(Y ~ scale(X_within) + X_between + C + (scale(X_within) | ID), data = dat ) m4 <- lme4::lmer(Y ~ X_within + scale(X_between) + C + (X_within | ID), data = dat ) std1 <- standardize_parameters(m1, method = "pseudo") expect_equal(std1$Std_Coefficient, standardize_parameters(m2, method = "pseudo")$Std_Coefficient, tolerance = 0.001 ) expect_equal(std1$Std_Coefficient, standardize_parameters(m3, method = "pseudo")$Std_Coefficient, tolerance = 0.001 ) expect_equal(std1$Std_Coefficient, standardize_parameters(m4, method = "pseudo")$Std_Coefficient, tolerance = 0.001 ) ## Give warning for within that is also between mW <- lme4::lmer(Y ~ X_between + Z_within + C + (1 | ID), dat) mM <- lme4::lmer(Y ~ X + Z + C + (1 | ID), dat) expect_warning(standardize_parameters(mW, method = "pseudo"), regexp = NA) expect_message(standardize_parameters(mM, method = "pseudo"), regexp = "within-group") }) # ZI models --------------------------------------------------------------- test_that("standardize_parameters (pscl)", { skip_on_cran() skip_if_not_installed("pscl") data("bioChemists", package = "pscl") m <- pscl::zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) mp <- model_parameters(m, effects = "fixed") sm1 <- standardize_parameters(m, method = "refit") expect_message({ sm2 <- standardize_parameters(m, method = "posthoc") }) suppressMessages({ sm3 <- standardize_parameters(m, method = "basic") sm4 <- standardize_parameters(m, method = "smart") }) # post hoc does it right (bar intercept) expect_equal(sm1$Std_Coefficient[-c(1, 6)], sm2$Std_Coefficient[-c(1, 6)], tolerance = 0.01 ) # basic / smart miss the ZI expect_equal(mp$Coefficient[6:8], sm3$Std_Coefficient[6:8], tolerance = 0.01 ) expect_equal(mp$Coefficient[7:8], sm4$Std_Coefficient[7:8], tolerance = 0.1 ) # get count numerics all right expect_equal(sm1$Std_Coefficient[4:5], sm3$Std_Coefficient[4:5], tolerance = 0.01 ) expect_equal(sm1$Std_Coefficient[4:5], sm4$Std_Coefficient[4:5], tolerance = 0.01 ) }) test_that("include_response | (g)lm", { # lm --- data(iris) iris$Sepal.Length <- iris$Sepal.Length * 5 m <- lm(Sepal.Length ~ Petal.Length + Petal.Width, data = iris) m_z <- datawizard::standardize(m, include_response = FALSE) par_z0 <- standardize_parameters(m, method = "basic") par_z1 <- 
standardize_parameters(m, include_response = FALSE) par_z2 <- standardize_parameters(m, method = "basic", include_response = FALSE) expect_equal(coef(m_z), par_z1$Std_Coefficient, ignore_attr = TRUE) expect_equal(par_z1$Std_Coefficient[-1], par_z2$Std_Coefficient[-1], tolerance = 1e-5) expect_equal(par_z0$Std_Coefficient * sd(iris$Sepal.Length), par_z2$Std_Coefficient, tolerance = 1e-5) # glm --- m <- glm(am ~ mpg, mtcars, family = binomial()) expect_equal( standardize_parameters(m), standardize_parameters(m, include_response = FALSE), ignore_attr = TRUE ) }) test_that("include_response | parameters", { data(iris) iris$Sepal.Length <- iris$Sepal.Length * 5 m <<- lm(Sepal.Length ~ Petal.Length + Petal.Width, data = iris) # parameters --- pars <- model_parameters(m, effects = "fixed") pars_z0 <- standardize_parameters(pars, method = "basic") pars_z1 <- standardize_parameters(pars, method = "basic", include_response = FALSE) expect_equal(pars_z0$Std_Coefficient[-1] * sd(iris$Sepal.Length), pars_z1$Std_Coefficient[-1], tolerance = 1e-5) # boot --- skip_if_not_installed("boot") pars <- bootstrap_parameters(m) pars_z0 <- standardize_parameters(pars, method = "basic") pars_z1 <- standardize_parameters(pars, method = "basic", include_response = FALSE) expect_equal(pars_z0$Std_Coefficient[-1] * sd(iris$Sepal.Length), pars_z1$Std_Coefficient[-1], tolerance = 1e-5) }) test_that("include_response | bayes", { skip_if_not_installed("rstanarm") skip_on_cran() data(iris) iris$Sepal.Length <- iris$Sepal.Length * 5 m <- rstanarm::stan_glm(Sepal.Length ~ Petal.Length + Petal.Width, data = iris, refresh = 0) expect_warning({ m_z <- datawizard::standardize(m, include_response = FALSE) }) expect_warning({ par_z1 <- standardize_posteriors(m, include_response = FALSE) }) par_z0 <- standardize_posteriors(m, method = "basic") par_z2 <- standardize_posteriors(m, method = "basic", include_response = FALSE) expect_equal(sapply(insight::get_parameters(m_z), mean), sapply(par_z1, mean), tolerance = 0.1) expect_equal(sapply(par_z1, mean)[-1], sapply(par_z2, mean)[-1], tolerance = 0.1) expect_equal(sapply(par_z0, mean) * sd(iris$Sepal.Length), sapply(par_z2, mean), tolerance = 0.1) }) parameters/tests/testthat/test-svyolr.R0000644000176200001440000000177315111301621020054 0ustar liggesusersskip_on_cran() skip_if_not_installed("MASS") skip_if_not_installed("survey") test_that("robust-se polr", { data(api, package = "survey") dclus1 <- survey::svydesign(id = ~dnum, weights = ~pw, data = apiclus1, fpc = ~fpc) dclus1 <- update(dclus1, mealcat = cut(meals, c(0, 25, 50, 75, 100))) m <- survey::svyolr(mealcat ~ avg.ed + mobility + stype, design = dclus1) out <- model_parameters(m) expect_identical(attributes(out)$coefficient_name, "Log-Odds") expect_identical( out$Component, c("alpha", "alpha", "alpha", "beta", "beta", "beta", "beta") ) expect_identical( out$Parameter, c( "(0,25]|(25,50]", "(25,50]|(50,75]", "(50,75]|(75,100]", "avg.ed", "mobility", "stypeH", "stypeM" ) ) expect_named( out, c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Component" ) ) }) parameters/tests/testthat/test-survey.R0000644000176200001440000000214114716604201020053 0ustar liggesusersskip_if_not_installed("withr") skip_if_not_installed("survey") withr::with_environment( new.env(), test_that("model_parameters svytable", { # svychisq is called in model_parameters svychisq <<- survey::svychisq data(api, package = "survey") dclus1 <<- survey::svydesign(id = ~dnum, weights = ~pw, data = apiclus1, fpc = ~fpc) m <- 
survey::svytable(~ sch.wide + stype, dclus1) mp <- model_parameters(m) expect_named(mp, c("F", "df", "df_error", "p", "Method")) expect_equal(mp$p, 0.02174746, tolerance = 1e-3) }) ) withr::with_environment( new.env(), test_that("model_parameters, bootstrap svyglm", { data(api, package = "survey") dstrat <- survey::svydesign( id = ~1, strata = ~stype, weights = ~pw, data = apistrat, fpc = ~fpc ) model_svyglm <- suppressWarnings(survey::svyglm(sch.wide ~ ell + meals + mobility, design = dstrat, family = binomial(link = "logit") )) expect_message(parameters(model_svyglm, bootstrap = TRUE), regex = "arguments are not supported") }) ) parameters/tests/testthat/test-model_parameters.MASS.R0000644000176200001440000000115614413515226022612 0ustar liggesuserstest_that("model_parameters.rlm", { skip_if_not_installed("MASS") model <- MASS::rlm(formula = mpg ~ am * cyl, data = mtcars) s <- summary(model) params <- model_parameters(model) expect_equal(params$SE, as.vector(coef(s)[, 2]), tolerance = 1e-3) expect_equal(params$Coefficient, as.vector(coef(s)[, 1]), tolerance = 1e-3) expect_equal(params$t, as.vector(coef(s)[, 3]), tolerance = 1e-3) expect_equal(params$df_error, c(28, 28, 28, 28), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p") ) }) parameters/tests/testthat/test-parameters_type.R0000644000176200001440000000126714355245205021736 0ustar liggesuserstest_that("parameters_type-1", { m0 <- lm(mpg ~ am * cyl, mtcars) m1 <- lm(mpg ~ am * scale(cyl), mtcars) m2 <- lm(mpg ~ scale(am) * cyl, mtcars) m3 <- lm(mpg ~ scale(am) * scale(cyl), mtcars) expect_equal(parameters_type(m0)[4, "Type"], "interaction") expect_equal(parameters_type(m1)[4, "Type"], "interaction") expect_equal(parameters_type(m2)[4, "Type"], "interaction") expect_equal(parameters_type(m3)[4, "Type"], "interaction") }) test_that("parameters_type-2", { model <- lm(Sepal.Length ~ Petal.Width * scale(Petal.Length, TRUE, FALSE), data = iris) expect_equal(parameters_type(model)$Type, c("intercept", "numeric", "numeric", "interaction")) }) parameters/tests/testthat/test-model_parameters.cpglmm.R0000644000176200001440000000111314413515226023317 0ustar liggesusersunloadNamespace("BayesFactor") test_that("model_parameters.cpglmm", { skip_if_not_installed("cplm") loadNamespace("cplm") data("FineRoot", package = "cplm") cpglmm <- cplm::cpglmm model <- cpglmm(RLD ~ Stock + Spacing + (1 | Plant), data = FineRoot) params <- model_parameters(model, effects = "fixed") expect_equal(params$SE, c(0.1308, 0.2514, 0.2, 0.1921), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Effects") ) unloadNamespace("cplm") }) parameters/tests/testthat/test-format_model_parameters2.R0000644000176200001440000002246014413515226023503 0ustar liggesuserstest_that("format_model_parameters", { skip_on_cran() skip_if_not_installed("lme4") withr::with_options( list(parameters_interaction = "*"), { d <- structure(list( Drought = structure(c( 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 
1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L ), levels = c("no", "yes"), class = "factor"), Tree.ID = structure(c( 1L, 1L, 2L, 2L, 3L, 3L, 4L, 4L, 5L, 5L, 6L, 6L, 7L, 7L, 8L, 8L, 9L, 9L, 10L, 10L, 11L, 11L, 12L, 12L, 13L, 13L, 14L, 14L, 15L, 15L, 16L, 16L, 17L, 17L, 18L, 18L, 19L, 19L, 20L, 20L, 21L, 21L, 22L, 22L, 23L, 23L, 24L, 24L, 25L, 25L, 26L, 26L, 27L, 27L, 28L, 28L, 29L, 29L, 30L, 30L, 31L, 31L, 32L, 32L, 33L, 33L, 34L, 34L, 35L, 35L, 36L, 36L, 37L, 37L, 38L, 38L, 39L, 39L, 40L, 40L, 41L, 41L, 42L, 42L, 43L, 43L, 44L, 44L, 45L, 45L, 46L, 46L, 47L, 47L, 48L, 48L, 1L, 1L, 2L, 2L, 3L, 3L, 10L, 10L, 11L, 11L, 12L, 12L, 13L, 13L, 14L, 14L, 15L, 15L, 16L, 16L, 17L, 17L, 18L, 18L, 19L, 19L, 21L, 21L, 22L, 22L, 23L, 23L, 24L, 24L, 25L, 25L, 26L, 26L, 27L, 27L, 28L, 28L, 29L, 29L, 30L, 30L, 31L, 31L, 32L, 32L, 33L, 33L, 37L, 37L, 38L, 38L, 39L, 39L, 43L, 43L, 44L, 44L, 45L, 45L, 46L, 46L, 47L, 47L, 48L, 48L ), levels = c( "102_6", "102_7", "102_8", "105_1", "105_2", "105_4", "111_7", "111_8", "111_9", "113_2", "113_4", "113_5", "114_7", "114_8", "114_9", "116_6", "116_7", "116_9", "122_3", "122_4", "122_5", "132_3", "132_4", "132_5", "242_2", "242_4", "242_5", "243_1", "243_2", "243_4", "245_1", "245_2", "245_5", "246_1", "246_2", "246_3", "251_10", "251_8", "251_9", "253_7", "253_8", "253_9", "254_6", "254_7", "254_8", "267_10", "267_6", "267_8" ), class = "factor"), Stratum = structure(c( 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L, 1L, 2L ), levels = c("lower", "upper"), class = "factor"), Year = structure(c( 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L ), levels = c("1", "2"), class = "factor"), Treatment = c( "Control", "Control", "Control", "Control", "Control", "Control", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Control", "Control", "Control", "Control", "Control", "Control", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", 
"Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Drought", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control", "Control" ), branch_miner_No = c( 41L, 25L, 47L, 49L, 50L, 49L, 49L, 50L, 49L, 50L, 48L, 47L, 49L, 49L, 50L, 49L, 49L, 43L, 41L, 45L, 49L, 37L, 50L, 49L, 50L, 50L, 50L, 49L, 45L, 44L, 49L, 48L, 50L, 48L, 50L, 49L, 44L, 38L, 50L, 34L, 38L, 28L, 47L, 39L, 50L, 49L, 47L, 50L, 42L, 19L, 47L, 46L, 50L, 50L, 49L, 46L, 49L, 50L, 40L, 45L, 50L, 50L, 41L, 44L, 50L, 50L, 50L, 46L, 50L, 48L, 50L, 50L, 48L, 38L, 49L, 42L, 39L, 31L, 49L, 33L, 38L, 49L, 48L, 48L, 49L, 49L, 48L, 50L, 45L, 37L, 28L, 25L, 45L, 45L, 39L, 35L, 38L, 43L, 46L, 34L, 49L, 33L, 40L, 47L, 47L, 39L, 46L, 31L, 47L, 40L, 47L, 45L, 47L, 42L, 48L, 47L, 39L, 25L, 37L, 46L, 38L, 42L, 44L, 48L, 47L, 46L, 48L, 49L, 38L, 44L, 39L, 31L, 41L, 42L, 44L, 18L, 23L, 48L, 26L, 26L, 28L, 32L, 47L, 46L, 49L, 33L, 47L, 38L, 35L, 17L, 39L, 30L, 44L, 42L, 47L, 36L, 8L, 33L, 32L, 37L, 33L, 38L, 32L, 45L, 47L, 41L ), branch_miner_Yes = c( 9L, 25L, 3L, 1L, 0L, 1L, 1L, 0L, 1L, 0L, 2L, 3L, 1L, 1L, 0L, 1L, 1L, 7L, 9L, 5L, 1L, 13L, 0L, 1L, 0L, 0L, 0L, 1L, 5L, 6L, 1L, 2L, 0L, 2L, 0L, 1L, 6L, 12L, 0L, 16L, 12L, 22L, 3L, 11L, 0L, 1L, 3L, 0L, 8L, 31L, 3L, 4L, 0L, 0L, 1L, 4L, 1L, 0L, 10L, 5L, 0L, 0L, 9L, 6L, 0L, 0L, 0L, 4L, 0L, 2L, 0L, 0L, 2L, 12L, 1L, 8L, 11L, 19L, 1L, 17L, 12L, 1L, 2L, 2L, 1L, 1L, 2L, 0L, 5L, 13L, 22L, 25L, 5L, 5L, 11L, 15L, 12L, 7L, 4L, 16L, 1L, 17L, 10L, 3L, 3L, 11L, 4L, 19L, 3L, 10L, 3L, 5L, 3L, 8L, 2L, 3L, 11L, 25L, 13L, 4L, 12L, 8L, 6L, 2L, 3L, 4L, 2L, 1L, 12L, 6L, 11L, 19L, 9L, 8L, 6L, 32L, 27L, 2L, 24L, 24L, 22L, 18L, 3L, 4L, 1L, 17L, 3L, 12L, 15L, 33L, 11L, 20L, 6L, 8L, 3L, 14L, 42L, 17L, 18L, 13L, 17L, 12L, 18L, 5L, 3L, 9L ) ), row.names = c( NA, -166L ), class = "data.frame") d$Year <- factor(d$Year) d$Drought <- as.factor(d$Drought) d$Stratum <- as.factor(d$Stratum) levels(d$Stratum) <- list(lower = "shade", upper = "sun") d$Tree.ID <- as.factor(d$Tree.ID) mod <- lme4::glmer( cbind(branch_miner_Yes, branch_miner_No) ~ Drought * Stratum + Drought * Year + Year * Stratum + (1 | Tree.ID), data = d, family = binomial(), na.action = na.exclude ) out <- model_parameters(mod, component = "conditional") expect_identical( attributes(out)$pretty_names, c( `(Intercept)` = "(Intercept)", Droughtyes = "Drought [yes]", Stratumupper = "Stratum [upper]", Year2 = "Year [2]", `Droughtyes:Stratumupper` 
= "Drought [yes] * Stratum [upper]", `Droughtyes:Year2` = "Drought [yes] * Year [2]", `Stratumupper:Year2` = "Stratum [upper] * Year [2]" ) ) } ) }) parameters/tests/testthat/test-ordered.R0000644000176200001440000000120714420256646020154 0ustar liggesuserstest_that("ordered factors", { data(PlantGrowth) m_ord <- lm(weight ~ as.ordered(group), PlantGrowth) pt <- parameters_type(m_ord) mp <- model_parameters(m_ord) expect_identical(pt$Type, c("intercept", "ordered", "ordered")) expect_identical(pt$Parameter, c("(Intercept)", "as.ordered(group).L", "as.ordered(group).Q")) expect_identical(mp$Parameter, c("(Intercept)", "as.ordered(group).L", "as.ordered(group).Q")) expect_identical( attributes(mp)$pretty_names, c( `(Intercept)` = "(Intercept)", `as.ordered(group).L` = "group [linear]", `as.ordered(group).Q` = "group [quadratic]" ) ) }) parameters/tests/testthat/test-model_parameters_df_method.R0000644000176200001440000001467415111071254024064 0ustar liggesusersskip_if_not_installed("lmerTest") skip_if_not_installed("pbkrtest") skip_if_not_installed("lme4") skip_if_not_installed("glmmTMB", minimum_version = "1.1.13") skip_if_not_installed("insight", minimum_version = "1.4.3") mtcars$cyl <- as.factor(mtcars$cyl) model <- suppressMessages(lme4::lmer( mpg ~ as.factor(gear) * hp + as.factor(am) + wt + (1 | cyl), data = mtcars )) model2 <- suppressMessages(lmerTest::lmer( mpg ~ as.factor(gear) * hp + as.factor(am) + wt + (1 | cyl), data = mtcars )) model3 <- suppressMessages(glmmTMB::glmmTMB( mpg ~ as.factor(gear) * hp + as.factor(am) + wt + (1 | cyl), data = mtcars, REML = TRUE )) model4 <- suppressMessages(glmmTMB::glmmTMB( mpg ~ as.factor(gear) * hp + as.factor(am) + wt + (1 | cyl), data = mtcars, REML = FALSE )) mp0 <- model_parameters(model, digits = 5, effects = "fixed") mp1 <- model_parameters(model, digits = 5, ci_method = "normal", effects = "fixed") mp2 <- model_parameters(model, digits = 5, ci_method = "s", effects = "fixed") mp3 <- model_parameters(model, digits = 5, ci_method = "kr", effects = "fixed") mp4 <- model_parameters(model, digits = 5, ci_method = "wald", effects = "fixed") mp5 <- model_parameters(model3, digits = 5, ci_method = "kr", effects = "fixed") mp6 <- model_parameters( model3, digits = 5, ci_method = "satterthwaite", effects = "fixed" ) test_that("model_parameters, ci_method default (residual)", { expect_equal( mp0$SE, c(2.77457, 3.69574, 3.521, 0.01574, 1.58514, 0.86316, 0.02973, 0.01668), tolerance = 1e-3 ) expect_equal(mp0$df, c(22, 22, 22, 22, 22, 22, 22, 22), tolerance = 1e-3) expect_equal( mp0$p, c(0, 0.00258, 0.14297, 0.17095, 0.84778, 0.00578, 0.00151, 0.32653), tolerance = 1e-3 ) expect_equal( mp0$CI_low, c(24.54722, 4.89698, -1.95317, -0.05493, -2.97949, -4.42848, -0.16933, -0.05133), tolerance = 1e-3 ) }) test_that("model_parameters, ci_method normal", { expect_equal( mp1$SE, c(2.77457, 3.69574, 3.521, 0.01574, 1.58514, 0.86316, 0.02973, 0.01668), tolerance = 1e-3 ) expect_equal(mp1$df, c(22, 22, 22, 22, 22, 22, 22, 22), tolerance = 1e-3) expect_equal( mp1$p, c(0, 0.00068, 0.12872, 0.15695, 0.846, 0.00224, 0.00029, 0.31562), tolerance = 1e-3 ) expect_equal( mp1$CI_low, c(24.86326, 5.31796, -1.5521, -0.05313, -2.79893, -4.33015, -0.16595, -0.04943), tolerance = 1e-3 ) }) test_that("model_parameters, ci_method satterthwaite", { expect_equal( mp2$SE, c(2.77457, 3.69574, 3.521, 0.01574, 1.58514, 0.86316, 0.02973, 0.01668), tolerance = 1e-3 ) expect_equal(mp2$df, c(24, 24, 24, 24, 24, 24, 24, 24), tolerance = 1e-3) expect_equal( mp2$p, c(0, 0.00236, 0.14179, 
0.16979, 0.84763, 0.00542, 0.00136, 0.32563), tolerance = 1e-3 ) expect_equal( mp2$CI_low, c(24.57489, 4.93385, -1.91805, -0.05477, -2.96368, -4.41987, -0.16904, -0.05117), tolerance = 1e-3 ) expect_equal(mp2$SE, mp6$SE, tolerance = 1e-3) expect_equal(mp2$df_error, mp6$df_error, tolerance = 1e-3) }) test_that("model_parameters, ci_method kenward", { expect_equal( mp3$SE, c(2.97608, 6.10454, 3.98754, 0.02032, 1.60327, 0.91599, 0.05509, 0.01962), tolerance = 1e-3 ) expect_equal( mp3$df, c(19.39553, 5.27602, 23.57086, 8.97297, 22.7421, 23.76299, 2.72622, 22.82714), tolerance = 1e-3 ) expect_equal( mp3$p, c(0, 0.09176, 0.19257, 0.30147, 0.84942, 0.00828, 0.15478, 0.40248), tolerance = 1e-3 ) expect_equal( mp3$CI_low, c(24.08091, -2.887, -2.88887, -0.06828, -3.01082, -4.5299, -0.29339, -0.05735), tolerance = 1e-3 ) expect_equal(mp5$SE, mp3$SE, tolerance = 1e-3) expect_equal(mp5$df_error, mp3$df_error, tolerance = 1e-3) expect_warning(expect_warning(expect_warning( { mp7 <- model_parameters(model4, digits = 5, ci_method = "kr", effects = "fixed") }, regex = "Model was not fitted" ))) expect_equal(mp5$SE, mp7$SE, tolerance = 1e-3) expect_equal(mp5$df_error, mp7$df_error, tolerance = 1e-3) }) test_that("model_parameters, ci_method wald (t)", { expect_equal( mp4$SE, c(2.77457, 3.69574, 3.521, 0.01574, 1.58514, 0.86316, 0.02973, 0.01668), tolerance = 1e-3 ) expect_equal(mp4$df, c(22, 22, 22, 22, 22, 22, 22, 22), tolerance = 1e-3) expect_equal( mp4$p, c(0, 0.00258, 0.14297, 0.17095, 0.84778, 0.00578, 0.00151, 0.32653), tolerance = 1e-3 ) expect_equal( mp4$CI_low, c(24.54722, 4.89698, -1.95317, -0.05493, -2.97949, -4.42848, -0.16933, -0.05133), tolerance = 1e-3 ) }) test_that("model_parameters, satterthwaite compare", { s <- summary(model2) expect_equal(mp2$df, as.vector(s$coefficients[, "df"]), tolerance = 1e-4) expect_equal(mp2$t, as.vector(s$coefficients[, "t value"]), tolerance = 1e-4) expect_equal(mp2$p, as.vector(s$coefficients[, "Pr(>|t|)"]), tolerance = 1e-4) expect_equal(mp2$SE, as.vector(s$coefficients[, "Std. Error"]), tolerance = 1e-4) }) test_that("model_parameters, satterthwaite Conf Int-1", { ci1 <- ci_satterthwaite(model) expect_equal(mp2$CI_low, ci1$CI_low, tolerance = 1e-4) ci2 <- ci_satterthwaite(model2) expect_equal(mp2$CI_low, ci2$CI_low, tolerance = 1e-4) }) test_that("model_parameters, satterthwaite Conf Int-2", { coef.table <- as.data.frame(summary(model2)$coefficients) coef.table$CI_low <- coef.table$Estimate - (coef.table$"Std. Error" * qt(0.975, df = coef.table$df)) coef.table$CI_high <- coef.table$Estimate + (coef.table$"Std. Error" * qt(0.975, df = coef.table$df)) expect_equal(mp2$CI_low, coef.table$CI_low, tolerance = 1e-4) expect_equal(mp2$CI_high, coef.table$CI_high, tolerance = 1e-4) }) test_that("model_parameters, Kenward-Roger compare", { s <- summary(model2, ddf = "Kenward-Roger") expect_equal(mp3$df, as.vector(s$coefficients[, "df"]), tolerance = 1e-4) expect_equal(mp3$t, as.vector(s$coefficients[, "t value"]), tolerance = 1e-4) expect_equal(mp3$p, as.vector(s$coefficients[, "Pr(>|t|)"]), tolerance = 1e-4) expect_equal(mp3$SE, as.vector(s$coefficients[, "Std. 
Error"]), tolerance = 1e-4) }) model <- lm(mpg ~ as.factor(gear) * hp + as.factor(am) + wt, data = mtcars) test_that("model_parameters, ci_method-lm", { expect_s3_class(model_parameters(model), "parameters_model") expect_message(model_parameters(model, ci_method = "kenward")) }) parameters/tests/testthat/test-model_parameters_labels.R0000644000176200001440000001123614413515226023372 0ustar liggesusersskip_if_not_installed("withr") withr::with_options( list(parameters_interaction = "*"), { test_that("model_parameters_labels", { skip_if_not_installed("lme4") skip_if_not_installed("merDeriv") data(mtcars) mtcars$am <- as.factor(mtcars$am) m1 <- lme4::lmer(mpg ~ hp * am + (1 | cyl), data = mtcars) expect_equal( attr(model_parameters(m1), "pretty_names"), c(`(Intercept)` = "(Intercept)", hp = "hp", am1 = "am [1]", `hp:am1` = "hp * am [1]") ) m2 <- lme4::lmer(mpg ~ hp * as.factor(am) + (1 | cyl), data = mtcars) expect_equal( attr(model_parameters(m2), "pretty_names"), c( `(Intercept)` = "(Intercept)", hp = "hp", `as.factor(am)1` = "am [1]", `hp:as.factor(am)1` = "hp * am [1]" ) ) m3 <- lme4::lmer(mpg ~ hp * log(gear) + (1 | cyl), data = mtcars) expect_equal( attr(model_parameters(m3), "pretty_names"), c( `(Intercept)` = "(Intercept)", hp = "hp", `log(gear)` = "gear [log]", `hp:log(gear)` = "hp * gear [log]" ) ) m4 <- lm(mpg ~ as.factor(cyl) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m4), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", hp = "hp", `log(gear)` = "gear [log]", `hp:log(gear)` = "hp * gear [log]" ) ) m5 <- lm(mpg ~ as.factor(cyl) * I(wt / 10) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m5), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", `I(wt/10)` = "wt/10", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:I(wt/10)` = "cyl [6] * wt/10", `as.factor(cyl)8:I(wt/10)` = "cyl [8] * wt/10", `hp:log(gear)` = "hp * gear [log]" ) ) m6 <- lm(mpg ~ as.factor(cyl) * log(wt) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m6), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", `log(wt)` = "wt [log]", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:log(wt)` = "cyl [6] * wt [log]", `as.factor(cyl)8:log(wt)` = "cyl [8] * wt [log]", `hp:log(gear)` = "hp * gear [log]" ) ) m7 <- lm(mpg ~ as.factor(cyl) * poly(wt, 2) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m7), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl6", `as.factor(cyl)8` = "cyl8", `poly(wt, 2)1` = "wt [1st degree]", `poly(wt, 2)2` = "wt [2nd degree]", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:poly(wt, 2)1` = "cyl6 * wt [1st degree]", `as.factor(cyl)8:poly(wt, 2)1` = "cyl8 * wt [1st degree]", `as.factor(cyl)6:poly(wt, 2)2` = "cyl6 * wt [2nd degree]", `as.factor(cyl)8:poly(wt, 2)2` = "cyl8 * wt [2nd degree]", `hp:log(gear)` = "hp * gear [log]" ) ) m8 <- lm(mpg ~ as.factor(cyl) * I(wt^2) + hp * log(gear), data = mtcars) expect_equal( attr(model_parameters(m8), "pretty_names"), c( `(Intercept)` = "(Intercept)", `as.factor(cyl)6` = "cyl [6]", `as.factor(cyl)8` = "cyl [8]", `I(wt^2)` = "wt^2", hp = "hp", `log(gear)` = "gear [log]", `as.factor(cyl)6:I(wt^2)` = "cyl [6] * wt^2", `as.factor(cyl)8:I(wt^2)` = "cyl [8] * wt^2", `hp:log(gear)` = "hp * gear [log]" ) ) }) test_that("Issue #785: partial and factor 
labels", { dat <- mtcars dat$cyl <- factor(dat$cyl) attr(dat$hp, "label") <- "Horsepower" attr(dat$cyl, "label") <- "Cylinders" m <- lm(mpg ~ hp + drat + cyl, data = dat) mp <- model_parameters(m) known <- c("(Intercept)", "Horsepower", "drat", "Cylinders [6]", "Cylinders [8]") expect_equal(attr(mp, "pretty_labels"), known, ignore_attr = TRUE) }) test_that("Issue #806: Missing label for variance component in lme4", { skip_if_not_installed("lme4") skip_if_not_installed("merDeriv") mod <- lme4::lmer(mpg ~ hp + (1 | gear), data = mtcars) p <- parameters::parameters(mod, pretty_names = "labels") expect_true("SD (Intercept)" %in% attr(p, "pretty_labels")) }) } ) parameters/tests/testthat/test-bootstrap_parameters.R0000644000176200001440000000131615033425412022757 0ustar liggesusersskip_on_cran() skip_if_not_installed("boot") test_that("bootstrap_parameters.bootstrap_model", { data(iris) m_draws <- lm(Sepal.Length ~ Sepal.Width + Petal.Length, data = iris) set.seed(123) draws <- bootstrap_model(m_draws) draws$lin_comb <- draws$Sepal.Width - draws$Petal.Length out <- bootstrap_parameters(draws) expect_snapshot(print(out)) }) test_that("bootstrap_model intercept-only", { y <- 1:10 mod <- lm(y ~ 1) set.seed(123) out <- bootstrap_model(mod, iterations = 20) expect_equal( as.numeric(out), c( 6.3, 4.8, 7, 6, 6.3, 6.7, 6.6, 6.6, 6.1, 6.1, 6.3, 6.3, 6.1, 7.2, 5.9, 5.6, 5.7, 6.8, 6.7, 6.4 ), tolerance = 1e-2 ) }) parameters/tests/testthat/test-brms.R0000644000176200001440000000340515006105056017462 0ustar liggesusersskip_on_cran() skip_on_os("mac") skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") skip_if_not_installed("withr") skip_if_not_installed("brms") skip_if_not_installed("rstan") withr::with_options( list(parameters_warning_exponentiate = TRUE), test_that("mp, footer exp", { m <- suppressWarnings(insight::download_model("brms_bernoulli_1")) skip_if(is.null(m)) out <- parameters::model_parameters(m, exponentiate = FALSE) expect_snapshot(print(out)) out <- parameters::model_parameters(m, exponentiate = TRUE) expect_snapshot(print(out)) }) ) test_that("mp, dpars in total effects", { m <- suppressWarnings(insight::download_model("brms_chocomini_1")) skip_if(is.null(m)) out <- parameters::model_parameters(m, effects = "total") expect_identical(dim(out), c(80L, 10L)) expect_identical(unique(out$Component), c("conditional", "delta", "k", "phi")) expect_named( out, c( "Group", "Level", "Parameter", "Median", "CI", "CI_low", "CI_high", "pd", "Component", "Effects" ) ) out <- parameters::model_parameters(m, effects = "grouplevel") expect_identical(dim(out), c(60L, 10L)) expect_identical(unique(out$Component), c("conditional", "delta", "k")) expect_named( out, c( "Parameter", "Component", "Median", "CI", "CI_low", "CI_high", "pd", "Rhat", "ESS", "Group" ) ) out <- parameters::model_parameters(m, effects = "all") expect_identical(dim(out), c(7L, 11L)) expect_identical(unique(out$Component), c("conditional", "delta", "k", "phi")) expect_named( out, c( "Parameter", "Effects", "Component", "Median", "CI", "CI_low", "CI_high", "pd", "Rhat", "ESS", "Group" ) ) }) parameters/tests/testthat/test-model_parameters.glht.R0000644000176200001440000000131614413515226023003 0ustar liggesuserstest_that("model_parameters.glht", { skip_if_not_installed("multcomp") set.seed(123) lmod <- lm(Fertility ~ ., data = swiss) model <- multcomp::glht( model = lmod, linfct = c( "Agriculture = 0", "Examination = 0", "Education = 0", "Catholic = 0", "Infant.Mortality = 0" ) ) params <- model_parameters(model) 
expect_equal(params$Coefficient, c(-0.1721, -0.258, -0.8709, 0.1041, 1.077), tolerance = 1e-2) expect_equal(params$SE, c(0.0703, 0.2539, 0.183, 0.0353, 0.3817), tolerance = 1e-2) expect_equal( params$Parameter, c("Agriculture == 0", "Examination == 0", "Education == 0", "Catholic == 0", "Infant.Mortality == 0") ) }) parameters/tests/testthat/test-printing-stan.R0000644000176200001440000000476515111301621021317 0ustar liggesusersskip_on_cran() skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("brms") skip_if_not_installed("insight") skip_if_not_installed("withr") skip_if_not_installed("rstanarm") skip_if_not_installed("httr2") withr::with_options(list(parameters_exponentiate = FALSE), { test_that("print brms", { m1 <- insight::download_model("brms_1") skip_if(is.null(m1)) mp1 <- model_parameters(m1, effects = "all", component = "all", centrality = "mean") expect_snapshot(mp1) m2 <- insight::download_model("brms_mixed_1") skip_if(is.null(m2)) mp2 <- model_parameters(m2, effects = "all", component = "all", centrality = "mean") expect_snapshot(mp2) m3 <- insight::download_model("brms_mixed_2") skip_if(is.null(m3)) mp3 <- model_parameters(m3, effects = "all", component = "all", centrality = "mean") expect_snapshot(mp3) m4 <- insight::download_model("brms_mixed_3") skip_if(is.null(m4)) mp4 <- model_parameters(m4, effects = "all", component = "all", centrality = "mean") expect_snapshot(mp4) m5 <- insight::download_model("brms_mixed_4") skip_if(is.null(m5)) mp5 <- model_parameters(m5, effects = "all", component = "all", centrality = "mean") expect_snapshot(mp5) m6 <- insight::download_model("brms_mixed_7") skip_if(is.null(m6)) mp6 <- model_parameters(m6, effects = "all", component = "all", centrality = "mean") expect_snapshot(mp6) m7 <- insight::download_model("brms_zi_1") skip_if(is.null(m7)) mp7 <- model_parameters( m7, effects = "all", component = "all", centrality = "mean", verbose = FALSE ) expect_snapshot(mp7) ## TODO: check why local results differ from snapshot # m8 <- insight::download_model("brms_zi_3") # mp8 <- model_parameters(m8, effects = "all", component = "all", centrality = "mean", verbose = FALSE) # expect_snapshot(mp8) m9 <- insight::download_model("brms_ordinal_1") skip_if(is.null(m9)) mp9 <- model_parameters(m9, effects = "all", component = "all", centrality = "mean") expect_snapshot(mp9) }) test_that("print-information", { m <- insight::download_model("brms_1") skip_if(is.null(m)) out <- model_parameters(m) expect_snapshot(out) out <- model_parameters(m, ci_method = "HDI") expect_snapshot(out) m <- insight::download_model("stanreg_glm_1") skip_if(is.null(m)) out <- model_parameters(m) }) }) parameters/tests/testthat/test-parameters_table.R0000644000176200001440000000235514413515226022041 0ustar liggesusersskip_if_not_installed("effectsize") skip_if_not_installed("insight") test_that("parameters_table 1", { x <- model_parameters(lm(Sepal.Length ~ Species, data = iris), standardize = "refit") tab <- insight::format_table(x) expect_equal(colnames(tab), c("Parameter", "Coefficient", "SE", "95% CI", "t(147)", "p")) }) test_that("parameters_table 2", { skip_if_not_installed("lme4") x <- model_parameters(lme4::lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris), effects = "fixed") tab <- insight::format_table(x) expect_true(all(names(tab) == c("Parameter", "Coefficient", "SE", "95% CI", "t(146)", "p", "Effects"))) }) test_that("parameters_table 3", { x <- effectsize::effectsize(lm(Sepal.Length ~ Species, data = iris)) tab <- insight::format_table(x) 
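  # effectsize() returns standardized coefficients, so the formatted table
  # should carry only parameter names, point estimates, and CIs; there are
  # no SE/t/p columns here, unlike the model_parameters() tables above.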
expect_equal(colnames(tab), c("Parameter", "Std. Coef.", "95% CI")) }) test_that("parameters_table 4", { x <- model_parameters(lm(Sepal.Length ~ Species, data = iris), standardize = "posthoc") tab <- insight::format_table(x) expect_equal(colnames(tab), c("Parameter", "Std. Coef.", "SE", "95% CI", "t(147)", "p")) }) # x <- report::report_table(lm(Sepal.Length ~ Species, data=iris)) # Once on CRAN # t <- insight::format_table(x) # t parameters/tests/testthat/test-p_significance.R0000644000176200001440000000374615057525051021477 0ustar liggesusersskip_on_cran() skip_if_not_installed("bayestestR") skip_if_not_installed("distributional") skip_if_not_installed("withr") withr::with_environment( new.env(), test_that("p_significance", { data(mtcars) m <<- lm(mpg ~ gear + wt + cyl + hp, data = mtcars) set.seed(123) x <- p_significance(m) expect_identical(c(nrow(x), ncol(x)), c(5L, 5L)) expect_named(x, c("Parameter", "CI", "CI_low", "CI_high", "ps")) expect_snapshot(print(x)) mp <- model_parameters(m) set.seed(123) x2 <- p_significance(mp) expect_equal(x$ps, x2$ps, tolerance = 1e-4) set.seed(123) x <- p_significance(m, ci = 0.8) expect_equal(x$ps, c(1, 0.3983, 0.9959, 0.6188, 0), tolerance = 1e-3) set.seed(123) x <- p_significance(m, threshold = 0.5) expect_equal(x$ps, c(1, 0.4393, 0.9969, 0.6803, 0), tolerance = 1e-4) set.seed(123) # Test p_significance with custom thresholds for specific parameters x <- p_significance(m, threshold = list(cyl = 0.5, wt = 0.7)) expect_equal(x$ps, c(1, 0.5982, 0.9955, 0.6803, 1e-04), tolerance = 1e-4) }) ) test_that("p_significance, glmmTMB", { skip_if_not_installed("glmmTMB") data(Salamanders, package = "glmmTMB") m1 <- glmmTMB::glmmTMB(count ~ mined + cover + (1 | site), zi = ~mined, family = poisson, data = Salamanders ) out <- p_significance(m1) expect_identical(c(nrow(out), ncol(out)), c(5L, 6L)) expect_named(out, c("Parameter", "CI", "CI_low", "CI_high", "ps", "Component")) expect_equal(out$ps, c(0.6451, 1, 0.9015, 1, 1), tolerance = 1e-4) expect_identical( out$Parameter, c( "(Intercept)_cond", "minedno_cond", "cover_cond", "(Intercept)_zi", "minedno_zi" ) ) }) test_that("p_significance, robust", { skip_if_not_installed("sandwich") data(mtcars) m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars) set.seed(123) x <- p_significance(m, vcov = "HC3") expect_snapshot(print(x)) }) parameters/tests/testthat/test-model_parameters.glm.R0000644000176200001440000000630514775505314022637 0ustar liggesusersskip_if_not_installed("boot") test_that("model_parameters.lm", { model <- lm(mpg ~ wt, data = mtcars) params <- model_parameters(model, verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(2L, 9L)) expect_equal(params$CI_high, c(41.119752761418, -4.20263490802709), tolerance = 1e-3) expect_equal(attributes(params)$sigma, 3.045882, tolerance = 1e-3) params <- model_parameters(model, ci = c(0.8, 0.9), verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(2L, 10L)) params <- model_parameters(model, dispersion = TRUE, bootstrap = TRUE, iterations = 500, verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(2L, 7L)) model <- lm(mpg ~ wt + cyl, data = mtcars) params <- model_parameters(model, verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(3L, 9L)) model <- lm(mpg ~ wt * cyl, data = mtcars) params <- model_parameters(model, verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(4L, 9L)) params <- model_parameters(model, component = "conditional", effects = "fixed", verbose = FALSE) }) test_that("print digits 
model_parameters.lm", { model <- lm(mpg ~ wt, data = mtcars) params <- model_parameters(model, digits = 4, ci_digits = 5, verbose = FALSE) out <- capture.output(print(params)) expect_identical(out[3], "(Intercept) | 37.2851 | 1.8776 | [33.45050, 41.11975] | 19.8576 | < .001") }) test_that("print digits model_parameters.lm", { skip_if_not_installed("performance") model <- lm(mpg ~ wt, data = mtcars) params <- model_parameters(model, include_info = TRUE, verbose = FALSE) expect_snapshot(params) params <- model_parameters(model, include_info = FALSE, verbose = FALSE) expect_snapshot(params) }) test_that("model_parameters.glm - binomial", { set.seed(333) model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") params <- model_parameters(model, verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(3L, 9L)) params <- suppressWarnings(model_parameters(model, bootstrap = TRUE, iterations = 500, verbose = FALSE)) expect_identical(c(nrow(params), ncol(params)), c(3L, 6L)) params <- model_parameters(model, component = "conditional", effects = "fixed", verbose = FALSE) }) test_that("model_parameters.glm - binomial probit", { set.seed(333) model <- glm(vs ~ wt + cyl, data = mtcars, family = binomial("probit")) params <- model_parameters(model, verbose = FALSE) expect_identical(attributes(params)$coefficient_name, "Z-Score") }) test_that("model_parameters.glm - Gamma - print", { # test printing for prevalence ratios clotting <- data.frame( u = c(5, 10, 15, 20, 30, 40, 60, 80, 100), lot1 = c(118, 58, 42, 35, 27, 25, 21, 19, 18), lot2 = c(69, 35, 26, 21, 18, 16, 13, 12, 12) ) m <- glm(lot1 ~ log(u), data = clotting, family = Gamma("log")) mp <- model_parameters(m, exponentiate = TRUE) expect_snapshot(mp) }) test_that("model_parameters.glm - glm, identity link", { data(mtcars) m <- glm(am ~ vs, data = mtcars, family = binomial(link = "identity")) p <- model_parameters(m) expect_identical(attributes(p)$coefficient_name, "Risk") }) parameters/tests/testthat/test-format_p_adjust.R0000644000176200001440000000024114412513617021701 0ustar liggesuserstest_that("format_p_adjust", { expect_identical(format_p_adjust("holm"), "Holm (1979)") expect_identical(format_p_adjust("bonferroni"), "Bonferroni") }) parameters/tests/testthat/test-cluster_analysis.R0000644000176200001440000000122014410544614022101 0ustar liggesuserstest_that("cluster_analysis, predict, matrix", { data(iris) iris.dat <- iris[c(1:15, 51:65, 101:115), -5] set.seed(123) iris.dat.km <- cluster_analysis(iris.dat, n = 4, method = "kmeans") x1 <- predict(iris.dat.km) set.seed(123) iris.mat <- as.matrix(iris.dat) iris.mat.km <- cluster_analysis(iris.mat, n = 4, method = "kmeans") x2 <- predict(iris.mat.km) expect_identical(x1, x2) }) test_that("cluster_analysis, works with include_factors, #847", { d <- iris[3:5] rz_kmeans <- cluster_analysis(d, n = 3, method = "kmeans", include_factors = TRUE) expect_identical(rz_kmeans$Cluster, c("1", "2", "3")) }) parameters/tests/testthat/test-backticks.R0000644000176200001440000000664114413515226020467 0ustar liggesusersskip_on_cran() data(iris) iris$`a m` <<- iris$Species iris$`Sepal Width` <<- iris$Sepal.Width m1 <- lm(`Sepal Width` ~ Petal.Length + `a m` * log(Sepal.Length), data = iris) m2 <- lm(Sepal.Width ~ Petal.Length + Species * log(Sepal.Length), data = iris) test_that("standard_error, backticks", { expect_identical( standard_error(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a 
mvirginica:log(Sepal.Length)" ) ) expect_identical( standard_error(m2)$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("ci, backticks", { expect_identical( ci(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_identical( ci(m2)$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) expect_identical( ci(m1, method = "wald")$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_identical( ci(m2, method = "wald")$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("p, backticks", { expect_identical( p_value(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_identical( p_value(m2)$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("model_parameters, backticks", { expect_identical( model_parameters(m1)$Parameter, c( "(Intercept)", "Petal.Length", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_identical( model_parameters(m2)$Parameter, c( "(Intercept)", "Petal.Length", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) test_that("model_parameters-2, backticks", { expect_identical( model_parameters(select_parameters(m1))$Parameter, c( "(Intercept)", "a mversicolor", "a mvirginica", "log(Sepal.Length)", "a mversicolor:log(Sepal.Length)", "a mvirginica:log(Sepal.Length)" ) ) expect_identical( model_parameters(select_parameters(m2))$Parameter, c( "(Intercept)", "Speciesversicolor", "Speciesvirginica", "log(Sepal.Length)", "Speciesversicolor:log(Sepal.Length)", "Speciesvirginica:log(Sepal.Length)" ) ) }) parameters/tests/testthat/test-lmerTest.R0000644000176200001440000000202414413515226020317 0ustar liggesusersskip_on_cran() skip_if_not_installed("lmerTest") skip_if_not_installed("pbkrtest") data("carrots", package = "lmerTest") m1 <- lmerTest::lmer(Preference ~ sens2 + Homesize + (1 + sens2 | Consumer), data = carrots) test_that("model_parameters, satterthwaite", { params <- model_parameters(m1, effects = "fixed", ci_method = "satterthwaite") s <- summary(m1) expect_equal(params$df, as.vector(s$coefficients[, "df"]), tolerance = 1e-4) expect_equal(params$t, as.vector(s$coefficients[, "t value"]), tolerance = 1e-4) expect_equal(params$p, as.vector(s$coefficients[, "Pr(>|t|)"]), tolerance = 1e-4) }) test_that("model_parameters, kenward", { params <- model_parameters(m1, effects = "fixed", ci_method = "kenward") s <- summary(m1, ddf = "Kenward-Roger") expect_equal(params$df, as.vector(s$coefficients[, "df"]), tolerance = 1e-4) expect_equal(params$t, as.vector(s$coefficients[, 
"t value"]), tolerance = 1e-4) expect_equal(params$p, as.vector(s$coefficients[, "Pr(>|t|)"]), tolerance = 1e-4) }) parameters/tests/testthat/test-format_model_parameters.R0000644000176200001440000001626714716604201023427 0ustar liggesusersskip_if_not_installed("withr") # make sure we have the correct interaction mark for tests withr::with_options( list(parameters_interaction = "*"), { test_that("format_model_parameters-1", { m <- lm(mpg ~ qsec:wt + wt:drat, data = mtcars) expect_identical(unname(format_parameters(m)), c("(Intercept)", "qsec * wt", "wt * drat")) }) test_that("format_model_parameters-2", { m <- lm(mpg ~ qsec:wt + wt / drat, data = mtcars) expect_identical(unname(format_parameters(m)), c("(Intercept)", "wt", "qsec * wt", "wt * drat")) }) test_that("format_model_parameters-3", { m <- lm(mpg ~ qsec:wt + wt:drat + wt, data = mtcars) expect_identical(unname(format_parameters(m)), c("(Intercept)", "wt", "qsec * wt", "wt * drat")) }) test_that("format_model_parameters-4", { m <- lm(mpg ~ qsec:wt + wt / drat + wt, data = mtcars) expect_identical(unname(format_parameters(m)), c("(Intercept)", "wt", "qsec * wt", "wt * drat")) }) test_that("format_model_parameters-5", { m <- lm(mpg ~ qsec * wt + wt:drat + wt, data = mtcars) expect_identical(unname(format_parameters(m)), c("(Intercept)", "qsec", "wt", "qsec * wt", "wt * drat")) }) test_that("format_model_parameters-6", { m <- lm(mpg ~ wt + qsec + wt:qsec, data = mtcars) expect_identical(unname(format_parameters(m)), c("(Intercept)", "wt", "qsec", "wt * qsec")) }) test_that("format_model_parameters-7", { m <- lm(Sepal.Width ~ Species * Petal.Length, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) test_that("format_model_parameters-8", { m <- lm(Sepal.Width ~ Species:Petal.Length, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Species [setosa] * Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) test_that("format_model_parameters-9", { m <- lm(Sepal.Width ~ Species / Petal.Length, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Species [setosa] * Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) test_that("format_model_parameters-10", { m <- lm(Sepal.Width ~ Species * Petal.Length + Species, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) test_that("format_model_parameters-11", { m <- lm(Sepal.Width ~ Species:Petal.Length + Species, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Species [setosa] * Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) test_that("format_model_parameters-12", { m <- lm(Sepal.Width ~ Species / Petal.Length + Species, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Species [setosa] * Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) test_that("format_model_parameters-13", { m <- lm(Sepal.Width ~ Species * 
Petal.Length + Petal.Length, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) test_that("format_model_parameters-14", { m <- lm(Sepal.Width ~ Species:Petal.Length + Petal.Length, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) test_that("format_model_parameters-15", { m <- lm(Sepal.Width ~ Species / Petal.Length + Petal.Length, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) test_that("format_model_parameters-16", { m <- lm(Sepal.Width ~ Species * Petal.Length + Petal.Length + Species, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) test_that("format_model_parameters-17", { m <- lm(Sepal.Width ~ Species:Petal.Length + Petal.Length + Species, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Petal Length", "Species [versicolor]", "Species [virginica]", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) test_that("format_model_parameters-18", { m <- lm(Sepal.Width ~ Species / Petal.Length + Petal.Length + Species, data = iris) expect_identical( unname(format_parameters(m)), c( "(Intercept)", "Species [versicolor]", "Species [virginica]", "Petal Length", "Species [versicolor] * Petal Length", "Species [virginica] * Petal Length" ) ) }) } ) skip_if_not_installed("lme4") skip_if_not_installed("glmmTMB") test_that("format, compare_parameters, mixed models", { data(mtcars) data(Salamanders, package = "glmmTMB") model1 <- lme4::lmer(mpg ~ wt + (1 | gear), data = mtcars) model2 <- glmmTMB::glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) out <- compare_parameters(model1, model2, effects = "all", component = "all") f <- format(out) expect_length(f, 3) f <- format(out, format = "html") expect_identical( f$Component, c( "Fixed Effects", "Fixed Effects", "Fixed Effects", "Fixed Effects", "Fixed Effects", "Fixed Effects", "Fixed Effects", "Fixed Effects", "Fixed Effects", "Fixed Effects (Zero-Inflation Component)", "Fixed Effects (Zero-Inflation Component)", "Random Effects", "Random Effects", "Random Effects" ) ) }) parameters/tests/testthat/test-model_parameters_random_pars.R0000644000176200001440000001453714413515226024444 0ustar liggesusersskip_on_cran() skip_if_not_installed("lme4") data(sleepstudy, package = "lme4") model <- lme4::lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy) mp <- model_parameters(model, effects = "random") test_that("model_parameters-random pars 1", { expect_equal(mp$Coefficient, as.data.frame(lme4::VarCorr(model))$sdcor, tolerance = 1e-3) expect_equal(mp$SE, c(6.81191, 1.72707), tolerance = 1e-3) expect_equal(mp$CI_low, c(25.90983, 27.78454), tolerance = 1e-3) expect_equal(mp$Parameter, c("SD (Intercept)", "SD (Observations)")) }) model <- lme4::lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) mp <- model_parameters(model, effects = "random") test_that("model_parameters-random pars 2", { 
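  # With correlated random intercepts and slopes, (1 + Days | Subject), the
  # random-parameter table holds two SDs, their correlation, and the
  # residual SD; estimates are checked against lme4::VarCorr() right below.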
expect_equal(mp$Coefficient, as.data.frame(lme4::VarCorr(model))$sdcor, tolerance = 1e-3) expect_equal(mp$SE, c(5.83626, 1.24804, 0.31859, 1.50801), tolerance = 1e-3) expect_equal(mp$CI_low, c(15.5817, 3.91828, -0.50907, 22.80044), tolerance = 1e-3) expect_equal(mp$Parameter, c("SD (Intercept)", "SD (Days)", "Cor (Intercept~Days)", "SD (Observations)")) }) model <- lme4::lmer(Reaction ~ Days + (1 + Days || Subject), data = sleepstudy) mp <- model_parameters(model, effects = "random") test_that("model_parameters-random pars 3", { expect_equal(mp$Coefficient, as.data.frame(lme4::VarCorr(model))$sdcor, tolerance = 1e-3) expect_equal(mp$SE, c(5.66046, 1.21291, 1.50063), tolerance = 1e-3) expect_equal(mp$CI_low, c(16.08784, 4.0261, 22.78698), tolerance = 1e-3) expect_equal(mp$Parameter, c("SD (Intercept)", "SD (Days)", "SD (Observations)")) }) model <- lme4::lmer(Reaction ~ Days + (0 + Days || Subject), data = sleepstudy) mp <- model_parameters(model, effects = "random") test_that("model_parameters-random pars 4", { expect_equal(mp$Coefficient, as.data.frame(lme4::VarCorr(model))$sdcor, tolerance = 1e-3) expect_equal(mp$SE, c(1.31507, 1.6171), tolerance = 1e-3) expect_equal(mp$CI_low, c(5.09041, 26.01525), tolerance = 1e-3) expect_equal(mp$Parameter, c("SD (Days)", "SD (Observations)")) }) data(sleepstudy, package = "lme4") set.seed(12345) sleepstudy$grp <- sample(1:5, size = 180, replace = TRUE) sleepstudy$subgrp <- NA for (i in 1:5) { filter_group <- sleepstudy$grp == i sleepstudy$subgrp[filter_group] <- sample(1:30, size = sum(filter_group), replace = TRUE) } model <- lme4::lmer(Reaction ~ Days + (1 | grp / subgrp) + (1 | Subject), data = sleepstudy) mp <- model_parameters(model, effects = "random", ci_random = TRUE) test_that("model_parameters-random pars 5", { expect_equal(mp$Coefficient, as.data.frame(lme4::VarCorr(model))$sdcor, tolerance = 1e-3) expect_equal(mp$SE, c(8.92501, 6.80902, 6.70278, 2.41892), tolerance = 1e-3) expect_equal(mp$CI_low, c(0.37493, 25.90517, 0.00135, 25.92818), tolerance = 1e-3) expect_equal(mp$Parameter, c("SD (Intercept)", "SD (Intercept)", "SD (Intercept)", "SD (Observations)")) }) model <- lme4::lmer(Reaction ~ Days + (1 | grp / subgrp), data = sleepstudy) mp <- model_parameters(model, effects = "random") test_that("model_parameters-random pars 6", { expect_equal(mp$Coefficient, as.data.frame(lme4::VarCorr(model))$sdcor, tolerance = 1e-3) expect_equal(mp$SE, c(11.37581, 10.02558, 3.45893), tolerance = 1e-3) expect_equal(mp$CI_low, c(1.33029, 0.00166, 40.13353), tolerance = 1e-3) expect_equal(mp$Parameter, c("SD (Intercept)", "SD (Intercept)", "SD (Observations)")) }) data(sleepstudy, package = "lme4") sleepstudy$Days2 <- cut(sleepstudy$Days, breaks = c(-1, 3, 6, 10)) model <- lme4::lmer(Reaction ~ Days2 + (1 + Days2 | Subject), data = sleepstudy) mp <- model_parameters(model, effects = "random") test_that("model_parameters-random pars 7", { expect_equal(mp$Coefficient, as.data.frame(lme4::VarCorr(model))$sdcor, tolerance = 1e-3) expect_equal(mp$SE, c(5.68189, 5.16887, 8.47536, 0.3384, 0.47038, 0.41966, 1.7238), tolerance = 1e-3) expect_equal(mp$CI_low, c(16.7131, 21.12065, 24.1964, -0.36662, -0.59868, -0.93174, 24.18608), tolerance = 1e-3) expect_equal( mp$Parameter, c( "SD (Intercept)", "SD (Days2(3,6])", "SD (Days2(6,10])", "Cor (Intercept~Days2(3,6])", "Cor (Intercept~Days2(6,10])", "Cor (Days2(3,6]~Days2(6,10])", "SD (Observations)" ) ) }) model <- lme4::lmer(Reaction ~ Days2 + (0 + Days2 | Subject), data = sleepstudy) mp <- model_parameters(model, 
effects = "random") test_that("model_parameters-random pars 8", { expect_equal(mp$Coefficient, as.data.frame(lme4::VarCorr(model))$sdcor, tolerance = 1e-3) expect_equal(mp$SE, c(5.68188, 4.951, 9.773, 0.34887, 0.59977, 0.3494, 1.7238), tolerance = 1e-3) expect_equal(mp$CI_low, c(16.713, 37.06178, 36.14261, -0.65336, -0.92243, -0.99569, 24.18612), tolerance = 1e-3) expect_equal( mp$Parameter, c( "SD (Days2(-1,3])", "SD (Days2(3,6])", "SD (Days2(6,10])", "Cor (Days2(-1,3]~Days2(3,6])", "Cor (Days2(-1,3]~Days2(6,10])", "Cor (Days2(3,6]~Days2(6,10])", "SD (Observations)" ) ) }) test_that("model_parameters-random pars 9", { suppressMessages( model <- lme4::lmer(Reaction ~ Days2 + (1 + Days2 || Subject), data = sleepstudy) ) mp <- model_parameters(model, effects = "random", verbose = FALSE) expect_equal(mp$Coefficient, as.data.frame(lme4::VarCorr(model))$sdcor, tolerance = 1e-3) expect_true(all(is.na(mp$SE))) expect_equal( mp$Parameter, c( "SD (Intercept)", "SD (Days2(-1,3])", "SD (Days2(3,6])", "SD (Days2(6,10])", "Cor (Days2(-1,3]~Days2(3,6])", "Cor (Days2(-1,3]~Days2(6,10])", "Cor (Days2(3,6]~Days2(6,10])", "SD (Observations)" ) ) }) test_that("model_parameters-random pars 10", { model <- lme4::lmer(Reaction ~ Days2 + (0 + Days2 || Subject), data = sleepstudy) mp <- model_parameters(model, effects = "random") expect_equal(mp$Coefficient, as.data.frame(lme4::VarCorr(model))$sdcor, tolerance = 1e-3) expect_equal(mp$SE, c(5.68188, 4.951, 9.773, 0.34887, 0.59977, 0.3494, 1.7238), tolerance = 1e-3) expect_equal(mp$CI_low, c(16.713, 37.06178, 36.14261, -0.65336, -0.92243, -0.99569, 24.18612), tolerance = 1e-3) expect_equal( mp$Parameter, c( "SD (Days2(-1,3])", "SD (Days2(3,6])", "SD (Days2(6,10])", "Cor (Days2(-1,3]~Days2(3,6])", "Cor (Days2(-1,3]~Days2(6,10])", "Cor (Days2(3,6]~Days2(6,10])", "SD (Observations)" ) ) }) parameters/tests/testthat/test-simulate_parameters.R0000644000176200001440000000274014413515226022573 0ustar liggesusersskip_on_os(c("mac", "linux", "solaris")) skip_if_not(getRversion() >= "4.0.0") skip_if_not_installed("sandwich") mod <- lm(mpg ~ wt + cyl, data = mtcars) test_that("simulate_parameters, lm", { set.seed(123) s1 <- simulate_parameters(mod) set.seed(123) s2 <- simulate_parameters(mod, vcov = "HC1") expect_equal(dim(s1), c(3L, 5L)) expect_equal(dim(s2), c(3L, 5L)) expect_false(isTRUE(all.equal(s1$Coefficient, s2$Coefficient, tolerance = 1e-5))) expect_false(isTRUE(all.equal(s1$Coefficient, s2$CI_low, tolerance = 1e-5))) }) skip_on_cran() skip_if_not_installed("glmmTMB") data(fish) mod <- suppressWarnings(glmmTMB::glmmTMB( count ~ child + camper + (1 | persons), ziformula = ~ child + camper + (1 | persons), data = fish, family = glmmTMB::truncated_poisson() )) test_that("simulate_parameters, glmmTMB", { set.seed(123) s <- simulate_parameters(mod) expect_equal(dim(s), c(6L, 6L)) expect_equal(s$Coefficient, c(1.26979, -1.14433, 0.73637, -0.39618, 2.05839, -1.01957), tolerance = 1e-1) expect_equal(s$CI_low, c(0.33767, -1.33193, 0.55914, -1.65328, 1.44539, -1.65345), tolerance = 1e-1) }) test_that("simulate_parameters, glmmTMB, conditional only", { set.seed(123) s <- simulate_parameters(mod, component = "conditional") expect_equal(dim(s), c(3L, 5L)) expect_equal(s$Coefficient, c(1.26979, -1.14433, 0.73637), tolerance = 1e-1) expect_equal(s$CI_low, c(0.33767, -1.33193, 0.55914), tolerance = 1e-1) }) parameters/tests/testthat/test-get_scores.R0000644000176200001440000000064415030725674020671 0ustar liggesusersskip_on_cran() test_that("get_scores", { 
skip_if_not_installed("psych") data(mtcars) pca <- principal_components(mtcars[, 1:7], n = 2, rotation = "varimax") scores <- get_scores(pca) expect_equal(head(scores$Component_1), c(38.704, 38.755, 28.194, 58.339, 78.658, 51.064), tolerance = 1e-2) expect_equal(head(scores$Component_2), c(63.23, 63.51, 55.805, 64.72, 96.01, 62.61), tolerance = 1e-2) }) parameters/tests/testthat/test-model_parameters.mediate.R0000644000176200001440000000523315111301621023443 0ustar liggesusersskip_on_cran() skip_if_not_installed("mediation") skip_if_not_installed("MASS") data(jobs, package = "mediation") b <- lm(job_seek ~ treat + econ_hard + sex + age, data = jobs) c <- lm(depress2 ~ treat + job_seek + econ_hard + sex + age, data = jobs) set.seed(1234) m1 <- mediation::mediate(b, c, sims = 50, treat = "treat", mediator = "job_seek") b2 <- lm(job_seek ~ educ + sex, data = jobs) c2 <- lm(depress2 ~ educ + job_seek + sex, data = jobs) set.seed(1234) m2 <- mediation::mediate( b2, c2, treat = "educ", mediator = "job_seek", sims = 50, control.value = "gradwk", treat.value = "somcol" ) test_that("model_parameters.mediate-1", { params <- model_parameters(m1) expect_equal( params$Estimate, c(-0.01488, -0.04753, -0.06242, 0.16635), tolerance = 1e-2 ) expect_equal(params$Parameter, c("ACME", "ADE", "Total Effect", "Prop. Mediated")) }) test_that("model_parameters.mediate-2", { params <- model_parameters(m2) expect_equal( params$Estimate, c(0.02484, -0.05793, -0.03309, -0.27914), tolerance = 1e-2 ) expect_equal(params$Parameter, c("ACME", "ADE", "Total Effect", "Prop. Mediated")) }) test_that("model_parameters.mediate-3", { skip_on_cran() ## FIXME: bug in the latest CRAN version of the mediation package # maintainer contacted on 13. June 2025 skip_if(TRUE) jobs$job_disc <- as.factor(jobs$job_disc) b.ord <- MASS::polr( job_disc ~ treat + econ_hard + sex + age, data = jobs, method = "probit", Hess = TRUE ) d.bin <- glm( work1 ~ treat + job_disc + econ_hard + sex + age, data = jobs, family = binomial(link = "probit") ) set.seed(1234) m3 <- mediation::mediate( b.ord, d.bin, sims = 50, treat = "treat", mediator = "job_disc" ) params <- model_parameters(m3) expect_equal( params$Estimate, c( 0.00216, 0.00231, 0.0486, 0.04875, 0.05091, 0.03981, 0.04829, 0.00223, 0.04868, 0.04405 ), tolerance = 1e-2 ) expect_equal( params$Parameter, c( "ACME (control)", "ACME (treated)", "ADE (control)", "ADE (treated)", "Total Effect", "Prop. Mediated (control)", "Prop. Mediated (treated)", "ACME (average)", "ADE (average)", "Prop. 
Mediated (average)" ) ) expect_equal( params$Component, c( "control", "treated", "control", "treated", "Total Effect", "control", "treated", "average", "average", "average" ) ) }) parameters/tests/testthat/test-model_parameters.truncreg.R0000644000176200001440000000117614413515226023702 0ustar liggesuserstest_that("model_parameters.truncreg", { skip_if_not_installed("truncreg") skip_if_not_installed("survival") set.seed(123) data("tobin", package = "survival") model <- truncreg::truncreg( formula = durable ~ age + quant, data = tobin, subset = durable > 0 ) params <- model_parameters(model) expect_equal(params$SE, c(9.21875, 0.22722, 0.03259, 0.56841), tolerance = 1e-3) expect_equal(params$t, c(1.36653, 1.89693, -3.64473, 2.90599), tolerance = 1e-3) expect_equal( colnames(params), c("Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "t", "df_error", "p") ) }) parameters/tests/testthat/test-model_parameters.epi2x2.R0000644000176200001440000000112614716604201023153 0ustar liggesusersskip_on_cran() skip_if_not_installed("epiR") test_that("model_parameters.epi2x2", { data(mtcars) tab <- xtabs(~ am + vs, data = mtcars) out <- model_parameters(epiR::epi.2by2(tab)) expect_identical(out$Parameter, c("RR", "OR", "ARisk", "AFRisk", "PARisk", "PAFRisk")) expect_identical( attributes(out)$pretty_names, c( RR = "Risk Ratio", OR = "Odds Ratio", ARisk = "Attributable Risk", AFRisk = "Attributable Fraction in Exposed (%)", PARisk = "Attributable Risk in Population", PAFRisk = "Attributable Fraction in Population (%)" ) ) }) parameters/tests/testthat/test-mipo.R0000644000176200001440000000217414433114017017465 0ustar liggesusersskip_if_not_installed("mice") skip_if_not_installed("nnet") skip_if_not(packageVersion("insight") > "0.19.1") test_that("param ordinal", { set.seed(1234) d <- suppressWarnings(mice::ampute(mtcars)) ## Ampute mtcars and impute two data sets imp <- suppressWarnings(mice::mice(d$amp, m = 2, printFlag = FALSE)) imp.l <- mice::complete(imp, action = "long") model <- list() ## Fit and pool models for (i in 1:2) { capture.output({ model[[i]] <- nnet::multinom(cyl ~ disp + hp, data = imp.l, subset = .imp == i) }) } pooled <- mice::pool(model) mp <- model_parameters(pooled) expect_snapshot(print(mp)) }) test_that("param normal", { set.seed(1234) d <- suppressWarnings(mice::ampute(mtcars)) ## Ampute mtcars and impute two data sets imp <- suppressWarnings(mice::mice(d$amp, m = 2, printFlag = FALSE)) imp.l <- mice::complete(imp, action = "long") model <- list() ## Fit and pool models for (i in 1:2) model[[i]] <- lm(mpg ~ disp + hp, data = imp.l, subset = .imp == i) pooled <- mice::pool(model) mp <- model_parameters(pooled) expect_snapshot(print(mp)) }) parameters/tests/testthat/test-model_parameters_ordinal.R0000644000176200001440000000603515057525051023563 0ustar liggesusersskip_on_cran() skip_if_not_installed("ordinal") d <- data.frame( Stim = c( "New", "New", "New", "New", "New", "New", "Old", "Old", "Old", "Old", "Old", "Old" ), Response = c( "Confidence1", "Confidence2", "Confidence3", "Confidence4", "Confidence5", "Confidence6", "Confidence1", "Confidence2", "Confidence3", "Confidence4", "Confidence5", "Confidence6" ), w = c(320, 295, 243, 206, 174, 159, 136, 188, 208, 256, 302, 333), stringsAsFactors = FALSE ) m1 <- ordinal::clm(ordered(Response) ~ Stim, scale = ~Stim, link = "probit", data = d, weights = w ) m2 <- ordinal::clm2(ordered(Response) ~ Stim, scale = ~Stim, link = "probit", data = d, weights = w ) test_that("model_parameters.clm", { mp <- model_parameters(m1) 
expect_equal( mp$Parameter, c( "Confidence1|Confidence2", "Confidence2|Confidence3", "Confidence3|Confidence4", "Confidence4|Confidence5", "Confidence5|Confidence6", "StimOld", "StimOld" ), tolerance = 1e-4 ) expect_equal( mp$Component, c("intercept", "intercept", "intercept", "intercept", "intercept", "location", "scale"), tolerance = 1e-4 ) expect_equal( mp$Coefficient, c(-0.72845, -0.15862, 0.26583, 0.69614, 1.23477, 0.55237, -0.04069), tolerance = 1e-4 ) mp <- model_parameters(m1, exponentiate = TRUE) expect_equal( mp$Coefficient, c(0.48266, 0.85332, 1.30451, 2.006, 3.4376, 1.737366, 0.9601267), tolerance = 1e-4 ) expect_snapshot(print(mp)) }) test_that("model_parameters.clm2", { mp <- model_parameters(m2) expect_equal( mp$Parameter, c( "Confidence1|Confidence2", "Confidence2|Confidence3", "Confidence3|Confidence4", "Confidence4|Confidence5", "Confidence5|Confidence6", "StimOld", "StimOld" ), tolerance = 1e-4 ) expect_equal( mp$Component, c("intercept", "intercept", "intercept", "intercept", "intercept", "location", "scale"), tolerance = 1e-4 ) expect_equal( mp$Coefficient, c(-0.72845, -0.15862, 0.26583, 0.69614, 1.23477, 0.55237, -0.04069), tolerance = 1e-4 ) mp <- model_parameters(m2, exponentiate = TRUE) expect_equal( mp$Coefficient, c(0.48266, 0.85332, 1.30451, 2.006, 3.4376, 1.73737, 0.96013), tolerance = 1e-4 ) expect_snapshot(print(mp)) }) test_that("model_parameters.clmm, exponentiate works w/o component column", { data(wine, package = "ordinal") mox <- ordinal::clmm(rating ~ temp + contact + (1 | judge), data = wine) out1 <- model_parameters(mox, exponentiate = FALSE) out2 <- model_parameters(mox, exponentiate = TRUE) expect_equal(out1$Coefficient, c(-1.62367, 1.51337, 4.22853, 6.08877, 3.063, 1.83488, 1.13113), tolerance = 1e-4) expect_equal(out2$Coefficient, c(0.19717, 4.54199, 68.61606, 440.87991, 21.39156, 6.26441, 1.13113), tolerance = 1e-4) expect_identical(attributes(out1)$coefficient_name, "Log-Odds") expect_identical(attributes(out2)$coefficient_name, "Odds Ratio") }) parameters/tests/testthat/test-rank_deficienty.R0000644000176200001440000000077014716604201021662 0ustar liggesusersset.seed(123) data(mtcars) model <- stats::lm( formula = wt ~ am * cyl * vs, data = mtcars ) test_that("model_parameters-rank_deficiency", { expect_message(model_parameters(model)) params <- suppressWarnings(suppressMessages(model_parameters(model))) expect_equal(params$Parameter, c("(Intercept)", "am", "cyl", "vs", "am:cyl", "am:vs"), tolerance = 1e-3) expect_equal(params$Coefficient, c(2.28908, -1.37908, 0.22688, -0.26158, 0.08062, 0.14987), tolerance = 1e-3) }) parameters/tests/testthat/test-simulate_model.R0000644000176200001440000000341614413515226021531 0ustar liggesusersskip_on_os(c("mac", "linux", "solaris")) skip_if_not(getRversion() >= "4.0.0") skip_if_not_installed("sandwich") mod <- lm(mpg ~ wt + cyl, data = mtcars) test_that("simulate_model, lm", { set.seed(123) s1 <- simulate_model(mod, iterations = 100) set.seed(123) s2 <- simulate_model(mod, iterations = 100, vcov = "HC1") expect_identical(dim(s1), c(100L, 3L)) expect_identical(dim(s2), c(100L, 3L)) expect_false(isTRUE(all.equal(head(s1$wt), head(s2$wt), tolerance = 1e-5))) expect_false(isTRUE(all.equal(mean(s1$cyl), mean(s2$cyl), tolerance = 1e-5))) }) skip_on_cran() skip_if_not_installed("glmmTMB") data(fish) mod <- suppressWarnings(glmmTMB::glmmTMB( count ~ child + camper + (1 | persons), ziformula = ~ child + camper + (1 | persons), data = fish, family = glmmTMB::truncated_poisson() )) test_that("simulate_model, 
glmmTMB", { set.seed(123) s <- simulate_model(mod, iterations = 100) expect_identical(dim(s), c(100L, 6L)) expect_identical( colnames(s), c( "(Intercept)", "child", "camper1", "(Intercept)_zi", "child_zi", "camper1_zi" ) ) expect_equal( head(s$child), c(-1.21946, -1.23724, -1.10968, -1.14867, -1.04882, -1.11192), tolerance = 1e-2 ) expect_equal(mean(s$camper1), 0.717259, tolerance = 1e-1) }) test_that("simulate_model, glmmTMB, conditional only", { set.seed(123) s <- simulate_model(mod, component = "conditional", iterations = 100) expect_identical(dim(s), c(100L, 3L)) expect_identical(colnames(s), c("(Intercept)", "child", "camper1")) expect_equal( head(s$child), c(-1.21946, -1.23724, -1.10968, -1.14867, -1.04882, -1.11192), tolerance = 1e-2 ) expect_equal(mean(s$camper1), 0.717259, tolerance = 1e-1) }) parameters/tests/testthat/test-model_parameters.maov.R0000644000176200001440000000070314355245205023010 0ustar liggesusersfit <- lm(cbind(mpg, disp, hp) ~ factor(cyl), data = mtcars) m <- aov(fit) mp <- model_parameters(m) test_that("model_parameters.maov", { expect_equal( mp$Sum_Squares, as.vector(do.call(c, lapply(summary(m), function(i) as.data.frame(i)$`Sum Sq`))), tolerance = 1e-3 ) expect_equal( mp[["F"]], as.vector(do.call(c, lapply(summary(m), function(i) as.data.frame(i)[["F value"]]))), tolerance = 1e-3 ) }) parameters/tests/testthat/test-rstanarm.R0000644000176200001440000000341415006152761020353 0ustar liggesusersskip_on_os("mac") skip_on_cran() skip_if_not_installed("rstanarm") test_that("mp", { set.seed(123) model <- rstanarm::stan_glm( vs ~ mpg + cyl, data = mtcars, refresh = 0, family = "binomial", seed = 123 ) mp <- model_parameters(model, centrality = "mean") s <- summary(model) expect_equal(mp$Mean, unname(s[1:3, 1]), tolerance = 1e-2, ignore_attr = TRUE) expect_equal(mp$Prior_Scale, c(2.5, 0.4148, 1.39984), tolerance = 1e-2) }) test_that("mp2", { data(pbcLong, package = "rstanarm") pbcLong$ybern <- as.integer(pbcLong$logBili >= mean(pbcLong$logBili)) set.seed(123) invisible(capture.output({ model <- rstanarm::stan_mvmer( formula = list( ybern ~ year + (1 | id), albumin ~ sex + year + (year | id) ), data = pbcLong, refresh = 0, seed = 123 ) })) mp <- suppressWarnings(model_parameters(model, centrality = "mean")) s <- summary(model) expect_equal( mp$Mean, unname(s[c("y1|(Intercept)", "y1|year", "y2|(Intercept)", "y2|sexf", "y2|year"), 1]), tolerance = 1e-2, ignore_attr = TRUE ) expect_identical(mp$Response, c("y1", "y1", "y2", "y2", "y2")) expect_equal(mp$Prior_Scale, c(4.9647, 0.3465, 5.57448, 1.39362, 0.38906), tolerance = 1e-2) }) test_that("mp3", { skip_if_not_installed("curl") skip_if_offline() skip_if_not_installed("httr2") m <- insight::download_model("stanreg_merMod_1") skip_if(is.null(m)) out <- model_parameters(m, effects = "grouplevel") expect_identical(dim(out), c(3L, 9L)) expect_equal(out$Median, c(0.21861, 0.17629, -0.68358), tolerance = 1e-3) expect_identical(out$Parameter, c("b[(Intercept) gear:3]", "b[(Intercept) gear:4]", "b[(Intercept) gear:5]")) }) parameters/tests/testthat/test-model_parameters.mixed.R0000644000176200001440000001432115006147473023157 0ustar liggesusersskip_if_not_installed("lme4") skip_on_cran() m1 <- lme4::lmer(wt ~ cyl + (1 | gear), data = mtcars) m2 <- lme4::glmer(vs ~ cyl + (1 | gear), data = mtcars, family = "binomial") m3 <- lme4::lmer(wt ~ cyl + mpg + (1 | gear), data = mtcars) test_that("model_parameters.mixed", { params <- model_parameters(m3, keep = "^cyl", effects = "fixed") expect_identical(dim(params), c(1L, 10L)) 
expect_message({ params <- model_parameters(m3, keep = "^abc", effects = "fixed") }) expect_identical(dim(params), c(3L, 10L)) params <- model_parameters(m1, ci_method = "normal", effects = "fixed") expect_identical(c(nrow(params), ncol(params)), c(2L, 10L)) expect_equal(params$CI_high, c(1.6373105660317, 0.554067677205595), tolerance = 1e-3) params <- model_parameters(m1, effects = "fixed") expect_identical(c(nrow(params), ncol(params)), c(2L, 10L)) expect_equal(params$CI_high, c(1.68181, 0.56083), tolerance = 1e-3) params <- model_parameters(m1, ci = c(0.8, 0.9), ci_method = "normal", effects = "fixed") expect_identical(c(nrow(params), ncol(params)), c(2L, 11L)) expect_equal(params$CI_high_0.8, c(1.29595665381331, 0.502185700948862), tolerance = 1e-3) expect_equal(params$CI_high_0.9, c(1.47875781798108, 0.529969433080186), tolerance = 1e-3) params <- model_parameters(m1, ci_method = "normal", effects = "fixed") lme4_ci <- na.omit(as.data.frame(confint(m1, method = "Wald"))) expect_equal(params$CI_low, lme4_ci$`2.5 %`, tolerance = 1e-4) params <- model_parameters(m1, ci = c(0.8, 0.9), ci_method = "wald", effects = "fixed") expect_identical(c(nrow(params), ncol(params)), c(2L, 11L)) expect_equal(params$CI_high_0.8, c(1.31154, 0.50455), tolerance = 1e-3) expect_equal(params$CI_high_0.9, c(1.50707, 0.53427), tolerance = 1e-3) params <- model_parameters(m1, ci = c(0.8, 0.9), effects = "fixed") expect_identical(c(nrow(params), ncol(params)), c(2L, 11L)) expect_equal(params$CI_high_0.8, c(1.31154, 0.50455), tolerance = 1e-3) expect_equal(params$CI_high_0.9, c(1.50707, 0.53427), tolerance = 1e-3) params <- model_parameters(m2, effects = "fixed") expect_identical(c(nrow(params), ncol(params)), c(2L, 10L)) model <- lme4::glmer(vs ~ drat + cyl + (1 | gear), data = mtcars, family = "binomial") params <- model_parameters(model, effects = "fixed") cs <- coef(summary(model)) expect_identical(c(nrow(params), ncol(params)), c(3L, 10L)) expect_named(params, c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p", "Effects" )) expect_identical(params$Parameter, rownames(cs)) }) test_that("model_parameters.mixed bootstrap", { skip_on_os(c("linux", "mac")) skip_on_cran() set.seed(123) suppressWarnings(expect_message( { params <- model_parameters(m1, bootstrap = TRUE, iterations = 100) }, regex = "only returns" )) expect_equal(params$Coefficient, c(0.60496, 0.41412), tolerance = 1e-3) }) test_that("model_parameters.mixed-random", { params <- model_parameters(m1, effects = "random", group_level = TRUE) expect_identical(c(nrow(params), ncol(params)), c(3L, 9L)) expect_identical(as.vector(params$Parameter), c("(Intercept)", "(Intercept)", "(Intercept)")) expect_identical(as.vector(params$Level), c("3", "4", "5")) expect_equal(params$Coefficient, c(0.1692, 0.0566, -0.2259), tolerance = 1e-2) }) test_that("model_parameters.mixed-random, grouplevel", { params <- model_parameters(m1, effects = "grouplevel") expect_identical(c(nrow(params), ncol(params)), c(3L, 9L)) expect_identical(as.vector(params$Parameter), c("(Intercept)", "(Intercept)", "(Intercept)")) expect_identical(as.vector(params$Level), c("3", "4", "5")) expect_equal(params$Coefficient, c(0.1692, 0.0566, -0.2259), tolerance = 1e-2) }) test_that("model_parameters.mixed-ran_pars", { params <- model_parameters(m1, effects = "random") expect_identical(c(nrow(params), ncol(params)), c(2L, 8L)) expect_identical( as.vector(params$Parameter), c("SD (Intercept)", "SD (Observations)") ) expect_equal(params$Coefficient, c(0.27049, 
0.59385), tolerance = 1e-2) }) test_that("model_parameters.mixed-all", { params <- model_parameters(m1, effects = "all") expect_identical(c(nrow(params), ncol(params)), c(4L, 11L)) expect_identical( as.vector(params$Parameter), c("(Intercept)", "cyl", "SD (Intercept)", "SD (Observations)") ) expect_equal(params$Coefficient, c(0.65112, 0.40418, 0.27049, 0.59385), tolerance = 1e-2) }) test_that("model_parameters.mixed-all_pars", { params <- model_parameters(m1, effects = "all", group_level = TRUE) expect_identical(c(nrow(params), ncol(params)), c(5L, 12L)) expect_identical( as.vector(params$Parameter), c("(Intercept)", "cyl", "(Intercept)", "(Intercept)", "(Intercept)") ) expect_equal(as.vector(params$Level), c(NA, NA, "3", "4", "5"), ignore_attr = TRUE) expect_equal( params$Coefficient, c(0.65112, 0.40418, 0.16923, 0.05663, -0.22586), tolerance = 1e-2 ) }) data("qol_cancer") qol_cancer <- cbind( qol_cancer, demean(qol_cancer, select = c("phq4", "QoL"), by = "ID") ) model <- lme4::lmer( QoL ~ time + phq4_within + phq4_between + (1 | ID), data = qol_cancer ) test_that("model_parameters.mixed", { mp <- model_parameters(model, effects = "fixed", wb_component = TRUE) mp2 <- model_parameters(model, effects = "fixed", wb_component = FALSE) expect_identical(mp$Component, c("rewb-contextual", "rewb-contextual", "within", "between")) expect_null(mp2$Component) }) test_that("print-model_parameters-1", { expect_snapshot(model_parameters(model, effects = "fixed", wb_component = TRUE)) expect_snapshot(model_parameters(model, effects = "fixed", wb_component = FALSE)) }) test_that("print-model_parameters-2", { skip_if_not_installed("merDeriv") expect_snapshot(model_parameters(m1, effects = "all", wb_component = TRUE)) expect_snapshot(model_parameters(m1, effects = "all", wb_component = FALSE)) expect_snapshot(model_parameters(m1, effects = "fixed", include_info = TRUE, wb_component = TRUE)) expect_snapshot(model_parameters(m1, effects = "fixed", include_info = TRUE, wb_component = FALSE)) }) parameters/tests/testthat/test-parameters_type-2.R0000644000176200001440000000460414355245205022073 0ustar liggesusersdat <- iris m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type default contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, "versicolor", "virginica")) }) dat <- iris dat$Species <- as.ordered(dat$Species) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type ordered factor", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "ordered", "ordered")) expect_equal(p_type$Level, c(NA, "[linear]", "[quadratic]")) }) dat <- iris dat$Species <- as.ordered(dat$Species) contrasts(dat$Species) <- contr.treatment(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type ordered factor", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, "2", "3")) }) dat <- iris contrasts(dat$Species) <- contr.poly(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type poly contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, ".L", ".Q")) }) dat <- iris contrasts(dat$Species) <- contr.treatment(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type treatment contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) 
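  # After replacing the default polynomial contrasts with contr.treatment(3),
  # the ordered factor is typed like an unordered one, with plain level codes
  # instead of the [linear]/[quadratic] labels asserted in the previous test.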
expect_equal(p_type$Level, c(NA, "2", "3")) }) dat <- iris contrasts(dat$Species) <- contr.sum(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type sum contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, "1", "2")) }) dat <- iris contrasts(dat$Species) <- contr.helmert(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type helmert contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, "1", "2")) }) dat <- iris contrasts(dat$Species) <- contr.SAS(3) m <- lm(Sepal.Length ~ Species, data = dat) test_that("parameters_type SAS contrasts", { p_type <- parameters_type(m) expect_equal(p_type$Type, c("intercept", "factor", "factor")) expect_equal(p_type$Level, c(NA, "1", "2")) }) parameters/tests/testthat/test-sampleSelection.R0000644000176200001440000000207715036175633021665 0ustar liggesusersskip_on_os("mac") skip_on_cran() skip_if_not_installed("sampleSelection") skip_if_not_installed("mvtnorm") test_that("model_parameters", { set.seed(0) vc <- diag(3) vc[lower.tri(vc)] <- c(0.9, 0.5, 0.1) vc[upper.tri(vc)] <- vc[lower.tri(vc)] eps <- mvtnorm::rmvnorm(500, c(0, 0, 0), vc) xs <- runif(500) ys <- xs + eps[, 1] > 0 xo1 <- runif(500) yo1 <- xo1 + eps[, 2] xo2 <- runif(500) yo2 <- xo2 + eps[, 3] yo <- ifelse(ys, yo2, yo1) ys <- as.numeric(ys) + 1 dat_sel <<- data.frame(ys, yo, yo1, yo2, xs, xo1, xo2) m1 <- sampleSelection::selection(ys ~ xs, list(yo1 ~ xo1, yo2 ~ xo2), data = dat_sel) data(Mroz87, package = "sampleSelection") Mroz87$kids <- (Mroz87$kids5 + Mroz87$kids618 > 0) m2 <- sampleSelection::selection( lfp ~ age + I(age^2) + faminc + kids + educ, wage ~ exper + I(exper^2) + educ + city, data = Mroz87 ) expect_snapshot(print(model_parameters(m1), zap_small = TRUE, table_width = Inf)) expect_snapshot(print(model_parameters(m2), zap_small = TRUE, table_width = Inf)) }) parameters/tests/testthat/test-zeroinfl.R0000644000176200001440000000461014413515226020353 0ustar liggesusersskip_if_not_installed("pscl") data("bioChemists", package = "pscl") m1 <- pscl::zeroinfl(art ~ fem + mar + kid5 + ment | kid5 + phd, data = bioChemists) test_that("ci", { expect_equal( ci(m1)$CI_low, c(0.42844, -0.34446, 0.00734, -0.26277, 0.01717, -1.77978, -0.37558, -0.51411), tolerance = 1e-4 ) }) test_that("se", { expect_equal( standard_error(m1)$SE, c(0.06797, 0.05868, 0.06593, 0.04874, 0.00212, 0.43378, 0.21509, 0.1352), tolerance = 1e-4 ) }) test_that("p_value", { expect_equal( p_value(m1)$p, c(0, 9e-05, 0.03833, 6e-04, 0, 0.03211, 0.83068, 0.06539), tolerance = 1e-4 ) expect_s3_class(p_value(m1, method = "robust"), "data.frame") expect_s3_class(p_value(m1, method = "robust", vcov = NULL), "data.frame") expect_s3_class(p_value(m1, vcov = NULL), "data.frame") ## TODO package sandwich errors for these... # expect_s3_class(p_value(m1, vcov = "HC"), "data.frame") # expect_s3_class(p_value(m1, method = "robust", vcov = "HC"), "data.frame") }) test_that("model_parameters", { expect_equal( model_parameters(m1)$Coefficient, as.vector(coef(m1)), tolerance = 1e-4 ) }) m2 <- pscl::zeroinfl(formula = art ~ . 
| 1, data = bioChemists, dist = "negbin") test_that("model_parameters", { expect_equal( model_parameters(m2)$Coefficient, as.vector(coef(m2)), tolerance = 1e-4 ) expect_equal( model_parameters(m2)$Component, c( "conditional", "conditional", "conditional", "conditional", "conditional", "conditional", "zero_inflated" ) ) }) m3 <- pscl::zeroinfl(art ~ mar + kid5 * fem + ment | kid5 * fem + phd, data = bioChemists) test_that("model_parameters", { expect_equal( model_parameters(m3)$Coefficient, as.vector(coef(m3)), tolerance = 1e-4 ) }) test_that("parameters_type", { expect_equal( parameters_type(m3)$Type, c( "intercept", "factor", "numeric", "factor", "numeric", "interaction", "intercept", "numeric", "factor", "numeric", "interaction" ), tolerance = 1e-4 ) }) test_that("parameters_type", { expect_equal( parameters_type(m3)$Link, c( "Mean", "Difference", "Association", "Difference", "Association", "Difference", "Mean", "Association", "Difference", "Association", "Difference" ), tolerance = 1e-4 ) }) parameters/tests/testthat/test-model_parameters.metaBMA.R0000644000176200001440000000715414412513617023322 0ustar liggesusersskip_if_not_installed("metaBMA") data(towels, package = "metaBMA") set.seed(1234) m <- suppressWarnings( metaBMA::meta_random( logOR, SE, study, data = towels, ci = 0.95, iter = 100, logml_iter = 200 ) ) test_that("model_parameters.meta_random", { params <- model_parameters(m) expect_identical( params$Parameter, c( "Goldstein, Cialdini, & Griskevicius (2008), Exp. 1", "Goldstein, Cialdini, & Griskevicius (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 3", "Mair & Bergin-Seers (2010), Exp. 1", "Bohner & Schluter (2014), Exp. 1", "Bohner & Schluter (2014), Exp. 2", "Overall", "tau" ) ) expect_equal( params$Coefficient, c(0.3806, 0.30494, 0.20554, 0.25084, 0.28768, -0.12154, -1.45792, 0.2004, 0.12107), tolerance = 1e-3 ) expect_equal( params$CI_low, c(-0.00686, 0.03816, -0.16998, -0.0825, -1.32685, -0.60772, -2.94785, -0.02744, 0.02641), tolerance = 1e-3 ) expect_identical( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Weight", "BF", "Rhat", "ESS", "Component", "Prior_Distribution", "Prior_Location", "Prior_Scale", "Method" ) ) }) set.seed(1234) m2 <- metaBMA::meta_fixed( logOR, SE, study, data = towels, ci = 0.95 ) test_that("model_parameters.meta_fixed", { params <- model_parameters(m2) expect_identical(params$Parameter, c( "Goldstein, Cialdini, & Griskevicius (2008), Exp. 1", "Goldstein, Cialdini, & Griskevicius (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 3", "Mair & Bergin-Seers (2010), Exp. 1", "Bohner & Schluter (2014), Exp. 1", "Bohner & Schluter (2014), Exp. 
2", "Overall" )) expect_equal(params$Coefficient, c(0.3806, 0.30494, 0.20554, 0.25084, 0.28768, -0.12154, -1.45792, 0.22141), tolerance = 1e-3 ) expect_equal( params$CI_low, c(-0.00686, 0.03816, -0.16998, -0.0825, -1.32685, -0.60772, -2.94785, 0.06839), tolerance = 1e-3 ) expect_identical( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Weight", "BF", "Rhat", "ESS", "Component", "Prior_Distribution", "Prior_Location", "Prior_Scale", "Method" ) ) }) set.seed(1234) m3 <- suppressWarnings( metaBMA::meta_random( logOR, SE, study, data = towels, ci = 0.99, iter = 100, logml_iter = 200 ) ) test_that("model_parameters.meta_random", { params <- model_parameters(m3) expect_identical( params$Parameter, c( "Goldstein, Cialdini, & Griskevicius (2008), Exp. 1", "Goldstein, Cialdini, & Griskevicius (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 2", "Schultz, Khazian, & Zaleski (2008), Exp. 3", "Mair & Bergin-Seers (2010), Exp. 1", "Bohner & Schluter (2014), Exp. 1", "Bohner & Schluter (2014), Exp. 2", "Overall", "tau" ) ) expect_equal( params$Coefficient, c(0.3806, 0.30494, 0.20554, 0.25084, 0.28768, -0.12154, -1.45792, 0.2004, 0.12107), tolerance = 1e-3 ) expect_equal( params$CI_low, c(-0.00686, 0.03816, -0.16998, -0.0825, -1.32685, -0.60772, -2.94785, -0.15494, 0.01993), tolerance = 1e-3 ) expect_identical( colnames(params), c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "Weight", "BF", "Rhat", "ESS", "Component", "Prior_Distribution", "Prior_Location", "Prior_Scale", "Method" ) ) }) parameters/tests/testthat/test-bracl.R0000644000176200001440000000322414413515226017606 0ustar liggesusersskip_if_not_installed("brglm2") data("stemcell", package = "brglm2") levels(stemcell$research) <- c("definitly", "alterly", "probably not", "definitely not") m1 <- brglm2::bracl(research ~ as.numeric(religion) + gender, weights = frequency, data = stemcell, type = "ML") test_that("model_parameters", { params <- model_parameters(m1, verbose = FALSE) expect_identical( params$Response, c( "definitly", "alterly", "probably not", "definitly", "alterly", "probably not", "definitly", "alterly", "probably not" ) ) expect_identical( params$Parameter, c( "definitly:(Intercept)", "alterly:(Intercept)", "probably not:(Intercept)", "definitly:as.numeric(religion)", "alterly:as.numeric(religion)", "probably not:as.numeric(religion)", "definitly:genderfemale", "alterly:genderfemale", "probably not:genderfemale" ) ) expect_equal( params$Coefficient, c(-1.24836, 0.47098, 0.42741, 0.4382, 0.25962, 0.01192, -0.13683, 0.18707, -0.16093), tolerance = 1e-3 ) }) # check vcov args test_that("model_parameters", { expect_message({ out <- model_parameters(m1, vcov = "vcovHAC") }) expect_equal(out$SE, unname(coef(summary(m1))[, 2]), tolerance = 1e-3) }) # check order of response levels test_that("print model_parameters", { out <- suppressMessages(utils::capture.output(print(model_parameters(m1, verbose = FALSE)))) expect_identical(out[1], "# Response level: definitly") expect_identical(out[9], "# Response level: alterly") expect_identical(out[17], "# Response level: probably not") }) parameters/tests/testthat/test-model_parameters_std_mixed.R0000644000176200001440000001427715111301621024104 0ustar liggesusersskip_on_cran() skip_if_not_installed("effectsize") skip_if_not_installed("lme4") data(iris) set.seed(1234) iris$grp <- as.factor(sample(1:3, nrow(iris), replace = TRUE)) # fit example model model <- lme4::lmer( Sepal.Length ~ Species * Sepal.Width + Petal.Length + (1 | grp), 
data = iris ) test_that("model_parameters, standardize-refit, wald-normal", { params <- model_parameters( model, ci_method = "normal", standardize = "refit", verbose = FALSE, effects = "fixed" ) expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal( params$Coefficient, c(0.96949, -1.28631, -1.81461, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3 ) expect_equal( params$SE, c(0.2045, 0.2619, 0.34035, 0.05968, 0.13914, 0.09762, 0.0945), tolerance = 1e-3 ) expect_equal( params$CI_high, c(1.37031, -0.77301, -1.14754, 0.46488, 2.01523, -0.06287, -0.00312), tolerance = 1e-3 ) }) test_that("model_parameters, standardize-refit, wald-t", { params <- model_parameters( model, ci_method = "wald", standardize = "refit", verbose = FALSE, effects = "fixed" ) expect_equal( params$CI_high, c(1.37378, -0.76856, -1.14177, 0.4659, 2.01759, -0.06121, -0.00151), tolerance = 1e-3 ) }) test_that("model_parameters, standardize-refit", { params <- model_parameters( model, standardize = "refit", verbose = FALSE, effects = "fixed" ) expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal( params$Coefficient, c(0.96949, -1.28631, -1.81461, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3 ) expect_equal( params$SE, c(0.2045, 0.2619, 0.34035, 0.05968, 0.13914, 0.09762, 0.0945), tolerance = 1e-3 ) expect_equal( params$CI_high, c(1.37378, -0.76856, -1.14177, 0.4659, 2.01759, -0.06121, -0.00151), tolerance = 1e-3 ) params <- model_parameters( model, standardize = "refit", verbose = FALSE, effects = "all" ) paramsZ <- model_parameters( effectsize::standardize(model), effects = "all", verbose = FALSE ) expect_equal(paramsZ, params, ignore_attr = TRUE) }) test_that("model_parameters, standardize-posthoc", { params <- model_parameters( model, standardize = "posthoc", verbose = FALSE, effects = "fixed" ) expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal( params$Std_Coefficient, c(0, 0.49679, -0.49355, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3 ) expect_equal( params$SE, c(0, 0.66228, 0.70202, 0.05968, 0.13914, 0.09762, 0.0945), tolerance = 1e-3 ) expect_equal( params$CI_high, c(0, 1.80607, 0.8943, 0.4659, 2.01759, -0.06121, -0.00151), tolerance = 1e-3 ) }) test_that("model_parameters, standardize-posthoc", { params <- model_parameters( model, ci_method = "normal", standardize = "posthoc", verbose = FALSE, effects = "fixed" ) expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal( params$Std_Coefficient, c(0, 0.49679, -0.49355, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3 ) expect_equal( params$SE, c(0, 0.66228, 0.70202, 0.05968, 0.13914, 0.09762, 0.0945), tolerance = 1e-3 ) expect_equal( params$CI_high, c(0, 1.79483, 0.88238, 0.46488, 2.01523, -0.06287, -0.00312), tolerance = 1e-3 ) }) test_that("model_parameters, standardize-posthoc, wald-t", { params <- model_parameters( model, ci_method = "wald", standardize = "posthoc", verbose = FALSE, effects = "fixed" ) expect_equal( params$CI_high, c(0, 1.80607, 0.8943, 0.4659, 2.01759, -0.06121, -0.00151), tolerance = 1e-3 ) }) test_that("model_parameters, standardize-basic", { params <- model_parameters( model, ci_method = "normal", standardize = "basic", verbose = FALSE, effects = "fixed" ) expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal( params$Std_Coefficient, c(0, 0.23497, -0.23344, 0.34791, 1.74252, -0.77129, -0.61304), tolerance = 1e-3 ) expect_equal( params$SE, c(0, 0.31325, 0.33204, 0.05968, 0.13914, 0.2962, 0.30761), tolerance = 1e-3 ) expect_equal( params$CI_high, 
c(0, 0.84893, 0.41735, 0.46488, 2.01523, -0.19075, -0.01014), tolerance = 1e-3 ) }) test_that("model_parameters, standardize-basic", { params <- model_parameters( model, ci_method = "residual", standardize = "basic", verbose = FALSE, effects = "fixed" ) expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal( params$Std_Coefficient, c(0, 0.23497, -0.23344, 0.34791, 1.74252, -0.77129, -0.61304), tolerance = 1e-3 ) expect_equal( params$SE, c(0, 0.31325, 0.33204, 0.05968, 0.13914, 0.2962, 0.30761), tolerance = 1e-3 ) expect_equal( params$CI_high, c(0, 0.85424, 0.42299, 0.4659, 2.01759, -0.18572, -0.00492), tolerance = 1e-3 ) }) test_that("model_parameters, standardize-basic", { params <- model_parameters( model, standardize = "basic", verbose = FALSE, effects = "fixed" ) expect_equal( params$CI_high, c(0, 0.85424, 0.42299, 0.4659, 2.01759, -0.18572, -0.00492), tolerance = 1e-3 ) }) test_that("model_parameters, standardize-refit robust", { skip_if_not_installed("clubSandwich") params <- model_parameters( model, standardize = "refit", effects = "fixed", vcov = "CR", vcov_args = list(type = "CR1", cluster = iris$grp), verbose = FALSE ) expect_equal(c(nrow(params), ncol(params)), c(7, 10)) expect_equal( params$Coefficient, c(0.96949, -1.28631, -1.81461, 0.34791, 1.74252, -0.25421, -0.18834), tolerance = 1e-3 ) expect_equal( params$SE, c(0.07726, 0.33406, 0.22647, 0.0524, 0.10092, 0.18537, 0.05552), tolerance = 1e-3 ) expect_equal( params$CI_high, c(1.12224, -0.6259, -1.36691, 0.45151, 1.94204, 0.11227, -0.07858), tolerance = 1e-3 ) }) parameters/tests/testthat/test-parameters_selection.R0000644000176200001440000000025014355245205022731 0ustar liggesuserstest_that("select_parameters", { model <- lm(mpg ~ ., data = mtcars) x <- select_parameters(model) expect_equal(n_parameters(model) - n_parameters(x), 7) }) parameters/tests/testthat/test-base.R0000644000176200001440000000163014716604201017432 0ustar liggesuserstest_that("model_parameters.data.frame", { data(iris) expect_warning(expect_null(model_parameters(iris))) }) test_that("model_parameters.data.frame as draws", { data(iris) mp <- suppressWarnings(model_parameters(iris[1:4], as_draws = TRUE)) expect_equal(mp$Median, c(5.8, 3, 4.35, 1.3), tolerance = 1e-2, ignore_attr = TRUE) expect_identical(mp$Parameter, c("Sepal.Length", "Sepal.Width", "Petal.Length", "Petal.Width")) expect_identical(colnames(mp), c("Parameter", "Median", "CI_low", "CI_high", "pd")) }) test_that("model_parameters.data.frame as draws, exponentiate", { data(iris) mp <- suppressWarnings(model_parameters(iris[1:4], as_draws = TRUE, exponentiate = TRUE)) expect_equal(mp$Median, c(330.29956, 20.08554, 77.47846, 3.6693), tolerance = 1e-2, ignore_attr = TRUE) }) # require model input test_that("model_parameters", { expect_error(model_parameters()) }) parameters/tests/testthat/test-pool_parameters.R0000644000176200001440000001250715022763445021730 0ustar liggesuserstest_that("pooled parameters", { skip_if_not_installed("mice") data("nhanes2", package = "mice") set.seed(123) imp <- mice::mice(nhanes2, printFlag = FALSE) models <- lapply(1:5, function(i) { lm(bmi ~ age + hyp + chl, data = mice::complete(imp, action = i)) }) pp <- pool_parameters(models) expect_equal(pp$df_error, c(9.2225, 8.1903, 3.6727, 10.264, 6.4385), tolerance = 1e-3) expect_snapshot(print(pp)) }) test_that("pooled parameters", { skip_if_not_installed("mice") skip_if_not_installed("datawizard") data("nhanes2", package = "mice") nhanes2$hyp <- datawizard::slide(as.numeric(nhanes2$hyp)) set.seed(123) imp 
<- mice::mice(nhanes2, printFlag = FALSE) models <- lapply(1:5, function(i) { glm(hyp ~ age + chl, family = binomial, data = mice::complete(imp, action = i)) }) pp1 <- pool_parameters(models) expect_equal(pp1$df_error, c(Inf, Inf, Inf, Inf), tolerance = 1e-3) pp2 <- pool_parameters(models, ci_method = "residual") m_mice <- with(data = imp, exp = glm(hyp ~ age + chl, family = binomial)) pp3 <- summary(mice::pool(m_mice)) expect_equal(pp2$df_error, pp3$df, tolerance = 1e-3) }) skip_on_cran() test_that("pooled parameters, glmmTMB, components", { skip_if_not_installed("mice") skip_if_not_installed("glmmTMB") sim1 <- function(nfac = 4, nt = 10, facsd = 0.1, tsd = 0.15, mu = 0, residsd = 1) { dat <- expand.grid(fac = factor(letters[1:nfac]), t = 1:nt) n <- nrow(dat) dat$REfac <- rnorm(nfac, sd = facsd)[dat$fac] dat$REt <- rnorm(nt, sd = tsd)[dat$t] dat$x <- rnorm(n, mean = mu, sd = residsd) + dat$REfac + dat$REt dat } set.seed(101) d1 <- sim1(mu = 100, residsd = 10) d2 <- sim1(mu = 200, residsd = 5) d1$sd <- "ten" d2$sd <- "five" dat <- rbind(d1, d2) set.seed(101) dat$REfac[sample.int(nrow(dat), 10)] <- NA dat$x[sample.int(nrow(dat), 10)] <- NA dat$sd[sample.int(nrow(dat), 10)] <- NA impdat <- suppressWarnings(mice::mice(dat, printFlag = FALSE)) models <- lapply(1:5, function(i) { glmmTMB::glmmTMB( x ~ sd + (1 | t), dispformula = ~sd, data = mice::complete(impdat, action = i) ) }) out <- pool_parameters(models, component = "conditional") expect_named( out, c( "Parameter", "Coefficient", "SE", "CI_low", "CI_high", "Statistic", "df_error", "p" ) ) expect_equal(out$Coefficient, c(187.280225, -87.838969), tolerance = 1e-3) out <- suppressMessages(pool_parameters(models, component = "all", effects = "all")) expect_named( out, c( "Parameter", "Coefficient", "Effects", "SE", "CI_low", "CI_high", "Statistic", "df_error", "p", "Component" ) ) expect_equal( out$Coefficient, c(187.280225, -87.838969, 3.51576, -1.032665, 0.610992, NaN), tolerance = 1e-3 ) out <- pool_parameters(models, component = "all", effects = "fixed") expect_named( out, c( "Parameter", "Coefficient", "SE", "CI_low", "CI_high", "Statistic", "df_error", "p", "Component" ) ) expect_equal( out$Coefficient, c(187.280225, -87.838969, 3.51576, -1.032665), tolerance = 1e-3 ) }) test_that("pooled parameters, glmmTMB, zero-inflated", { skip_if_not_installed("mice") skip_if_not_installed("glmmTMB") skip_if_not_installed("broom.mixed") data(Salamanders, package = "glmmTMB") set.seed(123) Salamanders$cover[sample.int(nrow(Salamanders), 50)] <- NA Salamanders$mined[sample.int(nrow(Salamanders), 10)] <- NA impdat <- suppressWarnings(mice::mice(Salamanders, printFlag = FALSE)) models <- lapply(1:5, function(i) { glmmTMB::glmmTMB( count ~ mined + cover + (1 | site), ziformula = ~mined, family = poisson(), data = mice::complete(impdat, action = i) ) }) out <- pool_parameters(models, ci_method = "residual") expect_named( out, c( "Parameter", "Coefficient", "SE", "CI_low", "CI_high", "Statistic", "df_error", "p", "Component" ) ) expect_equal( out$Coefficient, c(0.13409, 1.198551, -0.181912, 1.253029, -1.844026), tolerance = 1e-3 ) # validate against mice --------------- m_mice <- suppressWarnings(with(data = impdat, exp = glmmTMB::glmmTMB( count ~ mined + cover + (1 | site), ziformula = ~mined, family = poisson() ))) mice_summ <- suppressWarnings(summary(mice::pool(m_mice, dfcom = Inf))) expect_equal(out$Coefficient, mice_summ$estimate, tolerance = 1e-3) expect_equal(out$SE, mice_summ$std.error, tolerance = 1e-3) expect_equal(out$p, mice_summ$p.value, 
tolerance = 1e-3) out <- pool_parameters(models, component = "all", effects = "all") expect_named( out, c( "Parameter", "Coefficient", "Effects", "SE", "CI_low", "CI_high", "Statistic", "df_error", "p", "Component" ) ) expect_equal( out$Coefficient, c(0.13409, 1.198551, -0.181912, 1.253029, -1.844026, 0.158795), tolerance = 1e-3 ) out <- pool_parameters(models, component = "conditional", effects = "fixed") expect_named( out, c( "Parameter", "Coefficient", "SE", "CI_low", "CI_high", "Statistic", "df_error", "p" ) ) expect_equal( out$Coefficient, c(0.13409, 1.198551, -0.181912), tolerance = 1e-3 ) }) parameters/tests/testthat/test-p_adjust.R0000644000176200001440000000767515033425412020334 0ustar liggesusersskip_on_cran() test_that("model_parameters, p-adjust", { model <- lm(mpg ~ wt * cyl + am + log(hp), data = mtcars) mp <- model_parameters(model) expect_equal(mp$p, c(0, 0.00304, 0.02765, 0.65851, 0.01068, 0.02312), tolerance = 1e-3) mp <- model_parameters(model, p_adjust = "BH") expect_equal(mp$p, c(0, 0.00912, 0.03318, 0.65851, 0.02137, 0.03318), tolerance = 1e-3) mp <- model_parameters(model, p_adjust = "bonferroni") expect_equal(mp$p, c(0, 0.01824, 0.16588, 1, 0.06411, 0.13869), tolerance = 1e-3) mp <- model_parameters(model, p_adjust = "scheffe") expect_equal(mp$p, c(0, 0.1425, 0.50499, 0.99981, 0.30911, 0.46396), tolerance = 1e-3) mp <- model_parameters(model, p_adjust = "tukey") expect_equal(mp$p, c(0, 0.03225, 0.21714, 0.99748, 0.09875, 0.18822), tolerance = 1e-3) mp <- model_parameters(model, p_adjust = "sidak") expect_equal(mp$p, c(0, 0.0181, 0.15483, 0.99841, 0.06242, 0.13092), tolerance = 1e-3) }) test_that("model_parameters, p-adjust after keep/drop", { model <- lm(mpg ~ wt + cyl + gear + hp, data = mtcars) mp <- model_parameters(model, p_adjust = "bonferroni") expect_equal( mp[["p"]], p.adjust(coef(summary(model))[, 4], "bonferroni"), tolerance = 1e-4, ignore_attr = TRUE ) expect_message( mp <- model_parameters(model, include_info = TRUE, keep = c("wt", "hp"), p_adjust = "bonferroni"), "more than 1 element" ) expect_equal( mp[["p"]], p.adjust(coef(summary(model))[c(2, 5), 4], "bonferroni"), tolerance = 1e-4, ignore_attr = TRUE ) expect_message( mp <- model_parameters(model, include_info = TRUE, keep = c("cyl", "gear"), p_adjust = "bonferroni"), "more than 1 element" ) expect_equal( mp[["p"]], p.adjust(coef(summary(model))[3:4, 4], "bonferroni"), tolerance = 1e-4, ignore_attr = TRUE ) }) test_that("model_parameters, emmeans, p-adjust", { skip_if_not_installed("emmeans") m <- pairs(emmeans::emmeans(aov(Sepal.Width ~ Species, data = iris), ~Species)) mp <- model_parameters(m) expect_equal(mp$p, as.data.frame(m)$p.value, tolerance = 1e-4) m <- pairs(emmeans::emmeans(aov(Sepal.Width ~ Species, data = iris), ~Species), adjust = "scheffe") mp <- model_parameters(m, p_adjust = "scheffe") expect_equal(mp$p, as.data.frame(m)$p.value, tolerance = 1e-4) }) test_that("model_parameters, simultaneous confidence intervals", { skip_if_not_installed("mvtnorm") m <- lm(mpg ~ wt + hp, data = mtcars) set.seed(123) out <- model_parameters(m, p_adjust = "sup-t") expect_snapshot(print(out, zap_small = TRUE)) skip_if_not_installed("lme4") data("qol_cancer") qol_cancer <- cbind( qol_cancer, demean(qol_cancer, select = c("phq4", "QoL"), by = "ID") ) model <- lme4::lmer( QoL ~ time + phq4_within + phq4_between + (1 | ID), data = qol_cancer ) set.seed(123) mp <- model_parameters(model, p_adjust = "sup-t") expect_equal(mp$p, c(0, 0.27904, 0, 0, NA, NA), tolerance = 1e-3) expect_equal(mp$CI_low,
c(67.70195, -0.48377, -4.66802, -7.51974, 8.42651, 11.50991), tolerance = 1e-3) skip_if_not_installed("glmmTMB") data("Salamanders", package = "glmmTMB") model <- suppressWarnings(glmmTMB::glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~ spp + mined, family = glmmTMB::nbinom2, data = Salamanders )) set.seed(123) mp <- model_parameters(model, p_adjust = "sup-t") expect_equal( mp$p, c( 0.56769, 0.57466, 0.98029, 0.83123, 0.22681, 0.06271, 0.99876, 0.00068, 0.61786, 0.95269, 0.81296, 0.60973, 0.97504, 0.80566, 0.81871, 0.00024, NA, NA ), tolerance = 1e-3 ) expect_equal( mp$CI_low, c( -1.6935, -2.6841, -0.45839, -1.30244, -0.14911, -0.01933, -0.76516, 0.4494, -0.77256, -2.41501, -3.08449, -0.87083, -2.50859, -2.91223, -8.38616, -4.18299, 0.92944, 0.16671 ), tolerance = 1e-3) }) parameters/tests/testthat/test-polr.R0000644000176200001440000000077415111301621017472 0ustar liggesusersskip_on_cran() skip_if_not_installed("MASS") test_that("robust-se polr", { data(housing, package = "MASS") m <- MASS::polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) out <- model_parameters(m) expect_identical(attributes(out)$coefficient_name, "Log-Odds") m <- MASS::polr( Sat ~ Infl + Type + Cont, weights = Freq, data = housing, method = "probit" ) out <- model_parameters(m) expect_identical(attributes(out)$coefficient_name, "Z-Score") }) parameters/tests/testthat/test-PMCMRplus.R0000644000176200001440000000055114413515226020305 0ustar liggesuserstest_that("model_parameters.PMCMR", { skip_if_not_installed("PMCMRplus") set.seed(123) mod <- suppressWarnings(PMCMRplus::kwAllPairsConoverTest(count ~ spray, data = InsectSprays)) df <- as.data.frame(model_parameters(mod)) # no need to add strict tests, since `toTidy` is tested in `PMCMRplus` itself expect_equal(dim(df), c(15L, 8L)) }) parameters/tests/testthat/test-model_parameters.bracl.R0000644000176200001440000000225215111301621023114 0ustar liggesusersskip_on_cran() skip_if_not_installed("brglm2") skip_if_not_installed("faraway") skip_if_not(packageVersion("insight") > "0.19.1") test_that("model_parameters.bracl", { data("cns", package = "faraway") cns2 <- reshape( cns, direction = "long", timevar = "Type", times = names(cns)[3:5], varying = 3:5, v.names = "Freq" )[, 3:6] cns2$Type <- factor(cns2$Type, levels = unique(cns2$Type)) mbracl <- brglm2::bracl(Type ~ Water + Work, data = cns2, weights = Freq) mpbracl <- model_parameters(mbracl) expect_named( mpbracl, c( "Parameter", "Coefficient", "SE", "CI", "CI_low", "CI_high", "z", "df_error", "p", "Response" ) ) expect_identical( mpbracl$Parameter, c( "An:(Intercept)", "Sp:(Intercept)", "An:Water", "Sp:Water", "An:WorkNonManual", "Sp:WorkNonManual" ) ) expect_identical(mpbracl$Response, c("An", "Sp", "An", "Sp", "An", "Sp")) expect_equal( mpbracl$Coefficient, c(-0.37392, 1.49063, 0.00129, -0.00349, -0.11292, 0.36384), tolerance = 1e-4 ) }) parameters/tests/testthat/test-model_parameters_df.R0000644000176200001440000002665114736731407022541 0ustar liggesusersskip_on_cran() # glm --------------------------- set.seed(123) data(mtcars) model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") test_that("model_parameters.glm", { params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(4.7888, -0.52956, -6.91917), tolerance = 1e-3) expect_equal(params$p, c(0.01084, 0.17431, 0.03362), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, 
Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(2.4503, -0.9299, -5.63472), tolerance = 1e-3) expect_equal(params$p, c(0.01084, 0.17431, 0.03362), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "residual")) expect_equal(params$df_error, c(29, 29, 29), tolerance = 1e-3) expect_equal(params$CI_low, c(2.09492, -1.06171, -5.75235), tolerance = 1e-3) expect_equal(params$p, c(0.0164, 0.18479, 0.04227), tolerance = 1e-3) }) # PROreg --------------------------- test_that("model_parameters.BBmm", { skip_if_not_installed("PROreg", minimum_version = "1.3.0") set.seed(1234) # defining the parameters k <- 100 m <- 10 phi <- 0.5 beta <- c(1.5, -1.1) sigma <- 0.5 # simulating the covariate and random effects x <- runif(k, 0, 10) X <- model.matrix(~x) z <- as.factor(PROreg::rBI(k, 4, 0.5, 2)) Z <- model.matrix(~ z - 1) u <- rnorm(5, 0, sigma) # the linear predictor and simulated response variable eta <- beta[1] + beta[2] * x + crossprod(t(Z), u) p <- 1 / (1 + exp(-eta)) y <- PROreg::rBB(k, m, p, phi) dat <- data.frame(cbind(y, x, z)) dat$z <- as.factor(dat$z) # apply the model invisible(capture.output({ model <- PROreg::BBmm( fixed.formula = y ~ x, random.formula = ~z, m = m, data = dat ) })) params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(96, 96), tolerance = 1e-3) expect_equal(params$CI_low, c(0.26366, -1.46628), tolerance = 1e-3) expect_equal(params$p, c(0.00814, 0), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(0.27313, -1.46119), tolerance = 1e-3) expect_equal(params$p, c(0.00814, 0), tolerance = 1e-3) }) test_that("model_parameters.BBreg", { skip_if_not_installed("PROreg", minimum_version = "1.3.0") set.seed(18) # we simulate a covariate, fix the parameters of the beta-binomial # distribution and simulate a response variable. # Then we apply the model, and try to get the same values.
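# note for the simulation below: the response follows the inverse-logit link,
# i.e. p = plogis(beta[1] + beta[2] * x), and y is then drawn from a
# beta-binomial distribution via PROreg::rBB(k, m, p, phi)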
k <- 1000 m <- 10 x <- rnorm(k, 5, 3) beta <- c(-10, 2) p <- 1 / (1 + exp(-1 * (beta[1] + beta[2] * x))) phi <- 1.2 y <- PROreg::rBB(k, m, p, phi) # model model <- PROreg::BBreg(y ~ x, m) params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(997, 997), tolerance = 1e-3) expect_equal(params$CI_low, c(-11.08184, 1.84727), tolerance = 1e-3) expect_equal(params$p, c(0, 0), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(-11.08069, 1.84749), tolerance = 1e-3) expect_equal(params$p, c(0, 0), tolerance = 1e-3) }) # MASS / nnet --------------------------- test_that("model_parameters.multinom", { skip_if_not_installed("MASS") skip_if_not_installed("nnet") set.seed(123) utils::example(topic = birthwt, echo = FALSE, package = "MASS") # model model <- nnet::multinom( formula = low ~ ., data = bwt, trace = FALSE ) params <- suppressWarnings(model_parameters(model, ci_method = "wald")) expect_equal(params$df_error, c(178, 178, 178, 178, 178, 178, 178, 178, 178, 178, 178), tolerance = 1e-3) expect_equal(params$CI_low, c( -1.6332, -0.11362, -0.02963, 0.13471, -0.17058, -0.08325, 0.39528, 0.49086, -0.23614, -1.38245, -0.72163 ), tolerance = 1e-3) expect_equal(params$p, c( 0.50926, 0.33729, 0.02833, 0.02736, 0.11049, 0.07719, 0.00575, 0.00866, 0.14473, 0.36392, 0.69537 ), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c( -1.6165, -0.1131, -0.02953, 0.1419, -0.16439, -0.07755, 0.40173, 0.50053, -0.22991, -1.37601, -0.71551 ), tolerance = 1e-3) expect_equal(params$p, c( 0.5084, 0.33599, 0.02706, 0.0261, 0.10872, 0.07548, 0.00518, 0.00794, 0.14296, 0.36269, 0.6949 ), tolerance = 1e-3) }) ## TODO: archived on CRAN - add test back once ivprobit is back on CRAN.
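# A minimal sketch (not run, and not part of the test suite) of what the
# `ci_method` option exercised throughout this file amounts to for Wald
# intervals: "residual" uses a t-distribution with the residual degrees of
# freedom, "normal" uses the z/normal approximation. `m_sketch` is only a
# hypothetical example; its values are not asserted anywhere.
if (FALSE) {
  m_sketch <- lm(mpg ~ wt, data = mtcars)
  est <- coef(m_sketch)[2]
  se <- sqrt(diag(vcov(m_sketch)))[2]
  # ci_method = "residual": t-based interval with residual df
  est + qt(c(0.025, 0.975), df = df.residual(m_sketch)) * se
  # ci_method = "normal": z-based interval
  est + qnorm(c(0.025, 0.975)) * se
}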
# ivprobit --------------------------- # test_that("model_parameters.ivprobit", { # skip_if_not_installed("ivprobit") # set.seed(123) # data(eco, package = "ivprobit") # # model # model <- ivprobit::ivprobit( # formula = d2 ~ ltass + roe + div | eqrat + bonus | ltass + roe + div + gap + cfa, # data = eco # ) # params <- suppressWarnings(model_parameters(model)) # expect_equal(params$df_error, c(789L, 789L, 789L, 789L, 789L, 789L), tolerance = 1e-3) # expect_equal(params$CI_low, c(-35.96484, -0.27082, -0.15557, -1e-05, -15.95755, -1e-05), tolerance = 1e-3) # expect_equal(params$p, c(0.08355, 0.12724, 0.55684, 0.63368, 0.46593, 0.61493), tolerance = 1e-3) # params <- suppressWarnings(model_parameters(model, ci_method = "normal")) # expect_equal(params$df_error, c(Inf, Inf, Inf, Inf, Inf, Inf), tolerance = 1e-3) # expect_equal(params$CI_low, c(-35.93553, -0.26895, -0.15522, -1e-05, -15.91859, -1e-05), tolerance = 1e-3) # expect_equal(params$p, c(0.08316, 0.12684, 0.55668, 0.63355, 0.46571, 0.61479), tolerance = 1e-3) # }) # biglm --------------------------- test_that("model_parameters.bigglm", { skip_if_not_installed("biglm") set.seed(123) data(trees) # model model <- biglm::bigglm( formula = log(Volume) ~ log(Girth) + log(Height), data = trees, chunksize = 10, sandwich = TRUE ) params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(28, 28, 28), tolerance = 1e-3) expect_equal(params$CI_low, c(-8.12252, 1.86862, 0.72411), tolerance = 1e-3) expect_equal(params$p, c(0, 0, 0), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(-8.05815, 1.87355, 0.74108), tolerance = 1e-3) expect_equal(params$p, c(0, 0, 0), tolerance = 1e-3) }) # ivreg --------------------------- test_that("model_parameters.ivreg", { skip_if_not_installed("ivreg") data(CigaretteDemand, package = "ivreg") model <- ivreg::ivreg(log(packs) ~ log(rprice) + log(rincome) | salestax + log(rincome), data = CigaretteDemand ) params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(45L, 45L, 45L), tolerance = 1e-3) expect_equal(params$CI_low, c(6.69477, -1.86742, -0.32644), tolerance = 1e-3) expect_equal(params$p, c(0, 0.00266, 0.42867), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(6.76831, -1.84795, -0.3119), tolerance = 1e-3) expect_equal(params$p, c(0, 0.00147, 0.42447), tolerance = 1e-3) }) # plm --------------------------- test_that("model_parameters.plm", { skip_if_not_installed("plm") data("Produc", package = "plm") set.seed(123) model <- suppressWarnings(plm::plm( formula = log(gsp) ~ log(pcap) + log(pc) + log(emp) + unemp, data = Produc, index = c("state", "year") )) params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(764L, 764L, 764L, 764L), tolerance = 1e-3) expect_equal(params$CI_low, c(-0.08308, 0.2427, 0.70909, -0.00724), tolerance = 1e-3) expect_equal(params$p, c(0.36752, 0, 0, 0), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(-0.08299, 0.24277, 0.70918, -0.00724), tolerance = 1e-3) expect_equal(params$p, c(0.36724, 0, 0, 0), tolerance = 1e-3) }) # nlme --------------------------- test_that("model_parameters.gls", { 
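# gls() fit with an AR(1) correlation structure within each Mare; the
# expectations below cover both the default t-based intervals and
# ci_method = "normal"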
skip_if_not_installed("nlme") data(Ovary, package = "nlme") model <- nlme::gls( follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time), data = Ovary, correlation = nlme::corAR1(form = ~ 1 | Mare) ) params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(305L, 305L, 305L), tolerance = 1e-3) expect_equal(params$CI_low, c(10.90853, -4.04402, -2.2722), tolerance = 1e-3) expect_equal(params$p, c(0, 2e-05, 0.19814), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c(10.91372, -4.03898, -2.26675), tolerance = 1e-3) expect_equal(params$p, c(0, 2e-05, 0.19716), tolerance = 1e-3) }) # # complmrob --------------------------- # # test_that("model_parameters.complmrob", { # skip_if_not_installed("complmrob") # crimes <- data.frame( # lifeExp = state.x77[, "Life Exp"], # USArrests[, c("Murder", "Assault", "Rape")] # ) # # # model # model <- complmrob::complmrob(formula = lifeExp ~ ., data = crimes) # params <- suppressWarnings(model_parameters(model)) # expect_equal(params$df_error, c(46L, 46L, 46L, 46L), tolerance = 1e-3) # expect_equal(params$CI_low, c(69.79492, -3.09088, -2.91019, 2.05479), tolerance = 1e-3) # expect_equal(params$p, c(0, 0, 0.26437, 0), tolerance = 1e-3) # # params <- suppressWarnings(model_parameters(model, ci_method = "normal")) # expect_equal(params$df_error, c(Inf, Inf, Inf, Inf), tolerance = 1e-3) # expect_equal(params$CI_low, c(69.81747, -3.06832, -2.86118, 2.087), tolerance = 1e-3) # expect_equal(params$p, c(0, 0, 0.25851, 0), tolerance = 1e-3) # }) # drc --------------------------- test_that("model_parameters.drc", { skip_if_not_installed("drc") set.seed(123) data("selenium", package = "drc") model <- drc::drm( formula = dead / total ~ conc, curveid = type, weights = total, data = selenium, fct = drc::LL.2(), type = "binomial" ) params <- suppressWarnings(model_parameters(model)) expect_equal(params$df_error, c(17L, 17L, 17L, 17L, 17L, 17L, 17L, 17L), tolerance = 1e-3) expect_equal(params$CI_low, c( -1.83156, -1.13673, -2.4552, -1.80875, 223.0835, 295.39556, 107.25398, 70.62683 ), tolerance = 1e-3) expect_equal(params$p, c(0, 1e-05, 0, 0, 0, 0, 0, 0), tolerance = 1e-3) params <- suppressWarnings(model_parameters(model, ci_method = "normal")) expect_equal(params$df_error, c(Inf, Inf, Inf, Inf, Inf, Inf, Inf, Inf), tolerance = 1e-3) expect_equal(params$CI_low, c( -1.80826, -1.11588, -2.43449, -1.78349, 225.15547, 301.29532, 108.13891, 71.91797 ), tolerance = 1e-3) expect_equal(params$p, c(0, 0, 0, 0, 0, 0, 0, 0), tolerance = 1e-3) }) parameters/tests/testthat/test-model_parameters.anova.R0000644000176200001440000002257115057525051023161 0ustar liggesusersskip_on_cran() m <- glm(am ~ mpg + hp + factor(cyl), data = mtcars, family = binomial() ) test_that("model_parameters.anova", { a <- anova(m, test = "Chisq") mp <- model_parameters(a) expect_named(mp, c("Parameter", "df", "Deviance", "df_error", "Deviance_error", "p")) expect_equal(mp$Deviance_error, c(43.22973, 29.67517, 19.23255, 10.48692), tolerance = 1e-3) expect_equal(mp$p, c(NA, 0.00023, 0.00123, 0.01262), tolerance = 1e-3) expect_snapshot(mp) }) test_that("model_parameters.anova", { skip_if_not_installed("car") a <- car::Anova(m, type = 3, test.statistic = "F") mp <- model_parameters(a) expect_named(mp, c("Parameter", "Sum_Squares", "df", "Mean_Square", "F", "p")) expect_equal(mp[["F"]], c(53.40138, 60.42944, 13.96887, NA), tolerance = 1e-3) }) 
test_that("model_parameters.anova for mixed models", { skip_if_not_installed("lme4") skip_if_not_installed("lmerTest") m <- lmerTest::lmer(mpg ~ wt + (1 | gear), data = mtcars) out <- parameters::model_parameters(anova(m)) expect_named(out, c("Parameter", "Sum_Squares", "df", "df_error", "Mean_Square", "F", "p")) expect_equal(out$df_error, 21.92272, tolerance = 1e-4) }) test_that("linear hypothesis tests", { skip_if_not_installed("car") skip_if_not_installed("carData") data(Davis, package = "carData") data(Duncan, package = "carData") mod.davis <- lm(weight ~ repwt, data = Davis) ## the following are equivalent: p1 <- parameters(car::linearHypothesis(mod.davis, diag(2), c(0, 1))) p2 <- parameters(car::linearHypothesis(mod.davis, c("(Intercept) = 0", "repwt = 1"))) p3 <- parameters(car::linearHypothesis(mod.davis, c("(Intercept)", "repwt"), c(0, 1))) p4 <- parameters(car::linearHypothesis(mod.davis, c("(Intercept)", "repwt = 1"))) expect_equal(p1, p2, ignore_attr = TRUE) expect_equal(p1, p3, ignore_attr = TRUE) expect_equal(p1, p4, ignore_attr = TRUE) expect_identical(nrow(p1), 2L) ## FIXME: this has changed since {car} 3.1.3 # expect_identical(p1$Parameter, c("(Intercept) = 0", "repwt = 1")) expect_identical(p1$Parameter, c("1", "2")) mod.duncan <- lm(prestige ~ income + education, data = Duncan) p <- parameters(car::linearHypothesis(mod.duncan, "1*income - 1*education + 1 = 1")) expect_identical(nrow(p), 2L) ## FIXME: this has changed since {car} 3.1.3 # expect_identical(p$Parameter, "income - education = 0") expect_identical(p1$Parameter, c("1", "2")) }) test_that("print-model_parameters", { skip_if_not_installed("car") a <- car::Anova(m, type = 3, test.statistic = "F") mp <- model_parameters(a) expect_snapshot(mp) }) test_that("model_parameters_Anova.mlm-1", { skip_if_not_installed("car") m <- lm(cbind(hp, mpg) ~ factor(cyl) * am, data = mtcars) a <- car::Anova(m, type = 3, test.statistic = "Pillai") mp <- model_parameters(a, verbose = FALSE) expect_named(mp, c("Parameter", "df", "Statistic", "df_num", "df_error", "F", "p")) expect_equal(mp[["F"]], c(6.60593, 3.71327, 3.28975), tolerance = 1e-3) expect_equal(mp$Statistic, c(0.67387, 0.22903, 0.4039), tolerance = 1e-3) }) test_that("model_parameters_Anova.mlm-2", { skip_if_not_installed("MASS") skip_if_not_installed("car") data(housing, package = "MASS") m <- MASS::polr(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) a <- car::Anova(m) mp <- model_parameters(a) expect_named(mp, c("Parameter", "Chi2", "df", "p")) expect_equal(mp$Chi2, c(108.2392, 55.91008, 14.30621), tolerance = 1e-3) }) test_that("model_parameters_Anova-effectsize", { skip_if_not_installed("lme4") skip_if_not_installed("effectsize") df <- iris df$Sepal.Big <- ifelse(df$Sepal.Width >= 3, "Yes", "No") mm <- suppressMessages(lme4::lmer(Sepal.Length ~ Sepal.Big + Petal.Width + (1 | Species), data = df)) model <- anova(mm) # parameters table including effect sizes mp <- model_parameters( model, es_type = "eta", ci = 0.9, df_error = dof_satterthwaite(mm)[2:3] ) expect_identical( colnames(mp), c( "Parameter", "Sum_Squares", "df", "Mean_Square", "F", "Eta2_partial", "Eta2_CI_low", "Eta2_CI_high" ) ) expect_equal(mp$Eta2_partial, c(0.03262, 0.6778), tolerance = 1e-3) }) # XXX ----- test_that("anova type | lm", { skip_if_not_installed("car") m <- lm(mpg ~ factor(cyl) * hp + disp, mtcars) a1 <- aov(m) expect_identical(attr(model_parameters(a1), "anova_type"), 1) a1 <- anova(m) expect_identical(attr(model_parameters(a1), "anova_type"), 1) a2 <- car::Anova(m, type = 2) a3 
<- car::Anova(m, type = 3) expect_identical(attr(model_parameters(a2), "anova_type"), 2) expect_message( expect_identical(attr(model_parameters(a3), "anova_type"), 3), "Type 3 ANOVAs only give" ) m <- lm(mpg ~ factor(cyl) + hp + disp, mtcars) expect_warning(model_parameters(aov(m)), regexp = NA) # no need for warning, because no interactions m <- lm(mpg ~ factor(cyl) * scale(disp, TRUE, FALSE) + scale(disp, TRUE, FALSE), mtcars, contrasts = list("factor(cyl)" = contr.helmert) ) a3 <- car::Anova(m, type = 3) expect_message( model_parameters(a3), "Type 3 ANOVAs only give" ) }) test_that("anova type | mlm", { skip_if_not_installed("car") m <- lm(cbind(mpg, drat) ~ factor(cyl) * hp + disp, mtcars) a1 <- aov(m) expect_identical(attr(model_parameters(a1), "anova_type"), 1) a1 <- anova(m) expect_identical(attr(model_parameters(a1), "anova_type"), 1) a2 <- car::Anova(m, type = 2) a3 <- car::Anova(m, type = 3) expect_identical(attr(model_parameters(a2), "anova_type"), 2) expect_identical(attr(model_parameters(a3, verbose = FALSE), "anova_type"), 3) }) test_that("anova type | glm", { skip_if_not_installed("car") m <- suppressWarnings(glm(am ~ factor(cyl) * hp + disp, mtcars, family = binomial())) a1 <- anova(m) expect_identical(attr(model_parameters(a1), "anova_type"), 1) a2 <- suppressWarnings(car::Anova(m, type = 2)) a3 <- suppressWarnings(car::Anova(m, type = 3)) expect_identical(attr(model_parameters(a2), "anova_type"), 2) expect_message( expect_identical(attr(model_parameters(a3), "anova_type"), 3), "Type 3 ANOVAs only give" ) }) test_that("anova type | lme4", { skip_if_not_installed("lmerTest") skip_if_not_installed("lme4") skip_if_not_installed("car") m1 <- lme4::lmer(mpg ~ factor(cyl) * hp + disp + (1 | gear), mtcars) suppressMessages({ m2 <- lme4::glmer(carb ~ factor(cyl) * hp + disp + (1 | gear), mtcars, family = poisson() ) }) a1 <- anova(m1) expect_identical(attr(model_parameters(a1), "anova_type"), 1) a1 <- anova(m2) expect_identical(attr(model_parameters(a1), "anova_type"), 1) a3 <- anova(lmerTest::as_lmerModLmerTest(m1)) expect_message( expect_identical(attr(model_parameters(a3), "anova_type"), 3), "Type 3 ANOVAs only give" ) a2 <- car::Anova(m1, type = 2) a3 <- car::Anova(m1, type = 3) expect_identical(attr(model_parameters(a2), "anova_type"), 2) expect_message( expect_identical(attr(model_parameters(a3), "anova_type"), 3), "Type 3 ANOVAs only give" ) a2 <- car::Anova(m2, type = 2) a3 <- car::Anova(m2, type = 3) expect_identical(attr(model_parameters(a2), "anova_type"), 2) expect_message( expect_identical(attr(model_parameters(a3), "anova_type"), 3), "Type 3 ANOVAs only give" ) }) test_that("anova type | afex + Anova.mlm", { skip_if_not_installed("afex") data(obk.long, package = "afex") suppressMessages({ m <- afex::aov_ez("id", "value", obk.long, between = c("treatment", "gender"), within = c("phase", "hour"), observed = "gender" ) }) expect_identical(attr(model_parameters(m), "anova_type"), 3) expect_identical(attr(model_parameters(m$Anova, verbose = FALSE), "anova_type"), 3) }) test_that("anova rms", { skip_if_not_installed("rms") m <- rms::ols(mpg ~ cyl + disp + hp + drat, data = mtcars) a <- anova(m) mp <- model_parameters(a) expect_identical(attr(mp, "anova_type"), 2) expect_identical(mp$Parameter, c("cyl", "disp", "hp", "drat", "Total", "Residuals")) expect_identical(colnames(mp), c("Parameter", "Sum_Squares_Partial", "df", "Mean_Square", "F", "p")) expect_equal(mp$Sum_Squares_Partial, data.frame(a)$Partial.SS, tolerance = 1e-3) }) test_that("anova rms", { 
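# same idea as the ols() test above, but rms::orm() reports Wald chi-squared
# statistics instead of partial sums of squares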
skip_if_not_installed("rms") skip_if(getRversion() < "4.2.0") m <- rms::orm(mpg ~ cyl + disp + hp + drat, data = mtcars) a <- anova(m) mp <- model_parameters(a) expect_identical(attr(mp, "anova_type"), 2) expect_identical(mp$Parameter, c("cyl", "disp", "hp", "drat", "Total")) expect_named(mp, c("Parameter", "Chi2", "df", "p")) expect_equal(mp$Chi2, data.frame(a)$Chi.Square, tolerance = 1e-3) }) skip_if_not_installed("withr") skip_if_not_installed("survey") withr::with_package( "survey", test_that("anova survey", { data(api, package = "survey") dclus2 <<- survey::svydesign(id = ~ dnum + snum, weights = ~pw, data = apiclus2) model0 <- survey::svyglm( I(sch.wide == "Yes") ~ ell * meals, design = dclus2, family = quasibinomial() ) out <- anova(model0) expect_snapshot(print(model_parameters(out))) out <- anova(model0, method = "Wald") expect_snapshot(print(model_parameters(out))) }) ) parameters/tests/testthat/test-model_parameters.logitr.R0000644000176200001440000000102214736731407023350 0ustar liggesusersskip_on_cran() skip_if_not_installed("logitr") test_that("model_parameters.logitr", { data(yogurt, package = "logitr") m <- logitr::logitr( data = yogurt, outcome = "choice", obsID = "obsID", pars = c("feat", "brand"), scalePar = "price", numMultiStarts = 5 ) params <- model_parameters(m, verbose = FALSE) expect_snapshot(params, variant = "windows") params <- model_parameters(m, verbose = FALSE, ci_method = "residual") expect_snapshot(params, variant = "windows") }) parameters/tests/testthat/test-model_parameters.fixest.R0000644000176200001440000001374715060247031023355 0ustar liggesuserstest_that("model_parameters.fixest-1", { skip_on_cran() skip_if_not_installed("fixest") skip_if_not_installed("carData") # avoid warnings fixest::setFixest_nthreads(1) data("qol_cancer") data(trade, package = "fixest") data(Greene, package = "carData") data(iris) d <- Greene d$dv <- as.numeric(Greene$decision == "yes") qol_cancer <- cbind( qol_cancer, datawizard::demean(qol_cancer, select = c("phq4", "QoL"), by = "ID") ) m1 <- fixest::feols(QoL ~ time + phq4 | ID, data = qol_cancer) m2 <- fixest::femlm(Euros ~ log(dist_km) | Origin + Destination + Product, data = trade) m3 <- fixest::femlm(log1p(Euros) ~ log(dist_km) | Origin + Destination + Product, data = trade, family = "gaussian") m4 <- fixest::feglm(Euros ~ log(dist_km) | Origin + Destination + Product, data = trade, family = "poisson") m5 <- fixest::feols(Sepal.Width ~ Petal.Length | Species | Sepal.Length ~ Petal.Width, data = iris) m6 <- fixest::feglm(dv ~ language | judge, data = d, cluster = "judge", family = "logit") params <- model_parameters(m1, verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(2L, 9L)) expect_equal(params$p, as.vector(fixest::pvalue(m1)), tolerance = 1e-4) expect_equal(params$df_error[1], as.vector(fixest::degrees_freedom(m1, type = "t")), tolerance = 1e-4) expect_equal(params$Coefficient, as.vector(coef(m1)), tolerance = 1e-4) # currently, a bug for fixest 10.4 on R >= 4.3 # skip_if_not(getRversion() < "4.2.0") expect_snapshot( model_parameters(m1, include_info = TRUE, verbose = FALSE) ) # Poission, df = Inf params <- model_parameters(m2, verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(1L, 9L)) expect_equal(params$p, as.vector(fixest::pvalue(m2)), tolerance = 1e-4) expect_identical(params$df_error[1], Inf) expect_equal(params$Coefficient, as.vector(coef(m2)), tolerance = 1e-4) params <- model_parameters(m3, verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(1L, 9L)) 
expect_equal(params$p, as.vector(fixest::pvalue(m3)), tolerance = 1e-4) expect_equal(params$df_error[1], as.vector(fixest::degrees_freedom(m3, type = "t")), tolerance = 1e-4) expect_equal(params$Coefficient, as.vector(coef(m3)), tolerance = 1e-4) # Poisson, df = Inf params <- model_parameters(m4, verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(1L, 9L)) expect_equal(params$p, as.vector(fixest::pvalue(m4)), tolerance = 1e-4) expect_identical(params$df_error[1], Inf) expect_equal(params$Coefficient, as.vector(coef(m4)), tolerance = 1e-4) params <- model_parameters(m5, verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(2L, 9L)) expect_equal(params$p, as.vector(fixest::pvalue(m5)), tolerance = 1e-4) expect_equal(params$df_error[1], as.vector(fixest::degrees_freedom(m5, type = "t")), tolerance = 1e-4) expect_equal(params$Coefficient, as.vector(coef(m5)), tolerance = 1e-4) # logit, df = Inf params <- model_parameters(m6, verbose = FALSE) expect_identical(c(nrow(params), ncol(params)), c(1L, 9L)) expect_equal(params$p, as.vector(fixest::pvalue(m6)), tolerance = 1e-4) expect_identical(params$df_error[1], Inf) expect_equal(params$Coefficient, as.vector(coef(m6)), tolerance = 1e-4) }) test_that("model_parameters.fixest-2", { skip_on_cran() skip_if_not_installed("fixest") skip_if_not_installed("carData") data(Greene, package = "carData") d <- Greene d$dv <- as.numeric(Greene$decision == "yes") mod1 <- fixest::feglm(dv ~ language | judge, data = d, cluster = "judge", family = "logit" ) out1 <- model_parameters(mod1) expect_equal(out1$p, as.vector(fixest::pvalue(mod1)), tolerance = 1e-4, ignore_attr = TRUE) expect_equal(out1$SE, as.vector(sqrt(diag(vcov(mod1)))), tolerance = 1e-4, ignore_attr = TRUE) }) test_that("robust standard errors", { skip_if_not_installed("fixest") mod <- fixest::feols(mpg ~ hp + am | cyl, data = mtcars) se1 <- sqrt(diag(vcov(mod))) se2 <- sqrt(diag(vcov(mod, vcov = "HC1"))) se3 <- sqrt(diag(vcov(mod, vcov = ~gear))) expect_equal(standard_error(mod)$SE, se1, ignore_attr = TRUE) expect_equal(standard_error(mod, vcov = "HC1")$SE, se2, ignore_attr = TRUE) expect_equal(standard_error(mod, vcov = ~gear)$SE, se3, ignore_attr = TRUE) p1 <- p_value(mod) p2 <- p_value(mod, vcov = "HC1") p3 <- p_value(mod, vcov = ~gear) expect_true(all(p1$p != p2$p)) expect_true(all(p2$p != p3$p)) expect_true(all(p1$p != p3$p)) # HC3 works since fixest 0.13.0 skip_if_not_installed("fixest", minimum_version = "0.13.0") se4 <- sqrt(diag(vcov(mod, vcov = "HC3"))) expect_equal(standard_error(mod, vcov = "HC3")$SE, se4, ignore_attr = TRUE, tolerance = 1e-4) expect_equal(parameters(mod, vcov = "HC3")$SE, se4, ignore_attr = TRUE, tolerance = 1e-4) expect_error(parameters(mod, vcov = "hetero"), NA) expect_error(parameters(mod, vcov = "iid"), NA) }) test_that("standard errors, Sun and Abraham", { skip_if_not_installed("did") data(mpdta, package = "did") m <- fixest::feols( lemp ~ sunab(first.treat, year, ref.p = -1:-4, att = TRUE) | countyreal + year, data = mpdta, cluster = ~countyreal ) out <- model_parameters(m) expect_equal(out$SE, m$coeftable[, "Std. Error"], tolerance = 1e-4, ignore_attr = TRUE) data(base_stagg, package = "fixest") m <- fixest::feols(y ~ x1 + sunab(year_treated, year) | id + year, base_stagg) out <- model_parameters(m) expect_equal(out$SE, m$coeftable[, "Std.
Error"], tolerance = 1e-4, ignore_attr = TRUE) }) skip_if_not_installed("withr") skip_if_not_installed("glmmTMB") withr::with_options( list(parameters_warning_exponentiate = TRUE), test_that("model_parameters works for fixest-negbin", { data(Salamanders, package = "glmmTMB") mod <- fixest::fenegbin(count ~ mined + spp, data = Salamanders) out <- model_parameters(mod) expect_snapshot(print(out)) }) ) parameters/tests/testthat/test-model_parameters.htest.R0000644000176200001440000001173415001670564023202 0ustar liggesusersskip_if_not_installed("effectsize") ## TODO: add more tests for different htest objects and effectsize types test_that("model_parameters.htest", { params <- model_parameters(cor.test(mtcars$mpg, mtcars$cyl, method = "pearson")) expect_named( params, c( "Parameter1", "Parameter2", "r", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Method", "Alternative" ) ) expect_equal(params$r, -0.852, tolerance = 0.05) expect_warning({ params <- model_parameters(cor.test(mtcars$mpg, mtcars$cyl, method = "spearman")) }) expect_equal(params$rho, -0.9108, tolerance = 0.05) expect_warning({ params <- model_parameters(cor.test(mtcars$mpg, mtcars$cyl, method = "kendall")) }) expect_equal(params$tau, -0.795, tolerance = 0.05) params <- model_parameters(t.test(iris$Sepal.Width, iris$Sepal.Length)) expect_equal(params$Difference, -2.786, tolerance = 0.05) params <- model_parameters(t.test(mtcars$mpg ~ mtcars$vs)) expect_equal(params$Difference, -7.940, tolerance = 0.05) params <- model_parameters(t.test(iris$Sepal.Width, mu = 1)) expect_equal(params$Difference, 2.0573, tolerance = 0.05) }) test_that("model_parameters.htest-2", { x <- c(A = 20, B = 15, C = 25) mp <- model_parameters(chisq.test(x)) expect_named(mp, c("Chi2", "df", "p", "Method")) }) test_that("model_parameters-chisq-test NULL", { mp <- model_parameters(stats::chisq.test(table(mtcars$am))) expect_equal(mp$Chi2, 1.125, tolerance = 1e-3) expect_named(mp, c("Chi2", "df", "p", "Method")) }) test_that("model_parameters-chisq-test two way table", { mp2 <- suppressWarnings(model_parameters(stats::chisq.test(table(mtcars$am, mtcars$cyl)))) expect_equal(mp2$Chi2, 8.740733, tolerance = 1e-3) expect_named(mp2, c("Chi2", "df", "p", "Method")) }) test_that("model_parameters-chisq-test works with `svychisq` objects", { skip_if_not_installed("survey") data(api, package = "survey") set.seed(123) dclus1 <- survey::svydesign(id = ~dnum, weights = ~pw, data = apiclus1, fpc = ~fpc) m <- survey::svychisq(~ sch.wide + stype, dclus1) mp <- model_parameters(m) expect_equal(mp$F, 5.19337, tolerance = 1e-3) expect_named(mp, c("F", "df", "df_error", "p", "Method")) }) test_that("model_parameters-chisq-test adjusted", { expect_message({ mp <- model_parameters(stats::chisq.test(table(mtcars$am)), es_type = "phi", ci = 0.95) }) expect_equal(mp$Chi2, 1.125, tolerance = 1e-3) expect_named(mp, c("Chi2", "df", "p", "Method")) }) test_that("model_parameters-t-test standardized d", { params <- model_parameters(t.test(iris$Sepal.Width, iris$Sepal.Length), es_type = "cohens_d") expect_equal(params$Cohens_d, -4.210417, tolerance = 0.05) expect_equal(params$d_CI_low, -4.655306, tolerance = 0.05) expect_named( params, c( "Parameter1", "Parameter2", "Mean_Parameter1", "Mean_Parameter2", "Difference", "CI", "CI_low", "CI_high", "Cohens_d", "d_CI_low", "d_CI_high", "t", "df_error", "p", "Method", "Alternative" ) ) }) test_that("model_parameters-t-test standardized d", { mp <- model_parameters(t.test(mtcars$mpg ~ mtcars$vs), es_type = "cohens_d", verbose = FALSE) 
expect_equal(mp$Cohens_d, -1.696032, tolerance = 1e-3) expect_named( mp, c( "Parameter", "Group", "Mean_Group1", "Mean_Group2", "Difference", "CI", "CI_low", "CI_high", "Cohens_d", "d_CI_low", "d_CI_high", "t", "df_error", "p", "Method", "Alternative" ) ) }) test_that("model_parameters-t-test reports the same unregarding of interface", { g1 <- 1:10 g2 <- 7:20 df <- data.frame(y = c(g1, g2), x = rep(c(0, 1), c(length(g1), length(g2)))) compare_only <- c("Difference", "CI", "CI_low", "CI_high", "t", "df_error", "p", "Method") default_ttest <- model_parameters(t.test(x = g1, y = g2))[compare_only] formula_ttest <- model_parameters(t.test(y ~ x, df))[compare_only] expect_equal(default_ttest, formula_ttest, ignore_attr = TRUE) }) test_that("model_parameters-Box.test works, and ignores partial matching", { set.seed(123) ts1 <- ts(rnorm(200, mean = 10, sd = 3)) result1 <- Box.test(ts1, lag = 5, type = "Box-Pierce", fitdf = 2) result2 <- Box.test(ts1, lag = 5, type = "Ljung-Box", fitdf = 2) out1 <- model_parameters(result1) out2 <- model_parameters(result1, effects = "all") expect_equal(out1, out2, ignore_attr = TRUE) expect_named(out1, c("Parameter", "Chi2", "df_error", "p", "Method")) out1 <- model_parameters(result2) out2 <- model_parameters(result2, effects = "all") expect_equal(out1, out2, ignore_attr = TRUE) }) test_that("model_parameters-htests removes $ from parameter and group names", { data(sleep) sleep2 <- reshape(sleep, direction = "wide", idvar = "ID", timevar = "group") out <- format(model_parameters(t.test(sleep2$extra.1, sleep2$extra.2, paired = TRUE))) expect_identical(out$Parameter, "extra.1") expect_identical(out$Group, "extra.2") }) parameters/tests/testthat.R0000644000176200001440000000010414413515226015540 0ustar liggesuserslibrary(parameters) library(testthat) test_check("parameters") parameters/MD50000644000176200001440000006573515111307212012736 0ustar liggesusersfc2a6818318a09a1c1bb09ec1f7bfbeb *DESCRIPTION 198a4ac99ca6cb9e5ffc4faf9033def8 *NAMESPACE c5bad93a61114b5910eb17fbd69b026f *NEWS.md 8045c79088bd13422dff42f3128cdf58 *R/1_model_parameters.R d2fd4b8b006cd6cb3119986e7ca96f81 *R/2_ci.R bb2f31f54a6f37c8df02a8cb4a1b1fd1 *R/3_p_value.R 2643e5bebc430f417aabd5ee8380f1af *R/4_standard_error.R 46b51118f9d818971c76f996c64b8676 *R/5_simulate_model.R cf8687d6ff8772e19ff77f55c06b6303 *R/bootstrap_model-emmeans.R f8706da7fb5f27eca4ffdf90d5231247 *R/bootstrap_model.R 9ef7d25364cac6d2a77a9e72029798c8 *R/bootstrap_parameters.R 42943322a8ab50aca3b38ae13a55ed11 *R/ci_betwithin.R 82dd6c6aeb6eadaaa4449a615c8f2370 *R/ci_generic.R dcfbdb24c335825fb422a443edd9fda9 *R/ci_kenward.R 3e7ef8844be9768179f886c716547cc3 *R/ci_ml1.R 2f80f24c32164b23ef07c5ee0d0e6b6d *R/ci_profile_boot.R a2e1759835e3a2f2d3e073a450755c64 *R/ci_satterthwaite.R bde5fa7f03d6235d824a72bcacc56663 *R/cluster_analysis.R 936fd6bd303e279a265753b6050cf302 *R/cluster_centers.R 2753e7a0cc667c3bc5900cfeff0656d3 *R/cluster_discrimination.R f922c3bd0206021178e8898b65440b67 *R/cluster_meta.R d8ef7424144ff1424d763f676eb81760 *R/cluster_performance.R 9331dfc111efb526baa20653c8155fc9 *R/compare_parameters.R 86306ff324f39c73d6b0525e6f1148f5 *R/convert_efa_to_cfa.R 55f0da00f816533dcd93dc5130a404da *R/datasets.R 6d3267bf22de352ca12726b5c6470892 *R/display.R 854396458db4451d30c9f3a0d95d6509 *R/dof.R f3b9f6f05029f6b12fedcde24e21b38c *R/dof_betwithin.R b30972bd42678608c22953458ebb29cb *R/dof_kenward.R b0abf838af917c66f8880a14f61a8a74 *R/dof_ml1.R e8fd4623ea268b3e69980bfb4bf45c89 *R/dof_satterthwaite.R 5c30b2624359197847e2eeeaf0584140 
*R/dominance_analysis.R 910326f50628ef6a18d00984baef27d8 *R/equivalence_test.R 92c9ef885925252783f6730a069bd206 *R/extract_parameters.R 0698b521962129658c1710d4aa34aa22 *R/extract_parameters_anova.R fe14e14b7c816f251bba92702e33296b *R/extract_random_parameters.R e712e886d238872ba23fb66a30006fcf *R/extract_random_variances.R a39abc0ea8af47eed301aae1a231373d *R/factor_analysis.R ce30deb50765203517097ed5d996f24b *R/factor_scores.R ed9fc65af42807683c59b2ebdee2e31a *R/format.R be3ff96821e907efc62fd81837e16439 *R/format_df_adjust.R c18e18191213f065feded7446ee177f4 *R/format_order.R fca1e76e9599188703f6305518672841 *R/format_p_adjust.R 0c806cba004fe7291a463bfb61479a33 *R/format_parameters.R 2c912dbbd1423221764740cab8ce2aa6 *R/group_level_total.R 4eecf1ad1f11817a893222a8022cbefe *R/methods_AER.R 79b7e90bd54c447d3d2cb81c8328cc88 *R/methods_BBMM.R 46d47fb8941bea708028bb2e577e32ac *R/methods_BayesFM.R 62c0a0a9fbce9512488dc7e47a0f4cc6 *R/methods_BayesFactor.R f46fb8bfaac926285cc320ab1c7d01c6 *R/methods_BayesX.R 6d0ce7b953dc464061c26b3ef39aa172 *R/methods_DirichletReg.R bc598b46f444d379c1156811c8847519 *R/methods_FactoMineR.R d503b934a55c6c8a82083ec918d452ef *R/methods_MCMCglmm.R 38a86ed04315e7e728178aeca000556b *R/methods_PMCMRplus.R 07a7e856db7bb581c38fdec32712c683 *R/methods_aod.R 028b164142843029513adab1a7524a87 *R/methods_aov.R 7e4fa74e4fd1da5bad6c3f8a891ba1fe *R/methods_averaging.R 7865d12cb9d83b6f77f2891925c9024b *R/methods_bamlss.R b217531634123e21b45b248af2c24197 *R/methods_base.R 2b725b1ffaf9fecee20568f22500d22c *R/methods_bayesQR.R 079ada60c0566403a1903c42790d79d5 *R/methods_bayestestR.R 869569b19699269be9c2342656b33fae *R/methods_bbmle.R be1c3f7d3171d1f92f6ddc3767c86ec1 *R/methods_betareg.R 326dd60cf3bd3be3e15f57775834b19d *R/methods_bfsl.R de1cccfd5b748ec0a487eb689f7ab661 *R/methods_bggm.R dd541e83b7cc6ecbed94f57b2f709425 *R/methods_bife.R 1a7e3b0d6bc8b952a3ac2284f5af4689 *R/methods_biglm.R 6ce188ac0346f60c37abc10e7171d10c *R/methods_brglm2.R 0936019750a2a4fb03528f5b6d1d8fd6 *R/methods_brms.R b63821fb0a35a92665a0d1d97ae973b4 *R/methods_car.R c81b6e7e6c2097be6e56e44e62ac5c01 *R/methods_censReg.R bfa5e4ef53f799b1478bac387f65d443 *R/methods_cgam.R b37757fbeb09ed92449676646828a5dc *R/methods_coda.R b984cab733ccc93206e592850ff7a8cd *R/methods_coxme.R 114ae39e78c7b0d4edbc0d31673fb09a *R/methods_coxrobust.R e3a32531cf8ba9c5f78f8b3404dd61c0 *R/methods_cplm.R f97a38c25341b6fcea36385598e8d793 *R/methods_crch.R ec1c8164533bd06b7e105fbd2b1bc226 *R/methods_dbscan.R dee30efadd593dbfbbeac54cd478a95b *R/methods_effect_size.R 7ff045ef7750c54be0b047c7e39488e6 *R/methods_eflm.R d9359b80ed06f598b331f302dc58af83 *R/methods_emmeans.R 4ec7aea36ddadce2699d818bf29daf0a *R/methods_epi2x2.R bf2f271dd612a02880e6804a93c1382c *R/methods_ergm.R 4c1e815db38ffb6614a89627ba76ff1c *R/methods_estimatr.R 314b27e1991976ff63a249ab4e578fe1 *R/methods_fitdistr.R 49d233204e9cb2d403f9561743e276ff *R/methods_fixest.R d95cd35ecead1d0159e4c1260975ca5c *R/methods_flexsurvreg.R 39a4facdb3713a74b455004e7fdf0ad4 *R/methods_gam.R d62a2972b77a847ce40852ed60063f1d *R/methods_gamlss.R bad02a55f0d720ac67ff5176dee5320c *R/methods_gamm4.R b010bbaa7ee252ae1abd439e0e628ea6 *R/methods_gee.R 1852bc064dec2945aed31aae876afffb *R/methods_gjrm.R f4a2393725bfc40edd943617019e9e40 *R/methods_glm.R d4575c195cd86cdd9afce29d5eb722be *R/methods_glmgee.R 837465aa38819e5ea25036f0f2186151 *R/methods_glmm.R 89cec981d978264f64ac1e5bd3dfb068 *R/methods_glmmTMB.R 0cab414df241775b26348d4593cbc985 *R/methods_glmx.R 8eab71793b700f0aa4b6778aa36d61f7 *R/methods_gmnl.R 
6b48654dc730d7ac84874d0ff7f416a6 *R/methods_hclust.R a6d992096f8e5bbdc7bf2da4e58074ff *R/methods_hglm.R 3b0ed757e21404cec1fcc25ee80105e9 *R/methods_htest.R a35ca918213a190eb6f5e46da57a95b8 *R/methods_ivfixed.R cb32343a61abac5e88f0a9f89e33aa90 *R/methods_ivprobit.R 55960685d785d71fdaa71a0156b1d955 *R/methods_ivreg.R d966034455f8f6e405caef9bde657bda *R/methods_kmeans.R 0cee7cf2c745b06eb950c1b567b3a210 *R/methods_lavaan.R 0255f221d4929a17b13808531779537f *R/methods_lcmm.R 487142aae40b25fdbb672d08f982f482 *R/methods_lm.R 7ca078d294d2d6f06e22987a93e2a447 *R/methods_lme4.R 42c06fdb8b2f906313a1381dd72d9b26 *R/methods_lmodel2.R ffb88aa5f89e838d4269fae424a5e8e2 *R/methods_lmtest.R 4c1cb4edf2684af9e47512b990d94a1b *R/methods_logistf.R 1dbb9bc5abf210082f27fb878b41b756 *R/methods_loo.R 4acbc5d57348a926901497f13804fba3 *R/methods_lqmm.R edebb6696258908f1cee51a4af079481 *R/methods_lrm.R 96e0215e26eed232f413239e0e5278af *R/methods_marginaleffects.R 54e755fd9e520d96a1c5972342fadf6c *R/methods_margins.R ddb47fa11799b4430d2231ccd8a1ee91 *R/methods_mass.R bb90294c2cfef72e27d30eb0bc8dde99 *R/methods_maxLik.R 81452349dc3f91d5a184e4bc42c72a71 *R/methods_mclogit.R 2035dedb2032732b8a0d3bc85dbc4b7d *R/methods_mclust.R de90f533bb91137ea8a2a0052e90b9c2 *R/methods_mcmc.R 27b689c089ddc5f33d34bb6409f21706 *R/methods_mediate.R 2acbdf600911350d1a794f4afa024a6a *R/methods_merTools.R f38d2d4a6f7ae8a6151847a384eebbd1 *R/methods_metafor.R c71e65bc9479224c12bc1de152cbd2ab *R/methods_metaplus.R 0f5b41b225ccfb42347e87cc870f9099 *R/methods_mfx.R f96c2df33c7dbca4ab6c880f8018c460 *R/methods_mgcv.R 256ce6b33f11e960d584726e85e5e4cc *R/methods_mhurdle.R 2512341f2d3bfba88aa4ce27836fbf1c *R/methods_mice.R 341b31ce1cb3c0f8718d85b6a80160f4 *R/methods_mixed.R 3ab1700d24ada08bf2293312e348866f *R/methods_mixmod.R d4f8b0ad7037d9d402b7089a801204b0 *R/methods_mixor.R 24500a4973ccd744733dda4ade8a1e93 *R/methods_mjoint.R ce803ef097139ef52a1f3bf6c2cbeb37 *R/methods_mlm.R 741eff53296afd0495141eada7e9dea1 *R/methods_mmrm.R ca2ada27cca91ff68d20513ba82747be *R/methods_model_fit.R 4aafb9fb79bedb9912ff3c4b746a396a *R/methods_modelbased.R 3243bbc7a7fc52ba90cbfa5b0a1f573e *R/methods_multcomp.R 9b018d99bbbffd473193b8a5cc45e1e0 *R/methods_multgee.R b29ad21d4b4a7dbfdc9f67eb392ef5b9 *R/methods_mvord.R 4440c0c736ca41de6cf099e8a63d402d *R/methods_nestedLogit.R ff00070329238881cd498cde436b332e *R/methods_nlme.R 2da48171460c27409524f2f83bf9ea4c *R/methods_ordinal.R e6a91d6f512fbc6c970792ffe9d75de4 *R/methods_other.R cd23ef88541390106c7be4fa28c4724c *R/methods_pam.R 20265e3fa2d58eb9e45983c4025e7d1b *R/methods_panelr.R 1d3b9a6ff9f725a708c660c7b908e310 *R/methods_pglm.R b4d66632f5743af68e6c29e75571129c *R/methods_phylolm.R f585d8c8e0039159d161766b637446e8 *R/methods_plm.R 416c565280aab1fe751398b306ec5a67 *R/methods_posterior.R 32d7e18fa51ac5746276f3074db5678a *R/methods_pscl.R e97d5b2771e9ab9e3aafd7f2a5959c82 *R/methods_psych.R 00611153a0796d302e4125246730735e *R/methods_quantreg.R 33495af1a6473d0f5122703b319f590c *R/methods_robmixglm.R 23f350a2e4e2cdb17b415a23110fb1dc *R/methods_robustlmm.R 1e94bc14383b82e0b18278be9dce4b3e *R/methods_rstan.R f2d3bde4ae1f730d046ec44f59776745 *R/methods_rstanarm.R 39def1ddae3f5c401c685045dfd894a2 *R/methods_sarlm.R 2e5db51c3752a810c83104226afd687c *R/methods_scam.R 7a6934ead7bc2ef8bbdabed6ed023798 *R/methods_selection.R 0a211abda37ad9ee17f5308a4d6282bf *R/methods_sem.R 0549bf0dbf62cb2915c1cb390fec9147 *R/methods_skewness_kurtosis.R 5371941d1513da06f0bdc10a57738bba *R/methods_spaMM.R 629f771c4b39682c5d40e82368341144 
*R/methods_speedglm.R 72eeb0ed9eb2af20725df7ed9ed3ab8a *R/methods_stats4.R e4a2646bbfbc9adc0ff3aa0fb24a6a1f *R/methods_survey.R fee60b2d0f7ac1e243f35f673f4b1db6 *R/methods_survival.R 881e58753c3e7fba340a4740f1d53c7a *R/methods_svy2lme.R 4fc9ad072f6099b2aca42a2998d1e6bb *R/methods_systemfit.R 9e217dbd4d68cec8f0e67f6fca858ee1 *R/methods_truncreg.R 001e965b171d6e46f0694b0c084c78dd *R/methods_varest.R efc51f3786f6505c7311a8bd0f3c9248 *R/methods_vgam.R 9c22c8f4029bb16cb45429e748216895 *R/methods_weightit.R 30ddca116bd12ee907c7edb8d36720af *R/methods_wrs2.R 74ddba5250453dcfe9aed71f75c85d09 *R/n_clusters.R 20dab924384fb9abd24e143d9b7ab893 *R/n_clusters_easystats.R bac2d0b3754f5471db39903b22c94abd *R/n_factors.R 13d57d06de27880265635121c2c7bc0e *R/n_parameters.R c87eb6dd4e530919680616a35b9366b9 *R/options.R 6b2b16725f98fe68882596bd5ee5ff2d *R/p_calibrate.R 9f8aab9108af3a3e4e59e014aa6d6cfd *R/p_direction.R 0febfce73c3c8afe8a047ed588ea35e2 *R/p_function.R 68458517a259cb5a29ae0c6e2256ee1f *R/p_significance.R ad4a00785f3a30417c4932bbca203217 *R/p_value_betwithin.R 426dd9af33bc5df1842f4ea83e106c3c *R/p_value_kenward.R 339f24be8e8a13165b97390567b45153 *R/p_value_ml1.R bb499d331d58c81bb97e36fb03a25f85 *R/p_value_satterthwaite.R 5ba5bd8380a2b3c699e630aca9682f00 *R/parameters-package.R 2fe048a157e671c5ab2b50b32cdc3b3a *R/parameters_type.R f330102881006da7039429176475eee4 *R/plot.R 0a1f91097874183f700574dc25b166b2 *R/pool_parameters.R 26ec9dd083ddf4b1cdd74340b6410f65 *R/principal_components.R 86639ca448a0ef453e92b4804c9fd84f *R/print.compare_parameters.R 3572f04057af0a0aa867d72b661f893f *R/print.parameters_model.R 1a8895d6927fbe3c23899f579659b568 *R/print_html.R cd40cd65bf03657a51c2d4f89980e33e *R/print_md.R 4f2835839db69a98fc80f17a1a689722 *R/random_parameters.R 00493803f64747b7163596b3fc18b69d *R/reduce_parameters.R eb3bd78fdec490ec43097188b608cc4e *R/reexports.R 26024be7b956762a332bf600548bf09a *R/reshape_loadings.R eb3af6293ff4fec0fa16c65a1c82cbe8 *R/select_parameters.R a8680ff58d937a154806aaa1f51f97e1 *R/simulate_parameters.R 838493d0ba8bf34928efab47a3ecf7a1 *R/sort_parameters.R b19c7c24a39de18843109e026bd842a0 *R/standard_error_kenward.R a5eb604c29599ba9330354300314485f *R/standard_error_satterthwaite.R 2dc4bd329ad1b0635411548c1e50ffa2 *R/standardize_info.R f76f5b7bb0c0319b845a67b90b94fa69 *R/standardize_parameters.R e75d8e9e8fbf67797e67af723d7064b8 *R/standardize_posteriors.R cf52d803db10e79853c39ea27ca6d897 *R/utils.R dfc8118fcaaaed85eb4f6e6caf9a6bf3 *R/utils_cleaners.R 2b3b26fc248bbc95d4fc2d3177c300b6 *R/utils_clustering.R a1dbab3034efa27deecf2030c19f9559 *R/utils_format.R 60cd1df84d9c1f93d3b86462a8f19c2d *R/utils_model_parameters.R e9099e5c9b0e4721435656eed59cdd03 *R/utils_pca_efa.R 89895e2d88efc717efb4ff58bc1070b6 *R/zzz.R f81475734488a261fca809e1681c03d7 *README.md d54042561b90aafd8a9a202ea63e1e4b *build/partial.rdb 0ad22e3d9e320d162afe94b4a5726ea6 *build/vignette.rds fed293a745c7a75939e2f2156aaa9afe *data/fish.RData fca1e9b681b9f432165601e6510c483c *data/qol_cancer.RData d9a675761b0b4ec7816a274c92857f5e *inst/CITATION 099e94fc098793a4f1c7eee6fd1fc7a4 *inst/WORDLIST 7342a6d63f4cc4501afc55be1d148740 *inst/doc/overview_of_vignettes.R d9dd956924410f30851a0ed54d8bb41c *inst/doc/overview_of_vignettes.Rmd ac4d9be1298fc2bce2d12d10c4ec3c0f *inst/doc/overview_of_vignettes.html 2982b607b27d5dc369adcb93d726324f *man/bootstrap_model.Rd 44a27794e6fc22d06e73be6eeb5a44f0 *man/bootstrap_parameters.Rd 2025c713db2beb4da21fb252802355d9 *man/ci.default.Rd d111f5645d9a0341f361fbf450d3876e *man/cluster_analysis.Rd 
469236e95f075b3f84dd06c23452e102 *man/cluster_centers.Rd 26b1ff0c44659ecb01542a257c698604 *man/cluster_discrimination.Rd 0b44c6af54fb0c2c270729f0717ee0a3 *man/cluster_meta.Rd 22d45ca9f9552b74c949621c2748b2e1 *man/cluster_performance.Rd e7de64f1d96496066656845eb12fa448 *man/compare_parameters.Rd 2adac4070069e3e98a8604bfec542390 *man/convert_efa_to_cfa.Rd 270e607de909386ffe161f86459c370f *man/degrees_of_freedom.Rd 99d2438f2577ad209fbe0d3b8fde66b5 *man/display.parameters_model.Rd 74951176afaa93a911e117b43ee97dc1 *man/dominance_analysis.Rd ada4525e7f51696872af8e67a11a8a03 *man/dot-data_frame.Rd d497a17727c64a877268d9e06f0cd046 *man/dot-factor_to_dummy.Rd 6f88971941994882a00c6f6fe9b59ea2 *man/dot-filter_component.Rd d345ca758850004bcc8f2dfbfa129a77 *man/dot-n_factors_bartlett.Rd d773be8d96a78e87754d7ffd0e64102b *man/dot-n_factors_bentler.Rd 8d7187c3a82dbaaea1fa0368bea3914a *man/dot-n_factors_cng.Rd 9545ed998c17d115ed152a3f269530fd *man/dot-n_factors_mreg.Rd 1e126d6996539eff78565cf11aeb80ec *man/dot-n_factors_scree.Rd 4e979353266706b75a554fe0b05420ae *man/dot-n_factors_sescree.Rd ba6177f91009ddd8318a79e4b77c0a6e *man/equivalence_test.lm.Rd 5d0674ede1ac4c8924c0e66bba190f54 *man/factor_scores.Rd b21e4345f92f98ffb97d0f3b06492a42 *man/figures/card.png 80c2c196a70f7211c90bf2c510d39d41 *man/figures/figure1.png 763d72787cad1fca6fc76de99da06308 *man/figures/figure2.png 34f97573ccc6dab523d52cbd156882dd *man/figures/logo.png 77bd5b3c932e7c6c922d81e01d4f22f4 *man/figures/unconditional_interpretation.png dfb7d2691aa5df65c196bdd538860c75 *man/fish.Rd ed5f06e2c08127ed5b98439ac44b725f *man/format_df_adjust.Rd 3551b5b180212346989acfd2bfb14485 *man/format_order.Rd 0d3085434ec747b5f23d015056783737 *man/format_p_adjust.Rd 69fbba3afb04972dadd4f4f1c23c1a4b *man/format_parameters.Rd b87cfe6d77ee4d4a0dce04a3178b2ac3 *man/get_scores.Rd 60ffaccdbe02d4eeb0f52807e9b02043 *man/model_parameters.BFBayesFactor.Rd 1acfc627e2c411ed252af5650317137c *man/model_parameters.Rd d59b3dfce2c2d06543f8aea0f7ae78f3 *man/model_parameters.aov.Rd a3e0a595bbf23868fade196517f1036b *man/model_parameters.befa.Rd 6aa778f1f13f6a35f6dcbfd6a66d0b8a *man/model_parameters.brmsfit.Rd 547da8c680f64155976afeee2564df9d *man/model_parameters.cgam.Rd c05f8d450e4343eff8ccd0fa7ac5ec42 *man/model_parameters.compare.loo.Rd a972dbe358f49671d2b1c7763a1f9434 *man/model_parameters.default.Rd 96d99d34e1e2d1e3e9a9096aec621cc7 *man/model_parameters.glht.Rd 3d8bd51d731db6f2eca42f3f0fc8c320 *man/model_parameters.glimML.Rd cae7399b1a53f31c0db6aa9a291dc049 *man/model_parameters.glmmTMB.Rd 7a491fc9638424b8a116a7bc1ae23c3d *man/model_parameters.hclust.Rd ed8631637c9697bd34fdb45a1b6bc41c *man/model_parameters.htest.Rd 7a2d84b89be983f7f2125f557e0acfee *man/model_parameters.mira.Rd b45967f189b2149a7dc4bf2714659a44 *man/model_parameters.mlm.Rd 5321f561bd0d2ef845375640a18aacd8 *man/model_parameters.principal.Rd be52b21c03b4fa778aa4f4505c9336c9 *man/model_parameters.rma.Rd 6263ffbfad14d845cee2c4d3a1794c46 *man/model_parameters.t1way.Rd 6d8830c85efd5de5e1c98d831a3a6bd1 *man/model_parameters.zcpglm.Rd 59c67bb9fa77eabe52de63a740001557 *man/n_clusters.Rd b119fd61a0d51406a679c6167a4b05fe *man/n_factors.Rd 4780eae6f9cb6ffa108cc6845be60779 *man/p_calibrate.Rd e800a9f12f902b2f8f0da7b6a31822fe *man/p_direction.lm.Rd 4e29b2a210e57a4461a5e60412c7fa9b *man/p_function.Rd 66c204951fc50171ade25e7acf30b575 *man/p_significance.lm.Rd 8a51a2f38a98f0b6d2a42dc789d598e0 *man/p_value.Rd 92c8ada9bace97d7fda5ba54f39e86b0 *man/p_value_betwithin.Rd 1f2e0fb5fc74e46b8d4f5f1d3c9f6b06 *man/p_value_kenward.Rd 
a9b83928c7683397e31cec0f4bb19b3c *man/p_value_ml1.Rd 3c985d78ae9007388d8df8937bc1e21d *man/p_value_satterthwaite.Rd 925aeca8f0466fed9f63991ced041d61 *man/parameters-options.Rd 8d270167fc4c621ef9b42edd8130f099 *man/parameters-package.Rd fe31fb663bdf80533d7e94f995c788b2 *man/parameters_type.Rd 58dcd6dcad87c1c41ec2a5ac28e19a11 *man/pool_parameters.Rd 96dbea0125a5bc4271953baa9f931b18 *man/predict.parameters_clusters.Rd 5d086eda415dfa658269d1f25b385e63 *man/principal_components.Rd bea7404aac7ac8eec708633d73e46827 *man/print.compare_parameters.Rd b51c5e9e008ba051d294823e995c8a1c *man/print.parameters_model.Rd d1e2d2ee9e66ebab0de28c9f432defb9 *man/qol_cancer.Rd 853a6a36449195197cac381737241a16 *man/random_parameters.Rd c81bc75d506609671a7537cc9a466c45 *man/reduce_parameters.Rd 4d978acca856b48c77434e9585634391 *man/reexports.Rd a1c585e98d43946f46e31a4e186f9a7e *man/reshape_loadings.Rd 9a6d66d9c4e6beff6d2a8644fe785f49 *man/select_parameters.Rd 98902a418c45cf5465da2aeb23f2e0b3 *man/simulate_model.Rd 2696d54fa4ea9a93b49eb9ebd128e95b *man/simulate_parameters.Rd 73e7d1ef700a874d334947bafcc09664 *man/sort_parameters.Rd 0a71ed94f2b09b4dee073c5197be88e9 *man/standard_error.Rd ff4ab22e9dd34f3289211a069ee543ac *man/standardize_info.Rd 1c8775185c7dfef6fa5ec2a4e5374b15 *man/standardize_parameters.Rd 5290644057e754afb8ed29ffbdd4b863 *tests/testthat.R 6d3b3fb3c94edf109bb203d4dfe39e19 *tests/testthat/test-GLMMadaptive.R 4707d67ac4e4f5aee5a9869acd9eafcf *tests/testthat/test-Hmisc.R 315816def4ab95999ca7ca12e802ea3d *tests/testthat/test-MCMCglmm.R 18339153557775cd0eb54ac37364c071 *tests/testthat/test-PMCMRplus.R 219100322f2f1374317147726d89ef30 *tests/testthat/test-averaging.R 079e987fe20981e86bb9a98d11cf96c1 *tests/testthat/test-backticks.R 9f21ee5706d3853493c8cbcef4f51663 *tests/testthat/test-base.R 158e1d4b1dce7b6ad86a50871e340f63 *tests/testthat/test-betareg.R 41573fc56eb4602c253afe36cc813830 *tests/testthat/test-bootstrap_emmeans.R 626c2b48f5898b44cee57e9d8b8ecada *tests/testthat/test-bootstrap_parameters.R 27b2ea108f43b89c64b0856208a75243 *tests/testthat/test-bracl.R 4e312be7be258caa5b8ee175cc164161 *tests/testthat/test-brms.R daab505765afa8b0eb8f0343cb67e70c *tests/testthat/test-car.R bdcc53713b0231c4dd4056f937b715ee *tests/testthat/test-ci.R 733ca026aadc081445ee63377db16425 *tests/testthat/test-cluster_analysis.R 402c36d8924915e3f6d20405eb90cdde *tests/testthat/test-compare_parameters.R 2da17d1fde6ebcdb7703ccf32b345654 *tests/testthat/test-complete_separation.R 2030d7ea9bb96de36e18a4b6d955c205 *tests/testthat/test-coxph.R 0ae8f50b51b31118249f0360b49b6089 *tests/testthat/test-dominance_analysis.R e585dc41f5950f04144b7de6e763d70b *tests/testthat/test-efa.R 3ea3d4f269f435a04b12697e58413cbc *tests/testthat/test-emmGrid-df_colname.R d914bff9fe3c97c983a8cc79fca86c1f *tests/testthat/test-equivalence_test.R 4fbec374b87f1977209322dc5aa8e5e7 *tests/testthat/test-estimatr.R 0039b64e4d1acfc672c448106cbf01d7 *tests/testthat/test-factor_analysis.R 80803d664707689aa9a1b6e9412d56b2 *tests/testthat/test-format.R dec63cbd4f3f4849ee5a5d67c3172c59 *tests/testthat/test-format_model_parameters.R e9e9a355c815ab56453ced52fff9ce77 *tests/testthat/test-format_model_parameters2.R 6ac043255de37a0a9577c72d2a4405da *tests/testthat/test-format_p_adjust.R a84f73d465aee8ccaf05b2931982b7ad *tests/testthat/test-format_parameters.R 4fd30b7e1520785801509d3d079ef625 *tests/testthat/test-gam.R ae818c8191e0535c2e663917868bd04b *tests/testthat/test-gamm.R 3dca0bcc3cc2b26d62136caffb5c9c9a *tests/testthat/test-gee.R 5798145d8f4e19d9f779016a6aebfe87 
*tests/testthat/test-geeglm.R 720b827b9cb8de7914600b189efdb43c *tests/testthat/test-get_scores.R 744c86d4c939193757db8c039a158231 *tests/testthat/test-glmer.R eef813622ecc022c98d82e1d01e2ef5f *tests/testthat/test-glmmTMB-2.R a4af9d6e805201b8a6a11c1e11535063 *tests/testthat/test-glmmTMB-profile_CI.R 7171806448f47ecd57dfb145c9d653e8 *tests/testthat/test-glmmTMB.R 1bcbb7610f901a894465019cdbb1d1c3 *tests/testthat/test-gls.R c25d2e64f5103fd09fff82f7cce5bb48 *tests/testthat/test-group_level_total.R 2c20c542b5cea26bac9cfc311d492a97 *tests/testthat/test-helper.R 948d0ee9fb1ab06346870589b84e9142 *tests/testthat/test-include_reference.R ccfadf4697a7fd588004a6259408afab *tests/testthat/test-ivreg.R 378d92a77c563e829df688c8f933f10a *tests/testthat/test-lavaan.R d0523e0bfaf8a21a5c909d02e7a8b4db *tests/testthat/test-lcmm.R affc0de64eb9e599cf55d8a6c96e3c93 *tests/testthat/test-lme.R 94dde5f73d12dc23905879b3174575c2 *tests/testthat/test-lmerTest.R 659917c6b21f97a58e4bec8b89a045be *tests/testthat/test-marginaleffects.R b58b45bc8cdc1af80340d5c6f1cff1f1 *tests/testthat/test-mipo.R 6f6d859c2d6c26cce9514f323d853b30 *tests/testthat/test-mira.R 2314dfc155f4dd2946e6895b5e0a8946 *tests/testthat/test-mlm.R 6a202108929d57dbc276a59a0e5cfc80 *tests/testthat/test-mmrm.R e8d30117b9baf8bec77c79340ec4b398 *tests/testthat/test-model_parameters.BFBayesFactor.R 3283009a8e100a74645141047b63b4bd *tests/testthat/test-model_parameters.MASS.R f62965bfcee952d72115545eee1aeea4 *tests/testthat/test-model_parameters.afex_aov.R 872dbf02c1ae9a1249c71f2e8cbca226 *tests/testthat/test-model_parameters.anova.R b5db2e4d2536c7df0887c0ed7db7e9cf *tests/testthat/test-model_parameters.aov.R 02aa7eb4709ac1ae64047c89db6a218d *tests/testthat/test-model_parameters.aov_es_ci.R 073571251b3edbba2b0f973b359e74d9 *tests/testthat/test-model_parameters.blmerMod.R b2f17fcf0e6be6a6760e66ffa124774e *tests/testthat/test-model_parameters.bracl.R aa555b40a7835004afcc0935c31bed9a *tests/testthat/test-model_parameters.cgam.R c6016c54f02ab954ff264e6414257c33 *tests/testthat/test-model_parameters.coxme.R 187a7f403cc39b2494d0eee5227fd7a4 *tests/testthat/test-model_parameters.cpglmm.R 4e8e5bf2df790fbddd1b2f2dabf2d3b3 *tests/testthat/test-model_parameters.efa_cfa.R 7e04bd526faa08f939f70f712773f456 *tests/testthat/test-model_parameters.epi2x2.R 417cb54183a76d0f67474b154f21b772 *tests/testthat/test-model_parameters.fixest.R 73a9782862b23b827bb23ec6e91a16d1 *tests/testthat/test-model_parameters.fixest_multi.R 440ab1ca53a5827da01354acd9ac2f9d *tests/testthat/test-model_parameters.gam.R 36a864387a3438c6a00037eec1d39ed1 *tests/testthat/test-model_parameters.glht.R cf4e51d3fe31dd58bf99aaf4a65cf0b7 *tests/testthat/test-model_parameters.glm.R d3b8eae2e6bb53fac667c8ed093bc20e *tests/testthat/test-model_parameters.glmgee.R 704f7674f7ac1fefe7b6c8b33309bd57 *tests/testthat/test-model_parameters.htest.R 75371d99fa47b84d9dcff6b792bc2438 *tests/testthat/test-model_parameters.hurdle.R 99b54414602f84c56923e38883aa3e82 *tests/testthat/test-model_parameters.lme.R 974884fb67246b446a849fad852cc73f *tests/testthat/test-model_parameters.logistf.R 520d6c72e25b50f585ce8b0fb2c57b0a *tests/testthat/test-model_parameters.logitr.R e351cf250c300973605a89eb69ae3ed4 *tests/testthat/test-model_parameters.lqmm.R 5b84af88c0616dc21f7ae77d88f94f18 *tests/testthat/test-model_parameters.maov.R 2ed30cec02c47bb9a49eb3f0dedb5e1e *tests/testthat/test-model_parameters.mclogit.R f100087b9a91aa15827f3f8ca30d1552 *tests/testthat/test-model_parameters.mediate.R 32738a8e344a949d67d244717a83ea58 
*tests/testthat/test-model_parameters.metaBMA.R 5d485be293f55a6aabf2ecf4bd8eec9d *tests/testthat/test-model_parameters.metafor.R 19dae6116410b5cf0b356411bb471c10 *tests/testthat/test-model_parameters.mfx.R de7f172b4f1d40188580d0c5c2cf224a *tests/testthat/test-model_parameters.mixed.R 3866468a65a2d36e17088fb97ef9cd38 *tests/testthat/test-model_parameters.mle2.R 6feb54b53bfb2669ae33a597518636c2 *tests/testthat/test-model_parameters.nnet.R 74a1bcd1d87197721099274975daa608 *tests/testthat/test-model_parameters.pairwise.htest.R 04fdb727fae1fafa1922da65b994e292 *tests/testthat/test-model_parameters.truncreg.R cd38a981c7ec8174cb32fe9617789ba7 *tests/testthat/test-model_parameters.vgam.R 2d84a833b48ba8b9eb894063e8a353f4 *tests/testthat/test-model_parameters_df.R 15dc2d9b4ae5aab8ebd39bd4fe796018 *tests/testthat/test-model_parameters_df_method.R 0c6fa6b65cf3ebf6adb702a26c631f4d *tests/testthat/test-model_parameters_labels.R 1d55a5ee1f08935aa5fa1f89f769f982 *tests/testthat/test-model_parameters_mixed_coeforder.R 4628f3860d9eda66a9fa773e5e91e695 *tests/testthat/test-model_parameters_ordinal.R 002c97429db93d2cfad3e57bf7c442f2 *tests/testthat/test-model_parameters_random_pars.R 889f27f66bb742bc1569a28406477bb8 *tests/testthat/test-model_parameters_robust.R 13284b8cbd5c933128d502f5f8e2fe56 *tests/testthat/test-model_parameters_std.R b5a29328aa30a45fd34d44d249d2a5bb *tests/testthat/test-model_parameters_std_mixed.R 26458b987225ff7cf5e8063d1d51bc5e *tests/testthat/test-n_factors.R 178bdad755a06c4c3e6670e181846a52 *tests/testthat/test-nestedLogit.R d682889a5ab2af44cb9c10f0c2c46f5f *tests/testthat/test-ordered.R a355652339007263b49dbe21219a54cf *tests/testthat/test-p_adjust.R 1862202aa53fdd2a7d75715f118985ee *tests/testthat/test-p_calibrate.R 66654efe6243a9ae2d8664cd80b3d08f *tests/testthat/test-p_direction.R e501a2a906fcaefb91c563edbf43732d *tests/testthat/test-p_function.R 401b6295abc16ca8eff67d89196a3d90 *tests/testthat/test-p_significance.R a84abee8338f726b8f33b73c26bf4d23 *tests/testthat/test-p_value.R 8df1623e825478b6a5cedb8c364c18fb *tests/testthat/test-panelr.R 90d7af6119b4923fb4fc5c5c43a2c486 *tests/testthat/test-parameters_selection.R f5ba81964037e535fda30f770014d9f8 *tests/testthat/test-parameters_table.R 58d35b25e974c09c2b3599d04c9cede6 *tests/testthat/test-parameters_type-2.R 772c95c63a8794717af82a43d0679a4c *tests/testthat/test-parameters_type.R 9fdedd4d8d01c55b37746891c3b92617 *tests/testthat/test-pca.R 05af5c5297e971349be0bd4736144e2d *tests/testthat/test-pipe.R a86c6a34819184c43f5dbd76f6f91b7c *tests/testthat/test-plm.R 7f0a5464254d8c02672accab5b8890db *tests/testthat/test-polr.R 619b177da84ad34cfe57458bcf507be8 *tests/testthat/test-pool_parameters.R aaf55fb84b9ff9fd0d24b159a777ccd5 *tests/testthat/test-posterior.R 1a8d9192f9d7966969f70ecceaf1b94d *tests/testthat/test-pretty_names.R 800b314e662bdd11a1d48617540425d2 *tests/testthat/test-print_AER_labels.R 611b44a95786d2b6ec0915230b3e9a9e *tests/testthat/test-printing-stan.R b5a8d639dab5f63ba767865d73ef6447 *tests/testthat/test-printing.R efa800d7fc84d3481d64f299de544bd9 *tests/testthat/test-printing2.R 4245ef6260c46b60141dbe958474db74 *tests/testthat/test-printing_reference_level.R 5fe32565c46646baf2a6b46e02524bc8 *tests/testthat/test-quantreg.R 741c58c9c019c78264f945fc984cee65 *tests/testthat/test-random_effects_ci-glmmTMB.R 99da6bdcadf9bc3f6bea9f18fa83ae23 *tests/testthat/test-random_effects_ci.R 60d5916e69d6e075bb2308fe4a877f19 *tests/testthat/test-rank_deficienty.R bf43313d85a006900cfd0bf4496998a5 *tests/testthat/test-robust.R 
60f04bd34dba7669a611e7250b8d2cd5 *tests/testthat/test-rstanarm.R a6f627d4f7ddbb697a22994137d89bff *tests/testthat/test-sampleSelection.R 6f67e57cd8c25d65d11785a645d53adb *tests/testthat/test-serp.R 6afceb2d59443a7678ebdae942130840 *tests/testthat/test-simulate_model.R e74028738355c993259e0717957a7bea *tests/testthat/test-simulate_parameters.R 73c25244107f5213cd167431c460b50f *tests/testthat/test-sort_parameters.R 5b34c7190db60a53dec966a8bc5b58b4 *tests/testthat/test-standardize_info.R 6c3a2210e5ae1acbd00899d5c0dd56a1 *tests/testthat/test-standardize_parameters.R b9f53f0b2eae2bfbf416b7a53a6590d5 *tests/testthat/test-survey.R 098947ff29d64f3e280aff8c6d0992d8 *tests/testthat/test-svylme.R 28f6c7f83e389923cd818854f49117e6 *tests/testthat/test-svyolr.R 1b2d2b012db8111eb2dbd4a1133949c0 *tests/testthat/test-tobit.R 47287754f5609d2933523c8b94f64c02 *tests/testthat/test-visualisation_recipe.R a0c01b1f0965715a0b387d4d458d6f61 *tests/testthat/test-weightit.R f62cfb3b00df8800ccb745a762e72c78 *tests/testthat/test-wrs2.R 53804133b2114966510ba30647a53e83 *tests/testthat/test-zeroinfl.R d9dd956924410f30851a0ed54d8bb41c *vignettes/overview_of_vignettes.Rmd parameters/R/0000755000176200001440000000000015111054715012616 5ustar liggesusersparameters/R/methods_coxme.R0000644000176200001440000001437715057525051015610 0ustar liggesusers#' @export model_parameters.coxme <- function(model, ci = 0.95, ci_method = NULL, ci_random = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, effects = "all", group_level = FALSE, exponentiate = FALSE, p_adjust = NULL, vcov = NULL, vcov_args = NULL, wb_component = FALSE, include_info = getOption("parameters_mixed_info", FALSE), include_sigma = FALSE, keep = NULL, drop = NULL, verbose = TRUE, ...) { insight::check_if_installed("lme4") dots <- list(...) # set default if (is.null(ci_method)) { if (isTRUE(bootstrap)) { ci_method <- "quantile" } else { ci_method <- switch(insight::find_statistic(model), `t-statistic` = "residual", "wald" ) } } # p-values, CI and se might be based on Wald, or KR ci_method <- tolower(ci_method) if (isTRUE(bootstrap)) { ci_method <- insight::validate_argument( ci_method, c("hdi", "quantile", "ci", "eti", "si", "bci", "bcai") ) } else { ci_method <- insight::validate_argument( ci_method, c( "wald", "normal", "residual", "ml1", "betwithin", "satterthwaite", "kenward", "kr", "boot", "profile", "uniroot" ) ) } # which component to return? effects <- insight::validate_argument( effects, c("fixed", "random", "grouplevel", "total", "random_total", "all") ) params <- NULL # group level estimates ================================================= # ======================================================================= # for coef(), we don't need all the attributes and just stop here if (effects %in% c("total", "random_total")) { params <- .group_level_total(model) params$Effects <- "total" class(params) <- c("parameters_coef", "see_parameters_coef", class(params)) return(params) } # group-level estimates (BLUPs), handle alias if (effects == "grouplevel") { effects <- "random" group_level <- TRUE } # post hoc standardize only works for fixed effects... if (!is.null(standardize) && standardize != "refit") { if (!missing(effects) && effects != "fixed" && verbose) { insight::format_alert( "Standardizing coefficients only works for fixed effects of the mixed model." ) } effects <- "fixed" }
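# (Illustrative note, not part of the upstream sources: given the alias handling above, the following two hypothetical calls are equivalent.)
# model_parameters(m, effects = "grouplevel")
# model_parameters(m, effects = "random", group_level = TRUE)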
# for refit, we completely refit the model, then extract parameters, # CI etc. as usual; therefore, we set "standardize" to NULL if (!is.null(standardize) && standardize == "refit") { model <- datawizard::standardize(model, verbose = FALSE) standardize <- NULL } # fixed effects ================================================= # =============================================================== if (effects %in% c("fixed", "all")) { # Processing if (bootstrap) { params <- bootstrap_parameters( model, iterations = iterations, ci = ci, ... ) if (effects != "fixed") { effects <- "fixed" if (verbose) { insight::format_alert("Bootstrapping only returns fixed effects of the mixed model.") } } } else { fun_args <- list( model, ci = ci, ci_method = ci_method, standardize = standardize, p_adjust = p_adjust, wb_component = wb_component, keep_parameters = keep, drop_parameters = drop, verbose = verbose, include_sigma = include_sigma, include_info = include_info, vcov = vcov, vcov_args = vcov_args ) fun_args <- c(fun_args, dots) params <- do.call(".extract_parameters_mixed", fun_args) } params$Effects <- "fixed" # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) } att <- attributes(params) # add random effects, either group level or re variances # ====================================================== params <- .add_random_effects_lme4( model, params, ci, ci_method, ci_random, effects, group_level, verbose ) # clean-up # ====================================================== # remove empty column if (!is.null(params$Level) && all(is.na(params$Level))) { params$Level <- NULL } # due to rbind(), we lose attributes from "extract_parameters()", # so we add those attributes back here... if (!is.null(att)) { attributes(params) <- utils::modifyList(att, attributes(params)) } params <- .add_model_parameters_attributes( params, model, ci = ci, exponentiate, bootstrap, iterations, ci_method = ci_method, p_adjust = p_adjust, verbose = verbose, include_info = include_info, group_level = group_level, wb_component = wb_component, ... ) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export standard_error.coxme <- function(model, ...) { beta_coef <- model$coefficients if (length(beta_coef) > 0) { .data_frame( Parameter = .remove_backticks_from_string(names(beta_coef)), SE = sqrt(diag(stats::vcov(model))) ) } } ## TODO add ci_method later? #' @export p_value.coxme <- function(model, ...) { stat <- insight::get_statistic(model) if (!is.null(stat)) { .data_frame( Parameter = stat$Parameter, p = as.vector(1 - stats::pchisq(stat$Statistic^2, df = 1)) ) } } parameters/R/methods_gmnl.R0000644000176200001440000000171214355245205015427 0ustar liggesusers#' @export standard_error.gmnl <- function(model, ...) { cs <- summary(model)$CoefTable se <- cs[, 2] pv <- .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) # rename intercepts intercepts <- grepl(":(intercept)", pv$Parameter, fixed = TRUE) pv$Parameter[intercepts] <- sprintf( "(Intercept: %s)", sub(":(intercept)", replacement = "", pv$Parameter[intercepts], fixed = TRUE) ) pv } #' @export p_value.gmnl <- function(model, ...) 
{ cs <- summary(model)$CoefTable p <- cs[, 4] # se <- cs[, 2] pv <- .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) # rename intercepts intercepts <- grepl(":(intercept)", pv$Parameter, fixed = TRUE) pv$Parameter[intercepts] <- sprintf( "(Intercept: %s)", sub(":(intercept)", replacement = "", pv$Parameter[intercepts], fixed = TRUE) ) pv } parameters/R/format_p_adjust.R0000644000176200001440000001616615033425412016133 0ustar liggesusers#' Format the name of the p-value adjustment methods #' #' Format the name of the p-value adjustment methods. #' #' @param method Name of the method. #' #' @examples #' library(parameters) #' #' format_p_adjust("holm") #' format_p_adjust("bonferroni") #' @return A string with the full surname(s) of the author(s), including year of publication, for the adjustment-method. #' @export format_p_adjust <- function(method) { method <- tolower(method) switch(method, holm = "Holm (1979)", hochberg = "Hochberg (1988)", hommel = "Hommel (1988)", bonferroni = "Bonferroni", fdr = "Benjamini & Hochberg (1995)", bh = "Benjamini & Hochberg (1995)", by = "Benjamini & Yekutieli (2001)", tukey = "Tukey", scheffe = "Scheffe", sidak = "Sidak", `sup-t` = "Simultaneous confidence bands", method ) } # p-value adjustment ----- .p_adjust <- function(params, p_adjust, model = NULL, verbose = TRUE) { # check if we have any adjustment at all, and a p-column if (!is.null(p_adjust) && "p" %in% colnames(params) && p_adjust != "none") { ## TODO add "mvt" method from emmeans # prepare arguments all_methods <- c(stats::p.adjust.methods, "tukey", "scheffe", "sidak", "sup-t") # for interaction terms, e.g. for "by" argument in emmeans # pairwise comparison, we have to adjust the rank resp. the # number of estimates in a comparison family rank_adjust <- .p_adjust_rank(model, params) # only proceed if valid argument-value if (tolower(p_adjust) %in% tolower(all_methods)) { # save old values, to check if p-adjustment worked old_p_vals <- params$p # find statistic column stat_column <- match(c("F", "t", "Statistic"), colnames(params)) stat_column <- stat_column[!is.na(stat_column)] if (tolower(p_adjust) %in% tolower(stats::p.adjust.methods)) { # base R adjustments params$p <- stats::p.adjust(params$p, method = p_adjust) } else if (tolower(p_adjust) == "tukey") { # tukey adjustment result <- .p_adjust_tukey(params, stat_column, rank_adjust, verbose) params <- result$params verbose <- result$verbose } else if (tolower(p_adjust) == "scheffe" && !is.null(model)) { # scheffe adjustment params <- .p_adjust_scheffe(model, params, stat_column, rank_adjust) } else if (tolower(p_adjust) == "sidak") { # sidak adjustment params$p <- 1 - (1 - params$p)^(nrow(params) / rank_adjust) } else if (tolower(p_adjust) == "sup-t") { # sup-t adjustment params <- .p_adjust_supt(model, params) } if (isTRUE(all(old_p_vals == params$p)) && !identical(p_adjust, "none") && verbose) { insight::format_warning(paste0("Could not apply ", p_adjust, "-adjustment to p-values. 
Either something went wrong, or the non-adjusted p-values were already very large.")) # nolint } } else if (verbose) { insight::format_alert(paste0("`p_adjust` must be one of ", toString(all_methods))) } } params } # calculate rank adjustment ----- .p_adjust_rank <- function(model, params) { tryCatch( { correction <- 1 by_vars <- model@misc$by.vars if (!is.null(by_vars) && by_vars %in% colnames(params)) { correction <- insight::n_unique(params[[by_vars]]) } correction }, error = function(e) { 1 } ) } # tukey adjustment ----- .p_adjust_tukey <- function(params, stat_column, rank_adjust = 1, verbose = TRUE) { df_column <- colnames(params)[stats::na.omit(match(c("df", "df_error"), colnames(params)))][1] if (!is.na(df_column) && length(stat_column)) { params$p <- suppressWarnings(stats::ptukey( sqrt(2) * abs(params[[stat_column]]), nmeans = nrow(params) / rank_adjust, df = params[[df_column]], lower.tail = FALSE )) # for specific contrasts, ptukey might fail, and the tukey-adjustment # could just be a simple p-value calculation if (all(is.na(params$p))) { params$p <- 2 * stats::pt( abs(params[[stat_column]]), df = params[[df_column]], lower.tail = FALSE ) verbose <- FALSE } } list(params = params, verbose = verbose) } # scheffe adjustment ----- .p_adjust_scheffe <- function(model, params, stat_column, rank_adjust = 1) { df_column <- colnames(params)[stats::na.omit(match(c("df", "df_error"), colnames(params)))][1] if (!is.na(df_column) && length(stat_column)) { # 1st try scheffe_ranks <- try(qr(model@linfct)$rank, silent = TRUE) # 2nd try if (inherits(scheffe_ranks, "try-error") || is.null(scheffe_ranks)) { scheffe_ranks <- try(model$qr$rank, silent = TRUE) } if (inherits(scheffe_ranks, "try-error") || is.null(scheffe_ranks)) { scheffe_ranks <- nrow(params) } scheffe_ranks <- scheffe_ranks / rank_adjust params$p <- stats::pf(params[[stat_column]]^2 / scheffe_ranks, df1 = scheffe_ranks, df2 = params[[df_column]], lower.tail = FALSE ) } params } # sup-t adjustment ----- .p_adjust_supt <- function(model, params) { if ("Component" %in% colnames(params) && insight::n_unique(params$Component) > 1) { # if we have multiple components, we adjust each component separately for (component in unique(params$Component)) { if (!is.na(component)) { params[which(params$Component == component), ] <- .supt_adjust( params[which(params$Component == component), ], model, component = component ) } } params } else { .supt_adjust(params, model) } } .supt_adjust <- function(params, model, component = NULL) { insight::check_if_installed("mvtnorm") # get correlation matrix, based on the covariance matrix vc <- .safe(stats::cov2cor(insight::get_varcov(model, component = component))) if (is.null(vc)) { insight::format_warning("Could not calculate covariance matrix for `sup-t` adjustment.") return(params) } # get confidence interval level, or set default ci_level <- .safe(params$CI[1]) if (is.null(ci_level)) { ci_level <- 0.95 } # find degrees of freedom column, if available df_column <- intersect(c("df", "df_error"), colnames(params))[1] if (is.na(df_column)) { return(params) } # calculate updated confidence interval level, based on simultaneous # confidence intervals (https://onlinelibrary.wiley.com/doi/10.1002/jae.2656) crit <- mvtnorm::qmvt(ci_level, df = params[[df_column]][1], tail = "both.tails", corr = vc)$quantile # update confidence intervals params$CI_low <- params$Coefficient - crit * params$SE params$CI_high <- params$Coefficient + crit * params$SE # update p-values for (i in seq_len(nrow(params))) { params$p[i] <- 1 - 
mvtnorm::pmvt( lower = rep(-abs(stats::qt(params$p[i] / 2, df = params[[df_column]][i])), nrow(vc)), upper = rep(abs(stats::qt(params$p[i] / 2, df = params[[df_column]][i])), nrow(vc)), corr = vc, df = params[[df_column]][i] ) } params } parameters/R/dof_satterthwaite.R0000644000176200001440000000020415073732442016464 0ustar liggesusers#' @rdname p_value_satterthwaite #' @export dof_satterthwaite <- function(model) { insight::get_df(model, "satterthwaite") } parameters/R/format.R0000644000176200001440000007460315053035103014236 0ustar liggesusers# usual models --------------------------------- #' @inheritParams print.parameters_model #' @rdname print.parameters_model #' @export format.parameters_model <- function(x, pretty_names = TRUE, split_components = TRUE, select = NULL, digits = 2, ci_digits = digits, p_digits = 3, ci_width = NULL, ci_brackets = NULL, zap_small = FALSE, format = NULL, groups = NULL, include_reference = FALSE, ...) { # save attributes coef_name <- attributes(x)$coefficient_name coef_name2 <- attributes(x)$coefficient_name2 s_value <- attributes(x)$s_value m_class <- attributes(x)$model_class htest_type <- attributes(x)$htest_type mixed_model <- attributes(x)$mixed_model random_variances <- isTRUE(attributes(x)$ran_pars) dist_params <- isTRUE(attributes(x)$dpars) mean_group_values <- attributes(x)$mean_group_values # process selection of columns style <- NULL if (!is.null(select) && # glue-like syntax, so we switch to "style" argument here length(select) == 1 && is.character(select) && (grepl("{", select, fixed = TRUE) || select %in% .style_shortcuts)) { style <- select select <- NULL } # is information about grouped parameters stored as attribute? if (is.null(groups) && !is.null(attributes(x)$coef_groups)) { groups <- attributes(x)$coef_groups } # rename random effect parameters names for stan models if (isTRUE(random_variances) && any(c("brmsfit", "stanreg", "stanmvreg") %in% m_class)) { x <- .format_stan_parameters(x, dist_params) } # for the current HTML backend we use (package "gt"), we cannot change # the column header for subtables, so we need to remove the attributes # for the "Coefficient" column here, which else allows us to use different # column labels for subtables by model components if (identical(format, "html")) { coef_name <- NULL coef_name2 <- NULL attr(x, "coefficient_name") <- NULL attr(x, "coefficient_name2") <- NULL attr(x, "zi_coefficient_name") <- NULL } # remove method columns for htest and friends - this should be printed as footer if (!is.null(m_class) && any(m_class %in% c( "BFBayesFactor", "htest", "rma", "t1way", "yuen", "PMCMR", "osrt", "trendPMCMR", "anova", "afex_aov" ))) { x$Method <- NULL x$Alternative <- NULL } # remove response for mvord if (!is.null(m_class) && any(m_class == "mvord")) { x$Response <- NULL } # remove component for nestedLogit if (!is.null(m_class) && any(m_class == "nestedLogit")) { x$Component <- NULL if (insight::has_single_value(x$Response, remove_na = TRUE)) { x$Response <- NULL } } # remove type for comparisons() if (!is.null(m_class) && any(m_class == "comparisons")) { x$Type <- NULL } # rename columns for t-tests if (!is.null(htest_type) && htest_type == "ttest" && !is.null(mean_group_values) && all(c("Mean_Group1", "Mean_Group2") %in% colnames(x))) { colnames(x)[which(colnames(x) == "Mean_Group1")] <- paste0(x$Group, " = ", mean_group_values[1]) colnames(x)[which(colnames(x) == "Mean_Group2")] <- paste0(x$Group, " = ", mean_group_values[2]) } # for htests, remove "$" from variable name, since this can make 
trouble # when rendering into different output formats if (!is.null(htest_type)) { if ("Parameter" %in% colnames(x) && grepl("$", x$Parameter, fixed = TRUE)) { x$Parameter <- gsub("(.*)\\$(.*)", "\\2", x$Parameter) } if ("Group" %in% colnames(x) && grepl("$", x$Group, fixed = TRUE)) { x$Group <- gsub("(.*)\\$(.*)", "\\2", x$Group) } } # check if we have mixed models with random variance parameters # in such cases, we don't need the group-column, but we rather # merge it with the parameter column if (isTRUE(random_variances)) { x <- .format_ranef_parameters(x) } # prepare output, to get it in shape for printing. This function removes # empty columns, or selects only those columns that should be printed x <- .prepare_x_for_print(x, select, coef_name, s_value) # check whether to split table by certain factors/columns (like component, response...) split_by <- .prepare_splitby_for_print(x) # format everything now... if (split_components && !is.null(split_by) && length(split_by)) { # this function mainly sets the appropriate column names for each # "sub table" (i.e. we print a table for each model component, like count, # zero-inflation, smooth, random, ...) and formats some parameter labels. # moreover, insight::format_table() is called to do the final formatting # and .format_model_component_header() is called to set captions for each # "sub table". formatted_table <- .format_columns_multiple_components( x, pretty_names, split_column = split_by, digits = digits, ci_digits = ci_digits, p_digits = p_digits, coef_column = coef_name, format = format, ci_width = ci_width, ci_brackets = ci_brackets, zap_small = zap_small, include_reference = include_reference, style = style, ... ) } else { # for tables that don't have multiple components, formatting is rather # easy, since we don't need to split the data frame into "sub tables" formatted_table <- .format_columns_single_component( x, pretty_names = pretty_names, digits = digits, ci_width = ci_width, ci_brackets = ci_brackets, ci_digits = ci_digits, p_digits = p_digits, format = format, coef_name = coef_name, zap_small = zap_small, include_reference = include_reference, style = style, ... ) } # remove unique columns if (insight::has_single_value(formatted_table$Component, remove_na = TRUE)) formatted_table$Component <- NULL if (insight::has_single_value(formatted_table$Effects, remove_na = TRUE)) formatted_table$Effects <- NULL if (insight::has_single_value(formatted_table$Group, remove_na = TRUE) && isTRUE(mixed_model)) formatted_table$Group <- NULL # no column with CI-level in output if (!is.null(formatted_table$CI) && insight::has_single_value(formatted_table$CI, remove_na = TRUE)) { formatted_table$CI <- NULL } # information about indention / row groups attr(formatted_table, "indent_rows") <- groups # vertical layout possible, if these have just one row if (identical(list(...)$layout, "vertical")) { if ("Parameter" %in% colnames(formatted_table)) { new_colnames <- c("", formatted_table$Parameter) formatted_table$Parameter <- NULL } else { new_colnames <- c("Type", paste0("Value ", seq_len(nrow(formatted_table)))) } formatted_table <- datawizard::rownames_as_column(as.data.frame(t(formatted_table)), "Type") colnames(formatted_table) <- new_colnames } formatted_table } #' @export format.parameters_simulate <- format.parameters_model #' @export format.parameters_brms_meta <- format.parameters_model #' @export format.parameters_coef <- function(x, format = NULL, ...) { insight::format_table(x, format = format, ...) 
} # Compare parameters ---------------------- #' @rdname print.compare_parameters #' @inheritParams print.parameters_model #' @export format.compare_parameters <- function(x, split_components = TRUE, select = NULL, digits = 2, ci_digits = digits, p_digits = 3, ci_width = NULL, ci_brackets = NULL, zap_small = FALSE, format = NULL, groups = NULL, ...) { m_class <- attributes(x)$model_class x$Method <- NULL # remove response for mvord if (!is.null(m_class) && any(m_class == "mvord")) { x$Response <- NULL } out <- data.frame( Parameter = x$Parameter, Effects = x$Effects, Component = x$Component, stringsAsFactors = FALSE ) # remove zi-suffix if we split components anyway if (isTRUE(split_components)) { out$Parameter <- insight::trim_ws(gsub(" (zi)", "", out$Parameter, fixed = TRUE)) out$Effects <- NULL } # save model names models <- attributes(x)$model_names # save model parameters attributes parameters_attributes <- attributes(x)$all_attributes # is information about grouped parameters stored as attribute? if (is.null(groups) && !is.null(parameters_attributes[[1]]$coef_groups)) { groups <- parameters_attributes[[1]]$coef_groups } # locate random effects rows ran_pars <- which(x$Effects == "random") # find all random effect groups if (is.null(x$Group)) { ran_groups <- NULL ran_group_rows <- NULL } else { ran_groups <- unique(insight::compact_character(x$Group)) ran_group_rows <- which(nzchar(x$Group, keepNA = TRUE)) } for (i in models) { # each column is suffixed with ".model_name", so we extract # columns for each model separately here pattern <- paste0("\\.\\Q", i, "\\E$") cols <- x[grepl(pattern, colnames(x))] # since we now have the columns for a single model, we clean the # column names (i.e. remove suffix), so we can use "format_table" function colnames(cols) <- gsub(pattern, "", colnames(cols)) # find coefficient column, check which rows have non-NA values # since we merged all models together, and we only have model-specific # columns for estimates, CI etc. 
but not for Effects and Component, we # extract "valid" rows via non-NA values in the coefficient column coef_column <- which(colnames(cols) %in% c(.all_coefficient_types, "Coefficient")) valid_rows <- which(!is.na(cols[[coef_column]])) # check if we have mixed models with random variance parameters # in such cases, we don't need the group-column, but we rather # merge it with the parameter column ran_pars_rows <- NULL if (length(ran_pars) && length(ran_group_rows) && any(ran_group_rows %in% valid_rows)) { # ran_pars has row indices for *all* models in this function - # make sure we have only valid rows for this particular model ran_pars_rows <- intersect(valid_rows, intersect(ran_pars, ran_group_rows)) } if (!is.null(ran_pars_rows) && length(ran_pars_rows)) { # find SD random parameters stddevs <- startsWith(out$Parameter[ran_pars_rows], "SD (") # check if we already fixed that name in a previous loop fixed_name <- unlist(lapply( ran_groups, grep, x = out$Parameter[ran_pars_rows[stddevs]], fixed = TRUE )) if (length(fixed_name)) { stddevs[fixed_name] <- FALSE } # collapse parameter name with RE grouping factor if (length(stddevs)) { out$Parameter[ran_pars_rows[stddevs]] <- paste0( gsub("(.*)\\)", "\\1", out$Parameter[ran_pars_rows[stddevs]]), ": ", x$Group[ran_pars_rows[stddevs]], ")" ) } # same for correlations corrs <- startsWith(out$Parameter[ran_pars_rows], "Cor (") # check if we already fixed that name in a previous loop fixed_name <- unlist(lapply( ran_groups, grep, x = out$Parameter[ran_pars_rows[corrs]], fixed = TRUE )) if (length(fixed_name)) { corrs[fixed_name] <- FALSE } # collapse parameter name with RE grouping factor if (length(corrs)) { out$Parameter[ran_pars_rows[corrs]] <- paste0( gsub("(.*)\\)", "\\1", out$Parameter[ran_pars_rows[corrs]]), ": ", x$Group[ran_pars_rows[corrs]], ")" ) } out$Parameter[out$Parameter == "SD (Observations: Residual)"] <- "SD (Residual)" } attributes(cols)$coef_name <- colnames(cols)[coef_column] # save p-stars in extra column cols <- insight::format_table( cols, digits = digits, ci_width = ci_width, ci_brackets = ci_brackets, ci_digits = ci_digits, p_digits = p_digits, zap_small = zap_small, select = select, ... ) # add modelname to column names; for single column layout per model, we just # need the column name. If the layout contains more than one column per model, # add modelname in parenthesis. if (ncol(cols) > 1) { colnames(cols) <- paste0(colnames(cols), " (", i, ")") } else { colnames(cols) <- i } out <- cbind(out, cols) } # remove group column out$Group <- NULL x$Group <- NULL # sort by effects and component if (isFALSE(split_components)) { out <- datawizard::data_arrange(out, c("Effects", "Component")) } # check whether to split table by certain factors/columns (like component, response...) split_by <- split_column <- .prepare_splitby_for_print(x) if (length(split_by) > 0L && isTRUE(split_components)) { # set up split-factor if (length(split_column) > 1L) { split_by <- lapply(split_column, function(i) x[[i]]) } else { split_by <- list(x[[split_column]]) } names(split_by) <- split_column # make sure we have correct sorting here... 
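# (Illustrative sketch, not part of the upstream sources: `split()` with a list of factors returns one sub-data frame per level combination, sorted by factor levels. For example, split(data.frame(x = 1:4), f = list(rep(c("zi", "cond"), 2))) yields a named list with elements "cond" (rows 2 and 4) and "zi" (rows 1 and 3).)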
formatted_table <- split(out, f = split_by) formatted_table <- lapply(names(formatted_table), function(tab) { i <- formatted_table[[tab]] # check if data frame is empty - this may happen if not all combinations # of split_by factors are present in the data (e.g., zero-inflated mixed # models, that have random effects for the count, but not for the zero- # inflation component) if (nrow(i) == 0L) { return(NULL) } # remove unique columns if (insight::has_single_value(i$Component, remove_na = TRUE)) i$Component <- NULL if (insight::has_single_value(i$Effects, remove_na = TRUE)) i$Effects <- NULL # format table captions for sub tables table_caption <- .format_model_component_header( x, type = tab, split_column = tab, is_zero_inflated = FALSE, is_ordinal_model = FALSE, is_multivariate = FALSE, ran_pars = FALSE, formatted_table = i ) # add as attribute, so table captions are printed if (identical(format, "html")) { i$Component <- table_caption$name } else if (identical(format, "md") || identical(format, "markdown")) { attr(i, "table_caption") <- table_caption$name } else { attr(i, "table_caption") <- c(paste("#", table_caption$name), "blue") } i }) # remove empty tables formatted_table <- insight::compact_list(formatted_table) # for HTML, bind data frames if (identical(format, "html")) { # fix non-equal length of columns and bind data frames formatted_table <- do.call(rbind, .fix_nonmatching_columns(formatted_table)) } } else { formatted_table <- out # remove unique columns if (insight::has_single_value(formatted_table$Component, remove_na = TRUE)) formatted_table$Component <- NULL if (insight::has_single_value(formatted_table$Effects, remove_na = TRUE)) formatted_table$Effects <- NULL # add line with info about observations formatted_table <- .add_obs_row(formatted_table, parameters_attributes, style = select) } # information about indention / row groups attr(formatted_table, "indent_rows") <- groups formatted_table } # sem-models --------------------------------- #' @export format.parameters_sem <- function(x, digits = 2, ci_digits = digits, p_digits = 3, format = NULL, ci_width = NULL, ci_brackets = TRUE, pretty_names = TRUE, ...) { if (missing(digits)) { digits <- .additional_arguments(x, "digits", 2) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", 3) } .format_columns_multiple_components( x, pretty_names = TRUE, split_column = "Component", digits = digits, ci_digits = ci_digits, p_digits = p_digits, format = format, ci_width = ci_width, ci_brackets = ci_brackets, ... 
) } # footer functions ------------------ .format_footer <- function(x, digits = 3, verbose = TRUE, show_sigma = FALSE, show_formula = FALSE, show_r2 = FALSE, show_rmse = FALSE, format = "text") { # prepare footer footer <- NULL type <- tolower(format) sigma_value <- attributes(x)$sigma r2 <- attributes(x)$r2 rmse <- attributes(x)$rmse residual_df <- attributes(x)$residual_df p_adjust <- attributes(x)$p_adjust model_formula <- attributes(x)$model_formula anova_test <- attributes(x)$anova_test anova_type <- attributes(x)$anova_type prediction_type <- attributes(x)$prediction_type footer_text <- attributes(x)$footer_text text_alternative <- attributes(x)$text_alternative n_obs <- attributes(x)$n_obs # footer: model formula if (isTRUE(show_formula)) { footer <- .add_footer_formula(footer, model_formula, n_obs, type) } # footer: residual standard deviation if (isTRUE(show_sigma)) { footer <- .add_footer_sigma(footer, digits, sigma_value, residual_df, type) } # footer: RMSE if (isTRUE(show_rmse)) { footer <- .add_footer_values(footer, digits, value = rmse, text = "RMSE ", type) } # footer: r-squared if (isTRUE(show_r2)) { footer <- .add_footer_r2(footer, digits, r2, type) } # footer: p-adjustment if ("p" %in% colnames(x) && isTRUE(verbose) && !is.null(p_adjust) && p_adjust != "none") { footer <- .add_footer_text(footer, text = paste("p-value adjustment method:", format_p_adjust(p_adjust))) } # footer: anova test if (!is.null(anova_test)) { footer <- .add_footer_text(footer, text = sprintf("%s test statistic", anova_test)) } # footer: anova type if (!is.null(anova_type)) { footer <- .add_footer_text(footer, text = sprintf("Anova Table (Type %s tests)", anova_type)) } # footer: marginaleffects::comparisons() if (!is.null(prediction_type)) { footer <- .add_footer_text(footer, text = sprintf("Prediction type: %s", prediction_type)) } # footer: htest alternative if (!is.null(text_alternative)) { footer <- .add_footer_text(footer, text = text_alternative) } # footer: generic text if (!is.null(footer_text)) { footer <- .add_footer_text(footer, footer_text, type) } # if we have two trailing newlines, remove one if (identical(type, "text") && !is.null(footer) && endsWith(footer[1], "\n\n")) { footer[1] <- substr(footer[1], 0, nchar(footer[1]) - 1) } footer } # footer: generic text .add_footer_text <- function(footer = NULL, text = NULL, type = "text") { if (!is.null(text) && length(text)) { if (type == "text" || type == "markdown") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, sprintf("%s%s\n", fill, text)) } else if (type == "html") { footer <- c(footer, gsub("\n", "", text, fixed = TRUE)) } } footer } # footer: generic values .add_footer_values <- function(footer = NULL, digits = 3, value = NULL, text = NULL, type = "text") { if (!is.null(value) && !is.null(text)) { string <- sprintf("%s: %s", text, insight::format_value(value, digits = digits)) if (type == "text" || type == "markdown") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, fill, string, "\n") } else if (type == "html") { footer <- c(footer, string) } } footer } # footer: residual standard deviation .add_footer_sigma <- function(footer = NULL, digits = 3, sigma = NULL, residual_df = NULL, type = "text") { if (!is.null(sigma)) { # format residual df if (is.null(residual_df)) { res_df <- "" } else { res_df <- paste0(" (df = ", residual_df, ")") } if (type == "text" || type == "markdown") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, 
sprintf("%sSigma: %.*f%s\n", fill, digits, sigma, res_df)) } else if (type == "html") { footer <- c(footer, insight::trim_ws(sprintf("Sigma: %.*f%s", digits, sigma, res_df))) } } footer } # footer: r-squared .add_footer_r2 <- function(footer = NULL, digits = 3, r2 = NULL, type = "text") { if (!is.null(r2)) { rsq <- .safe(paste(unlist(lapply(r2, function(i) { paste0(attributes(i)$names, ": ", insight::format_value(i, digits = digits)) })), collapse = "; ")) if (!is.null(rsq)) { if (type == "text" || type == "markdown") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, fill, rsq, "\n") } else if (type == "html") { footer <- c(footer, rsq) } } } footer } # footer: model formula .add_footer_formula <- function(footer = NULL, model_formula = NULL, n_obs = NULL, type = "text") { if (!is.null(model_formula)) { # format n of observations if (is.null(n_obs)) { n <- "" } else { n <- paste0(" (", n_obs, " Observations)") } if (type == "text" || type == "markdown") { if (is.null(footer)) { fill <- "\n" } else { fill <- "" } footer <- paste0(footer, fill, "Model: ", model_formula, n, "\n") } else if (type == "html") { footer <- c(footer, insight::trim_ws(paste0("Model: ", model_formula, n))) } } footer } # footer: type of uncertainty interval .print_footer_cimethod <- function(x) { if (isTRUE(getOption("parameters_cimethod", TRUE))) { # get attributes ci_method <- .additional_arguments(x, "ci_method", NULL) test_statistic <- .additional_arguments(x, "test_statistic", NULL) bootstrap <- .additional_arguments(x, "bootstrap", FALSE) is_bayesian <- .additional_arguments(x, "is_bayesian", FALSE) simulated <- .additional_arguments(x, "simulated", FALSE) residual_df <- .additional_arguments(x, "residual_df", NULL) random_variances <- .additional_arguments(x, "ran_pars", FALSE) model_class <- .additional_arguments(x, "model_class", NULL) # prepare strings if (!is.null(ci_method)) { # only random effects? 
no message for fixed effects ci-approximation if (!is.null(x$Effects) && all(x$Effects == "random")) { msg <- "\n" string_method <- "" # here we have fixed effects only, or fixed and random effects } else { # since `.format_ci_method_name()` changes the CI method names to have a # mix of cases, standardize them by converting to lower case ci_method <- tolower(ci_method) # in case of glm's that have df.residual(), and where residual df were requested is_test_statistic_t <- ci_method == "residual" && test_statistic == "z-statistic" && !is.null(residual_df) && !is.infinite(residual_df) && !is.na(residual_df) if (is_test_statistic_t) { test_statistic <- "t-statistic" } string_tailed <- switch(ci_method, hdi = "highest-density", uniroot = , profile = "profile-likelihood", "equal-tailed" ) # sampling method if (isTRUE(bootstrap)) { sampling_method <- ifelse(isTRUE(.unicode_symbols()), "na\u0131ve bootstrap", "naive bootstrap") } else if (isTRUE(simulated)) { sampling_method <- "simulated multivariate normal" } else { sampling_method <- "MCMC" } string_method <- switch(ci_method, bci = , bcai = "bias-corrected accelerated bootstrap", si = , ci = , quantile = , eti = , hdi = sampling_method, normal = "Wald normal", boot = "parametric bootstrap", "Wald" ) if (toupper(ci_method) %in% c("KENWARD", "KR", "KENWARD-ROGER", "KENWARD-ROGERS", "SATTERTHWAITE")) { string_approx <- paste0("with ", format_df_adjust(ci_method, approx_string = "", dof_string = ""), " ") } else { string_approx <- "" } if (!is.null(test_statistic) && ci_method != "normal" && !isTRUE(bootstrap)) { string_statistic <- switch(tolower(test_statistic), `t-statistic` = "t", `chi-squared statistic` = , `z-statistic` = "z", "" ) string_method <- paste0(string_method, " ", string_statistic, "-") } else { string_method <- paste0(string_method, " ") } # bootstrapped intervals if (isTRUE(bootstrap)) { msg <- paste0("\nUncertainty intervals (", string_tailed, ") are ", string_method, "intervals.") } else if (isTRUE(is_bayesian)) { msg <- paste0("\nUncertainty intervals (", string_tailed, ") computed using a ", string_method, "distribution ", string_approx, "approximation.") # nolint } else { msg <- paste0("\nUncertainty intervals (", string_tailed, ") and p-values (two-tailed) computed using a ", string_method, "distribution ", string_approx, "approximation.") # nolint } } 
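# (Illustrative sketch, not part of the upstream sources: for a plain linear model, e.g. print(model_parameters(lm(mpg ~ wt, data = mtcars))), the message assembled above reads: "Uncertainty intervals (equal-tailed) and p-values (two-tailed) computed using a Wald t-distribution approximation.")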
# do we have random effect variances from lme4/glmmTMB? # must be glmmTMB show_re_msg <- (identical(model_class, "glmmTMB") && # and not Wald-/normal CIs (!string_method %in% c("Wald z-", "Wald normal") || !ci_method %in% c("wald", "normal"))) || # OR must be merMod ((identical(model_class, "lmerMod") || identical(model_class, "glmerMod")) && # and not Wald CIs !ci_method %in% c("wald", "normal", "profile", "boot")) if (show_re_msg && isTRUE(random_variances) && !is.null(x$Effects) && "random" %in% x$Effects) { msg <- paste(msg, "Uncertainty intervals for random effect variances computed using a Wald z-distribution approximation.") # nolint } insight::format_alert(insight::color_text(msg, "yellow")) } } } .print_footer_exp <- function(x) { # we need this to check whether we have extremely large coefficients if (isTRUE(getOption("parameters_exponentiate", TRUE))) { msg <- NULL # try to find out the name of the coefficient column coef_column <- intersect(colnames(x), .all_coefficient_names) if (length(coef_column) && "Parameter" %in% colnames(x)) { spurious_coefficients <- abs(x[[coef_column[1]]][!.in_intercepts(x$Parameter)]) } else { spurious_coefficients <- NULL } exponentiate <- .additional_arguments(x, "exponentiate", FALSE) if (!.is_valid_exponentiate_argument(exponentiate)) { if (isTRUE(.additional_arguments(x, "log_link", FALSE))) { msg <- "The model has a log- or logit-link. Consider using `exponentiate = TRUE` to interpret coefficients as ratios." # nolint # we only check for exp(coef), so exp() here since coefficients are on logit-scale if (!is.null(spurious_coefficients)) { spurious_coefficients <- exp(spurious_coefficients) } } else if (isTRUE(.additional_arguments(x, "log_response", FALSE))) { msg <- "The model has a log-transformed response variable. Consider using `exponentiate = TRUE` to interpret coefficients as ratios." # nolint # don't show warning about complete separation spurious_coefficients <- NULL } } else if (.is_valid_exponentiate_argument(exponentiate) && isTRUE(.additional_arguments(x, "log_response", FALSE))) { # nolint # don't show warning about complete separation spurious_coefficients <- NULL } # following check only for models with logit-link logit_model <- isTRUE(.additional_arguments(x, "logit_link", FALSE)) || isTRUE(attributes(x)$coefficient_name %in% c("Log-Odds", "Odds Ratio")) # remove NA and infinite values from spurious coefficients if (!is.null(spurious_coefficients)) { spurious_coefficients <- spurious_coefficients[!is.na(spurious_coefficients) & !is.infinite(spurious_coefficients)] # nolint } # check for complete separation coefficients or possible issues with # too few data points if (!is.null(spurious_coefficients) && length(spurious_coefficients) && logit_model) { if (any(spurious_coefficients > 50)) { msg <- c(msg, "Some coefficients are very large, which may indicate issues with complete separation.") # nolint } else if (any(spurious_coefficients > 15)) { msg <- c(msg, "Some coefficients seem to be rather large, which may indicate issues with (quasi) complete separation. 
Consider using bias-corrected or penalized regression models.") # nolint } } if (!is.null(msg) && isTRUE(getOption("parameters_warning_exponentiate", TRUE))) { insight::format_alert(paste0("\n", msg)) # set flag, so message only displayed once per session options(parameters_warning_exponentiate = FALSE) } } } parameters/R/methods_cplm.R0000644000176200001440000002452714761570351015435 0ustar liggesusers# classes: .cpglm, .bcpglm, .zcpglm, .cpglmm ########## .zcpglm --------------- #' @title Parameters from Zero-Inflated Models #' @name model_parameters.zcpglm #' #' @description #' Parameters from zero-inflated models (from packages like **pscl**, #' **cplm** or **countreg**). #' #' @param model A model with zero-inflation component. #' @inheritParams model_parameters.default #' @inheritParams simulate_model #' #' @seealso [insight::standardize_names()] to rename #' columns into a consistent, standardized naming scheme. #' #' @section Model components: #' Possible values for the `component` argument depend on the model class. #' Following are valid options: #' - `"all"`: returns all model components, applies to all models, but will only #' have an effect for models with more than just the conditional model component. #' - `"conditional"`: only returns the conditional component, i.e. "fixed effects" #' terms from the model. Will only have an effect for models with more than #' just the conditional model component. #' - `"smooth_terms"`: returns smooth terms, only applies to GAMs (or similar #' models that may contain smooth terms). #' - `"zero_inflated"` (or `"zi"`): returns the zero-inflation component. #' - `"dispersion"`: returns the dispersion model component. This is common #' for models with zero-inflation or that can model the dispersion parameter. #' - `"instruments"`: for instrumental-variable or some fixed effects regression, #' returns the instruments. #' - `"nonlinear"`: for non-linear models (like models of class `nlmerMod` or #' `nls`), returns starting estimates for the nonlinear parameters. #' - `"correlation"`: for models with correlation-component, like `gls`, the #' variables used to describe the correlation structure are returned. #' #' **Special models** #' #' Some model classes also allow rather uncommon options. These are: #' - **mhurdle**: `"infrequent_purchase"`, `"ip"`, and `"auxiliary"` #' - **BGGM**: `"correlation"` and `"intercept"` #' - **BFBayesFactor**, **glmx**: `"extra"` #' - **averaging**: `"conditional"` and `"full"` #' - **mjoint**: `"survival"` #' - **mfx**: `"precision"`, `"marginal"` #' - **betareg**, **DirichletRegModel**: `"precision"` #' - **mvord**: `"thresholds"` and `"correlation"` #' - **clm2**: `"scale"` #' - **selection**: `"selection"`, `"outcome"`, and `"auxiliary"` #' - **lavaan**: One or more of `"regression"`, `"correlation"`, `"loading"`, #' `"variance"`, `"defined"`, or `"mean"`. Can also be `"all"` to include #' all components. #' #' For models of class `brmsfit` (package **brms**), even more options are #' possible for the `component` argument, which are not all documented in detail #' here. #' #' @examplesIf require("pscl") #' data("bioChemists", package = "pscl") #' model <- pscl::zeroinfl( #' art ~ fem + mar + kid5 + ment | kid5 + phd, #' data = bioChemists #' ) #' model_parameters(model) #' @return A data frame of indices related to the model's parameters. 
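#' (Illustrative note, not part of the upstream documentation: for a model with a zero-inflation component, such as the example above, a call like `model_parameters(model, component = "zi")` would restrict this data frame to the zero-inflation rows, while the default `component = "all"` returns all components.)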
#' @export model_parameters.zcpglm <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = "all", standardize = NULL, exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { component <- insight::validate_argument(component, c("all", "conditional", "zi", "zero_inflated")) # fix argument, if model has no zi-part if (!insight::model_info(model, verbose = FALSE)$is_zero_inflated && component != "conditional") { component <- "conditional" } # Processing if (bootstrap) { params <- bootstrap_parameters(model, iterations = iterations, ci = ci, ...) } else { params <- .extract_parameters_generic( model, ci = ci, component = component, standardize = standardize, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) } # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) params <- .add_model_parameters_attributes( params, model, ci, exponentiate, p_adjust = p_adjust, include_info = include_info, verbose = verbose, ... ) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export standard_error.zcpglm <- function(model, component = "all", ...) { insight::check_if_installed("cplm") component <- insight::validate_argument( component, c("all", "conditional", "zi", "zero_inflated") ) junk <- utils::capture.output(stats <- cplm::summary(model)$coefficients) # nolint params <- insight::get_parameters(model) tweedie <- .data_frame( Parameter = params$Parameter[params$Component == "conditional"], SE = as.vector(stats$tweedie[, "Std. Error"]), Component = "conditional" ) zero <- .data_frame( Parameter = params$Parameter[params$Component == "zero_inflated"], SE = as.vector(stats$zero[, "Std. Error"]), Component = "zero_inflated" ) out <- .filter_component(rbind(tweedie, zero), component) out } #' @export p_value.zcpglm <- function(model, component = "all", ...) { insight::check_if_installed("cplm") component <- insight::validate_argument( component, c("all", "conditional", "zi", "zero_inflated") ) junk <- utils::capture.output(stats <- cplm::summary(model)$coefficients) # nolint params <- insight::get_parameters(model) tweedie <- .data_frame( Parameter = params$Parameter[params$Component == "conditional"], p = as.vector(stats$tweedie[, "Pr(>|z|)"]), Component = "conditional" ) zero <- .data_frame( Parameter = params$Parameter[params$Component == "zero_inflated"], p = as.vector(stats$zero[, "Pr(>|z|)"]), Component = "zero_inflated" ) out <- .filter_component(rbind(tweedie, zero), component) out } ########## .bcpglm --------------- #' @export model_parameters.bcplm <- model_parameters.bayesQR #' @export p_value.bcplm <- p_value.brmsfit ########## .cpglm --------------- #' @export p_value.cpglm <- function(model, ...) { insight::check_if_installed("cplm") junk <- utils::capture.output(stats <- cplm::summary(model)$coefficients) # nolint params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(stats[, "Pr(>|t|)"]) ) } #' @export standard_error.cpglm <- function(model, ...) { insight::check_if_installed("cplm") junk <- utils::capture.output(stats <- cplm::summary(model)$coefficients) # nolint params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(stats[, "Std. 
Error"]) ) } ########## .cpglmm --------------- #' @export model_parameters.cpglmm <- function(model, ci = 0.95, ci_method = NULL, ci_random = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, effects = "all", group_level = FALSE, exponentiate = FALSE, p_adjust = NULL, include_sigma = FALSE, keep = NULL, drop = NULL, verbose = TRUE, ...) { # p-values, CI and se might be based on different df-methods ci_method <- .check_df_method(ci_method) effects <- insight::validate_argument(effects, c("fixed", "random", "all")) # standardize only works for fixed effects... if (!is.null(standardize) && standardize != "refit") { if (!missing(effects) && effects != "fixed" && verbose) { insight::format_alert("Standardizing coefficients only works for fixed effects of the mixed model.") } effects <- "fixed" } params <- .mixed_model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, effects = effects, p_adjust = p_adjust, group_level = group_level, ci_method = ci_method, include_sigma = include_sigma, ci_random = ci_random, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", "data.frame") params } #' @export p_value.cpglmm <- function(model, method = "wald", ...) { p_value.default(model, method = method, ...) } #' @export standard_error.cpglmm <- function(model, ...) { insight::check_if_installed("cplm") stats <- cplm::summary(model)$coefs params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(stats[, "Std. Error"]) ) } # tools -------------------- .check_df_method <- function(df_method) { if (!is.null(df_method)) { df_method <- tolower(df_method) if (df_method %in% c("satterthwaite", "kenward", "kr")) { insight::format_alert("Satterthwaite or Kenward-Rogers approximation of degrees of freedom is only available for linear mixed models.") df_method <- "wald" } df_method <- insight::validate_argument( df_method, c( "wald", "normal", "residual", "ml1", "betwithin", "profile", "boot", "uniroot" ) ) } df_method } parameters/R/n_parameters.R0000644000176200001440000000011014037763760015426 0ustar liggesusers#' @importFrom insight n_parameters #' @export insight::n_parameters parameters/R/methods_loo.R0000644000176200001440000000615214777431265015301 0ustar liggesusers#' Bayesian Model Comparison #' #' Make a table of Bayesian model comparisons using the `loo` package. #' #' @param model An object of class [brms::loo_compare]. #' @param include_IC Whether to include the information criteria (IC). #' @param include_ENP Whether to include the effective number of parameters (ENP). #' @param ... Additional arguments (not used for now). 
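#' @details As a quick numeric illustration of the standardized difference
#' described below, using hypothetical values: with an `elpd_diff` of -8 and
#' a `se_diff` of 2, the Z-diff is `-8 / 2 = -4`, and the corresponding
#' p-value is `2 * pnorm(-abs(-4))`, i.e. roughly 0.00006.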
#' # nolint start #' @examplesIf all(insight::check_if_installed(c("brms", "RcppEigen", "BH"), quietly = TRUE)) # nolint end #' \donttest{ #' library(brms) #' #' m1 <- brms::brm(mpg ~ qsec, data = mtcars) #' m2 <- brms::brm(mpg ~ qsec + drat, data = mtcars) #' m3 <- brms::brm(mpg ~ qsec + drat + wt, data = mtcars) #' #' x <- suppressWarnings(brms::loo_compare( #' brms::add_criterion(m1, "loo"), #' brms::add_criterion(m2, "loo"), #' brms::add_criterion(m3, "loo"), #' model_names = c("m1", "m2", "m3") #' )) #' model_parameters(x) #' model_parameters(x, include_IC = FALSE, include_ENP = TRUE) #' } #' #' @details #' The rule of thumb is that the models are "very similar" if |elpd_diff| (the #' absolute value of elpd_diff) is less than 4 (Sivula, Magnusson and Vehtari, 2020). #' If superior to 4, then one can use the SE to obtain a standardized difference #' (Z-diff) and interpret it as such, assuming that the difference is normally #' distributed. The corresponding p-value is then calculated as `2 * pnorm(-abs(Z-diff))`. #' However, note that if the raw ELPD difference is small (less than 4), it doesn't #' make much sense to rely on its standardized value: it is not very useful to #' conclude that a model is much better than another if both models make very #' similar predictions. #' #' @return Objects of `parameters_model`. #' @export model_parameters.compare.loo <- function(model, include_IC = TRUE, include_ENP = FALSE, ...) { # nolint start # https://stats.stackexchange.com/questions/608881/how-to-interpret-elpd-diff-of-bayesian-loo-estimate-in-bayesian-logistic-regress # nolint end # https://users.aalto.fi/%7Eave/CV-FAQ.html#12_What_is_the_interpretation_of_ELPD__elpd_loo__elpd_diff # https://users.aalto.fi/%7Eave/CV-FAQ.html#se_diff # The difference in expected log predictive density (elpd) between each model # and the best model as well as the standard error of this difference (assuming # the difference is approximately normal). # The values in the first row are 0s because the models are ordered from best to worst according to their elpd. x <- as.data.frame(model) out <- data.frame(Name = rownames(x), stringsAsFactors = FALSE) if ("looic" %in% colnames(x)) { if (include_IC) out$LOOIC <- x[["looic"]] if (include_ENP) out$ENP <- x[["p_loo"]] out$ELPD <- x[["elpd_loo"]] } else { if (include_IC) out$WAIC <- x[["waic"]] if (include_ENP) out$ENP <- x[["p_waic"]] out$ELPD <- x[["elpd_waic"]] } out$Difference <- x[["elpd_diff"]] out$Difference_SE <- x[["se_diff"]] z_elpd_diff <- x[["elpd_diff"]] / x[["se_diff"]] out$p <- 2 * stats::pnorm(-abs(z_elpd_diff)) class(out) <- c("parameters_model", "data.frame") out } parameters/R/methods_BayesFM.R0000644000176200001440000001112014775505314015760 0ustar liggesusers#' Parameters from Bayesian Exploratory Factor Analysis #' #' Format Bayesian Exploratory Factor Analysis objects from the BayesFM package. #' #' @param model Bayesian EFA created by the `BayesFM::befa`. #' @inheritParams principal_components #' @inheritParams bayestestR::describe_posterior #' @inheritParams model_parameters.default #' @param ... Arguments passed to or from other methods. #' #' @examples #' library(parameters) #' \donttest{ #' if (require("BayesFM")) { #' efa <- BayesFM::befa(mtcars, iter = 1000) #' results <- model_parameters(efa, sort = TRUE, verbose = FALSE) #' results #' efa_to_cfa(results, verbose = FALSE) #' } #' } #' @return A data frame of loadings. 
#' @export model_parameters.befa <- function(model, sort = FALSE, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = NULL, verbose = TRUE, ...) { if (!attr(model, "post.column.switch") || !attr(model, "post.sign.switch")) { insight::check_if_installed("BayesFM") if (!attr(model, "post.column.switch")) model <- BayesFM::post.column.switch(model) if (!attr(model, "post.sign.switch")) model <- BayesFM::post.sign.switch(model) } factor_loadings <- as.data.frame(model$alpha) names(factor_loadings) <- gsub("alpha:", "", names(factor_loadings), fixed = TRUE) factor_loadings <- stats::reshape( factor_loadings, direction = "long", varying = list(names(factor_loadings)), sep = "_", timevar = "Variable", v.names = "Loading", idvar = "Draw", times = names(factor_loadings) ) components <- as.data.frame(model$dedic) names(components) <- gsub("dedic:", "", names(components), fixed = TRUE) components <- stats::reshape( components, direction = "long", varying = list(names(components)), sep = "_", timevar = "Variable", v.names = "Component", idvar = "Draw", times = names(components) ) factor_loadings <- merge(components, factor_loadings) # Compute posterior by dedic long_loadings <- data.frame() for (var in unique(factor_loadings$Variable)) { for (comp in unique(factor_loadings$Component)) { chunk <- factor_loadings[factor_loadings$Variable == var & factor_loadings$Component == comp, ] # nolint if (nrow(chunk) == 0) { rez <- bayestestR::describe_posterior( factor_loadings$Loading, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, verbose = verbose, ... ) rez[1, ] <- NA } else { rez <- bayestestR::describe_posterior( chunk$Loading, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, verbose = verbose, ... ) } long_loadings <- rbind( long_loadings, cbind(data.frame(Component = comp, Variable = var), rez) ) } } long_loadings$Component <- paste0("F", long_loadings$Component) # Clean long_loadings$Parameter <- NULL if ("CI" %in% names(long_loadings) && insight::has_single_value(long_loadings$CI, remove_na = TRUE)) { long_loadings$CI <- NULL } long_loadings <- long_loadings[long_loadings$Component != 0, ] factor_loadings <- .wide_loadings( long_loadings, loadings_columns = names(long_loadings)[3], component_column = "Component", variable_column = "Variable" ) # Add attributes attr(factor_loadings, "model") <- model attr(factor_loadings, "additional_arguments") <- list(...) attr(factor_loadings, "n") <- insight::n_unique(long_loadings$Component) attr(factor_loadings, "loadings_columns") <- names(factor_loadings)[2:ncol(factor_loadings)] attr(factor_loadings, "ci") <- ci # Sorting if (isTRUE(sort)) { factor_loadings <- .sort_loadings(factor_loadings) } # Add some more attributes long_loadings <- stats::na.omit(long_loadings) row.names(long_loadings) <- NULL attr(factor_loadings, "loadings_long") <- long_loadings # add class-attribute for printing class(factor_loadings) <- c("parameters_efa", class(factor_loadings)) factor_loadings } parameters/R/plot.R0000644000176200001440000000255415001670564013731 0ustar liggesusers#' @export plot.parameters_sem <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_model <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.compare_parameters <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_simulate <- function(x, ...) 
{ insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_brms_meta <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.n_factors <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.n_clusters <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_pca <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @export plot.parameters_efa <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @importFrom graphics plot #' @export plot.cluster_analysis <- function(x, ...) { insight::check_if_installed("see") plot(datawizard::visualisation_recipe(x, ...)) } #' @export plot.cluster_analysis_summary <- function(x, ...) { insight::check_if_installed("see") plot(datawizard::visualisation_recipe(x, ...)) } parameters/R/methods_fixest.R0000644000176200001440000001556214761570351016010 0ustar liggesusers# .fixest ----------------------- #' @export model_parameters.fixest <- function(model, ci = 0.95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, vcov = NULL, vcov_args = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { # default ci-method, based on statistic if (is.null(ci_method)) { if (identical(insight::find_statistic(model), "t-statistic")) { ci_method <- "wald" } else { ci_method <- "normal" } } # extract model parameters table, as data frame out <- .model_parameters_generic( model = model, ci = ci, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, include_info = include_info, keep_parameters = keep, drop_parameters = drop, vcov = vcov, vcov_args = vcov_args, verbose = verbose, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export standard_error.fixest <- function(model, vcov = NULL, vcov_args = NULL, ...) { params <- insight::get_parameters(model) if (is.null(vcov)) { # get standard errors from summary # see https://github.com/easystats/parameters/issues/1039 stats <- summary(model) SE <- stats$coeftable[, "Std. Error"] } else { # we don't want to wrap this in a tryCatch because the `fixest` error is # informative when `vcov` is wrong. V <- insight::get_varcov(model, vcov = vcov, vcov_args = vcov_args) SE <- sqrt(diag(V)) } # remove .theta parameter if (".theta" %in% names(SE)) { SE <- SE[names(SE) != ".theta"] } .data_frame( Parameter = params$Parameter, SE = as.vector(SE) ) } # .feglm ----------------------- #' @export model_parameters.feglm <- model_parameters.fixest #' @export standard_error.feglm <- function(model, ...) { stats <- stats::coef(summary(model)) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(stats[, "Std. error"]) ) } ## TODO add ci_method later? #' @export p_value.feglm <- function(model, ...) 
{ stats <- stats::coef(summary(model)) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(stats[, 4]) ) } # .fixest_multi ----------------------------------- #' @export model_parameters.fixest_multi <- function(model, ci = 0.95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, vcov = NULL, vcov_args = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { # iterate over responses out <- lapply( model, model_parameters.default, ci = ci, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, include_info = include_info, keep = keep, drop = drop, verbose = verbose, vcov = vcov, vcov_args = vcov_args, ... ) # bind lists together to one data frame, save attributes att <- attributes(out[[1]]) params <- do.call(rbind, out) # add response and group columns id_columns <- .get_fixest_multi_columns(model) params$Response <- id_columns$Response params$Group <- id_columns$Group attributes(params) <- utils::modifyList(att, attributes(params)) attr(params, "model_class") <- "fixest_multi" params } #' @export ci.fixest_multi <- function(x, ...) { out <- do.call(rbind, lapply(x, ci, ...)) # add response and group columns id_columns <- .get_fixest_multi_columns(x) # add response column out$Response <- id_columns$Response out$Group <- id_columns$Group row.names(out) <- NULL out } #' @export standard_error.fixest_multi <- function(model, ...) { out <- do.call(rbind, lapply(model, standard_error, ...)) # add response and group columns id_columns <- .get_fixest_multi_columns(model) # add response column out$Response <- id_columns$Response out$Group <- id_columns$Group row.names(out) <- NULL out } #' @export p_value.fixest_multi <- function(model, ...) { out <- do.call(rbind, lapply(model, p_value, ...)) # add response and group columns id_columns <- .get_fixest_multi_columns(model) # add response column out$Response <- id_columns$Response out$Group <- id_columns$Group row.names(out) <- NULL out } #' @export simulate_model.fixest_multi <- function(model, ...) { lapply(model, simulate_model, ...) } # helper --------------------------------- .get_fixest_multi_columns <- function(model) { # add response and group columns s <- summary(model) l <- lengths(lapply(s, stats::coef)) parts <- strsplit(names(l), ";", fixed = TRUE) id_columns <- Map(function(i, j) { if (length(j) == 1 && startsWith(j, "rhs")) { data.frame( Group = rep(insight::trim_ws(sub("rhs:", "", j, fixed = TRUE)), i), stringsAsFactors = FALSE ) } else if (length(j) == 1 && startsWith(j, "lhs")) { data.frame( Response = rep(insight::trim_ws(sub("lhs:", "", j, fixed = TRUE)), i), stringsAsFactors = FALSE ) } else { data.frame( Response = rep(insight::trim_ws(sub("lhs:", "", j[1], fixed = TRUE)), i), Group = rep(insight::trim_ws(sub("rhs:", "", j[2], fixed = TRUE)), i), stringsAsFactors = FALSE ) } }, unname(l), parts) do.call(rbind, id_columns) } parameters/R/p_direction.R0000644000176200001440000001365515033425412015251 0ustar liggesusers#' @importFrom bayestestR p_direction #' @export bayestestR::p_direction #' @title Probability of Direction (pd) #' @name p_direction.lm #' #' @description Compute the **Probability of Direction** (*pd*, also known as #' the Maximum Probability of Effect - *MPE*). 
This can be interpreted as the #' probability that a parameter (described by its full confidence, or #' "compatibility" interval) is strictly positive or negative (whichever is the #' most probable). Although differently expressed, this index is fairly similar #' (i.e., is strongly correlated) to the frequentist *p-value* (see 'Details'). #' #' @param x A statistical model. #' @inheritParams bayestestR::p_direction #' @inheritParams model_parameters.default #' @param ... Arguments passed to other methods, e.g. `ci()`. Arguments like #' `vcov` or `vcov_args` can be used to compute confidence intervals using a #' specific variance-covariance matrix for the standard errors. #' #' @seealso See also [`equivalence_test()`], [`p_function()`] and #' [`p_significance()`] for functions related to checking effect existence and #' significance. #' #' @inheritSection bayestestR::p_direction What is the *pd*? #' #' @inheritSection bayestestR::p_direction Relationship with the p-value #' #' @inheritSection bayestestR::p_direction Possible Range of Values #' #' @inheritSection model_parameters Statistical inference - how to quantify evidence #' #' @references #' #' - Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is #' flat (p > 0.05): Significance thresholds and the crisis of unreplicable #' research. PeerJ, 5, e3544. \doi{10.7717/peerj.3544} #' #' - Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference, #' Emphasize Unconditional Compatibility Descriptions of Statistics. (2022) #' https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022) #' #' - Lakens, D. (2024). Improving Your Statistical Inferences (Version v1.5.1). #' Retrieved from https://lakens.github.io/statistical_inferences/. #' \doi{10.5281/ZENODO.6409077} #' #' - Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence Testing #' for Psychological Research: A Tutorial. Advances in Methods and Practices #' in Psychological Science, 1(2), 259–269. #' #' - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019). #' Indices of Effect Existence and Significance in the Bayesian Framework. #' Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767} #' #' - Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical #' science: replace confidence and significance by compatibility and surprise. #' BMC Medical Research Methodology (2020) 20:244. #' #' - Schweder T. Confidence is epistemic probability for empirical science. #' Journal of Statistical Planning and Inference (2018) 195:116–125. #' \doi{10.1016/j.jspi.2017.09.016} #' #' - Schweder T, Hjort NL. Frequentist analogues of priors and posteriors. #' In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory #' Data Confrontation in Economics, pp. 285-217. Princeton University Press, #' Princeton, NJ, 2003 #' #' - Vos P, Holbert D. Frequentist statistical inference without repeated sampling. #' Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x} #' #' @return A data frame. #' #' @examplesIf requireNamespace("bayestestR") && require("see", quietly = TRUE) && requireNamespace("sandwich") #' data(qol_cancer) #' model <- lm(QoL ~ time + age + education, data = qol_cancer) #' p_direction(model) #' #' # based on heteroscedasticity-robust standard errors #' p_direction(model, vcov = "HC3") #' #' result <- p_direction(model) #' plot(result) #' @export p_direction.lm <- function(x, ci = 0.95, method = "direct", null = 0, vcov = NULL, vcov_args = NULL, ...) 
{ # generate normal distribution based on CI range result <- .posterior_ci(x, ci, vcov = vcov, vcov_args = vcov_args, ...) # copy out <- result$out posterior <- result$posterior # add pd out$pd <- as.numeric(bayestestR::p_direction( posterior, method = method, null = null, ... )) # reorder out <- out[intersect(c("Parameter", "CI", "CI_low", "CI_high", "pd", "Effects", "Component"), colnames(out))] attr(out, "data") <- posterior attr(out, "null") <- null class(out) <- c("p_direction_lm", "p_direction", "see_p_direction", "data.frame") out } # methods --------------------------------------------------------------------- #' @export print.p_direction_lm <- function(x, digits = 2, p_digits = 3, ...) { null <- attributes(x)$null caption <- sprintf( "Probability of Direction (null: %s)", insight::format_value(null, digits = digits, protect_integer = TRUE) ) x <- insight::format_table(x, digits = digits, p_digits = p_digits) cat(insight::export_table(x, title = caption, ...)) } # other classes -------------------------------------------------------------- #' @export p_direction.glm <- p_direction.lm #' @export p_direction.coxph <- p_direction.lm #' @export p_direction.svyglm <- p_direction.lm #' @export p_direction.glmmTMB <- p_direction.lm #' @export p_direction.merMod <- p_direction.lm #' @export p_direction.wbm <- p_direction.lm #' @export p_direction.lme <- p_direction.lm #' @export p_direction.gee <- p_direction.lm #' @export p_direction.gls <- p_direction.lm #' @export p_direction.feis <- p_direction.lm #' @export p_direction.felm <- p_direction.lm #' @export p_direction.mixed <- p_direction.lm #' @export p_direction.hurdle <- p_direction.lm #' @export p_direction.zeroinfl <- p_direction.lm #' @export p_direction.rma <- p_direction.lm parameters/R/methods_stats4.R0000644000176200001440000000022714507235543015717 0ustar liggesusers#' @export ci.mle <- ci.glm #' @export standard_error.mle <- standard_error.mle2 #' @export model_parameters.mle <- model_parameters.glm parameters/R/methods_glmx.R0000644000176200001440000000445114736731407015453 0ustar liggesusers#' @export model_parameters.glmx <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = "all", standardize = NULL, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { component <- insight::validate_argument(component, c("all", "conditional", "extra")) if (component == "all") { merge_by <- c("Parameter", "Component") } else { merge_by <- "Parameter" } out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = merge_by, standardize = standardize, exponentiate = exponentiate, keep_parameters = keep, drop_parameters = drop, p_adjust = p_adjust, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export standard_error.glmx <- function(model, ...) { stats <- stats::coef(summary(model)) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = c(as.vector(stats$glm[, "Std. Error"]), as.vector(stats$extra[, "Std. Error"])), Component = params$Component ) } #' @export p_value.glmx <- function(model, ...) { stats <- stats::coef(summary(model)) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = c(as.vector(stats$glm[, "Pr(>|z|)"]), as.vector(stats$extra[, "Pr(>|z|)"])), Component = params$Component ) } #' @export simulate_model.glmx <- function(model, iterations = 1000, component = "all", ...) 
{ component <- insight::validate_argument( component, c("all", "conditional", "extra") ) out <- .simulate_model(model, iterations, component = component, ...) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } parameters/R/methods_mixed.R0000644000176200001440000000010014717111737015572 0ustar liggesusers#' @export model_parameters.mixed <- model_parameters.glmmTMB parameters/R/methods_car.R0000644000176200001440000000461514716604200015237 0ustar liggesusers#' @export model_parameters.deltaMethod <- function(model, p_adjust = NULL, verbose = TRUE, ...) { dots <- list(...) if ("ci" %in% names(dots)) { insight::format_warning( "The `ci` argument is not supported by `model_parameters` for objects of this class. Use the `level` argument of the `deltaMethod` function instead." # nolint ) dots[["ci"]] <- NULL } # tweak column names params <- insight::standardize_names(datawizard::rownames_as_column(model, "Parameter")) # find CIs ci_cols <- endsWith(colnames(params), "%") cis <- as.numeric(gsub("%", "", colnames(params)[ci_cols], fixed = TRUE)) / 100 ci <- diff(cis) # rename CI columns colnames(params)[ci_cols] <- c("CI_low", "CI_high") # check if statistic is available if (is.null(params$Statistic)) { params <- merge(params, insight::get_statistic(model), by = "Parameter", sort = FALSE) } # check if p-value is available; if not, compute it from the z-statistic if (is.null(params$p)) { params$p <- as.vector(2 * stats::pnorm(abs(params$Statistic), lower.tail = FALSE)) } # rename statistic column names(params) <- gsub("Statistic", "z", names(params), fixed = TRUE) # adjust p? if (!is.null(p_adjust)) { params <- .p_adjust(params, p_adjust, model, verbose) } fun_args <- list( params, model, ci = ci, exponentiate = FALSE, bootstrap = FALSE, iterations = NULL, ci_method = "residual", p_adjust = p_adjust, include_info = FALSE, verbose = verbose ) fun_args <- c(fun_args, dots) params <- do.call(".add_model_parameters_attributes", fun_args) class(params) <- c("parameters_model", "see_parameters_model", class(params)) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(params, "no_caption") <- TRUE params } #' @export ci.deltaMethod <- function(x, ...) { params <- model_parameters(x, ...) ci <- attributes(params)$ci params$CI <- ci as.data.frame(params[c("Parameter", "CI", "CI_low", "CI_high")]) } #' @export standard_error.deltaMethod <- function(model, ...) { params <- model_parameters(model, ...) as.data.frame(params[c("Parameter", "SE")]) } #' @export p_value.deltaMethod <- function(model, ...) { params <- model_parameters(model, ...) if (is.null(params[["p"]])) { return(NULL) } as.data.frame(params[c("Parameter", "p")]) } parameters/R/methods_truncreg.R0000644000176200001440000000021614716604200016314 0ustar liggesusers# classes: .truncreg #' @export standard_error.truncreg <- standard_error.default #' @export p_value.truncreg <- p_value.default parameters/R/methods_betareg.R0000644000176200001440000001034014761570351016104 0ustar liggesusers## TODO add ci_method later? #' @export model_parameters.betareg <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = "conditional", standardize = NULL, exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { # validation check, warn if unsupported argument is used.
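# `vcov` and `vcov_args` are not supported for beta-regression models;
# `.check_dots()` is expected to warn when they are supplied and to strip
# them from `...` before the remaining dots are passed along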
dot_args <- .check_dots( dots = list(...), not_allowed = c("vcov", "vcov_args"), class(model)[1], verbose = verbose ) component <- insight::validate_argument(component, c("conditional", "precision", "all")) if (component == "all") { merge_by <- c("Parameter", "Component") } else { merge_by <- "Parameter" } ## TODO check merge by fun_args <- list( model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, include_info = include_info, vcov = NULL, vcov_args = NULL ) fun_args <- c(fun_args, dot_args) out <- do.call(".model_parameters_generic", fun_args) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export ci.betareg <- function(x, ci = 0.95, component = "all", verbose = TRUE, ...) { # validation check, warn if unsupported argument is used. dot_args <- .check_dots( dots = list(...), not_allowed = c("vcov", "vcov_args"), class(x)[1], function_name = "ci", verbose = verbose ) component <- match.arg(component, choices = c("all", "conditional", "precision")) .ci_generic(model = x, ci = ci, dof = Inf, component = component, verbose = verbose) } #' @export standard_error.betareg <- function(model, component = "all", verbose = TRUE, ...) { # validation check, warn if unsupported argument is used. dot_args <- .check_dots( dots = list(...), not_allowed = c("vcov", "vcov_args"), class(model)[1], function_name = "standard_error", verbose = verbose ) component <- match.arg(component, choices = c("all", "conditional", "precision")) params <- insight::get_parameters(model) cs <- do.call(rbind, stats::coef(summary(model))) se <- cs[, 2] out <- .data_frame( Parameter = .remove_backticks_from_string(names(se)), Component = params$Component, SE = as.vector(se) ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @export p_value.betareg <- function(model, component = "all", verbose = TRUE, ...) { # validation check, warn if unsupported argument is used. dot_args <- .check_dots( dots = list(...), not_allowed = c("vcov", "vcov_args"), class(model)[1], function_name = "p_value", verbose = verbose ) component <- insight::validate_argument( component, c("all", "conditional", "precision") ) params <- insight::get_parameters(model) cs <- do.call(rbind, stats::coef(summary(model))) p <- cs[, 4] out <- .data_frame( Parameter = params$Parameter, Component = params$Component, p = as.vector(p) ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @export simulate_model.betareg <- function(model, iterations = 1000, component = "all", ...) { component <- insight::validate_argument( component, c("all", "conditional", "precision") ) out <- .simulate_model(model, iterations, component = component, ...) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } parameters/R/n_factors.R0000644000176200001440000006645015024500057014730 0ustar liggesusers#' Number of components/factors to retain in PCA/FA #' #' This function runs many existing procedures for determining how many factors #' to retain/extract from factor analysis (FA) or dimension reduction (PCA). It #' returns the number of factors based on the maximum consensus between methods. #' In case of ties, it will keep the simplest model and select the solution #' with the fewer factors. #' #' @param x A data frame. 
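#' If a correlation matrix is passed via `correlation_matrix`, `x` may
#' instead be the number of observations that the matrix is based on.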
#' @param type Can be `"FA"` or `"PCA"`, depending on what you want to do. #' @param rotation Only used for VSS (Very Simple Structure criterion, see #' [psych::VSS()]). The rotation to apply. Can be `"none"`, `"varimax"`, #' `"quartimax"`, `"bentlerT"`, `"equamax"`, `"varimin"`, `"geominT"` and #' `"bifactor"` for orthogonal rotations, and `"promax"`, `"oblimin"`, #' `"simplimax"`, `"bentlerQ"`, `"geominQ"`, `"biquartimin"` and `"cluster"` #' for oblique transformations. #' @param algorithm Factoring method used by VSS. Can be `"pa"` for Principal #' Axis Factor Analysis, `"minres"` for minimum residual (OLS) factoring, #' `"mle"` for Maximum Likelihood FA and `"pc"` for Principal Components. #' `"default"` will select `"minres"` if `type = "FA"` and `"pc"` if #' `type = "PCA"`. #' @param package Package from which respective methods are used. Can be #' `"all"` or a vector containing `"nFactors"`, `"psych"`, `"PCDimension"`, #' `"fit"` or `"EGAnet"`. Note that `"fit"` (which actually also relies on the #' `psych` package) and `"EGAnet"` can be very slow for bigger datasets. Thus, #' the default is `c("nFactors", "psych")`. You must have the respective #' packages installed for the methods to be used. #' @param safe If `TRUE`, the function will run all the procedures in try #' blocks, and will only return those that work and silently skip the ones #' that may fail. #' @param correlation_matrix An optional correlation matrix that can be used #' (note that the data must still be passed as the first argument). If `NULL`, #' will compute it by running `cor()` on the passed data. #' @param n_max If set to a value (e.g., `10`), will drop from the results all #' methods that suggest a higher number of components. The interpretation becomes #' 'from all the methods that suggested a number lower than n_max, the results #' are ...'. #' @param ... Arguments passed to or from other methods. #' #' @details `n_components()` is actually an alias for `n_factors()`, with #' different defaults for the function arguments. #' #' @note There is also a #' [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) #' implemented in the [**see**-package](https://easystats.github.io/see/). #' `n_components()` is a convenient short-cut for `n_factors(type = "PCA")`. #' #' @examplesIf require("PCDimension", quietly = TRUE) && require("nFactors", quietly = TRUE) && require("EGAnet", quietly = TRUE) && require("psych", quietly = TRUE) #' library(parameters) #' n_factors(mtcars, type = "PCA") #' #' result <- n_factors(mtcars[1:5], type = "FA") #' as.data.frame(result) #' summary(result) #' \donttest{ #' # Setting package = 'all' will increase the number of methods (but is slow) #' n_factors(mtcars, type = "PCA", package = "all") #' n_factors(mtcars, type = "FA", algorithm = "mle", package = "all") #' } #' #' @return A data frame. #' #' @references #' #' - Bartlett, M. S. (1950). Tests of significance in factor analysis. #' British Journal of statistical psychology, 3(2), 77-85. #' #' - Bentler, P. M., & Yuan, K. H. (1996). Test of linear trend in #' eigenvalues of a covariance matrix with application to data analysis. #' British Journal of Mathematical and Statistical Psychology, 49(2), 299-312. #' #' - Cattell, R. B. (1966). The scree test for the number of factors. #' Multivariate behavioral research, 1(2), 245-276. #' #' - Finch, W. H. (2019). Using Fit Statistic Differences to Determine the #' Optimal Number of Factors to Retain in an Exploratory Factor Analysis. 
#' Educational and Psychological Measurement. #' #' - Zoski, K. W., & Jurs, S. (1996). An objective counterpart to the #' visual scree test for factor analysis: The standard error scree. #' Educational and Psychological Measurement, 56(3), 443-451. #' #' - Zoski, K., & Jurs, S. (1993). Using multiple regression to determine #' the number of factors to retain in factor analysis. Multiple Linear #' Regression Viewpoints, 20(1), 5-9. #' #' - Nasser, F., Benson, J., & Wisenbaker, J. (2002). The performance of #' regression-based variations of the visual scree for determining the number #' of common factors. Educational and psychological measurement, 62(3), #' 397-419. #' #' - Golino, H., Shi, D., Garrido, L. E., Christensen, A. P., Nieto, M. #' D., Sadana, R., & Thiyagarajan, J. A. (2018). Investigating the performance #' of Exploratory Graph Analysis and traditional techniques to identify the #' number of latent factors: A simulation and tutorial. #' #' - Golino, H. F., & Epskamp, S. (2017). Exploratory graph analysis: A #' new approach for estimating the number of dimensions in psychological #' research. PloS one, 12(6), e0174035. #' #' - Revelle, W., & Rocklin, T. (1979). Very simple structure: An #' alternative procedure for estimating the optimal number of interpretable #' factors. Multivariate Behavioral Research, 14(4), 403-414. #' #' - Velicer, W. F. (1976). Determining the number of components from the #' matrix of partial correlations. Psychometrika, 41(3), 321-327. #' #' @export n_factors <- function(x, type = "FA", rotation = "varimax", algorithm = "default", package = c("nFactors", "psych"), correlation_matrix = NULL, safe = TRUE, n_max = NULL, ...) { if (all(package == "all")) { package <- c("nFactors", "EGAnet", "psych", "fit", "pcdimension") } # Get number of observations if (is.data.frame(x)) { n_obs <- nrow(x) } else if (is.numeric(x) && !is.null(correlation_matrix)) { n_obs <- x package <- package[!package %in% c("pcdimension", "PCDimension")] } else if (is.matrix(x) || inherits(x, "easycormatrix")) { insight::format_error( "Please input the correlation matrix via the `correlation_matrix` argument and the number of rows / observations via the first argument." # nolint ) } # Get only numeric numerics <- vapply(x, is.numeric, TRUE) if (!all(numerics)) { insight::format_warning(paste0( "Some variables are not numeric (", toString(names(x)[!numerics]), "). Dropping them." )) } x <- x[numerics] # Correlation matrix if (is.null(correlation_matrix)) { correlation_matrix <- stats::cor(x, use = "pairwise.complete.obs", ...) } eigen_values <- eigen(correlation_matrix)$values # Smooth matrix if negative eigen values if (any(eigen_values < 0)) { insight::check_if_installed("psych") correlation_matrix <- psych::cor.smooth(correlation_matrix, ...) 
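# re-compute eigenvalues from the smoothed (now positive semi-definite) matrix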
eigen_values <- eigen(correlation_matrix)$values } # Initialize dataframe out <- data.frame() # nFactors ------------------------------------------- if ("nFactors" %in% package) { insight::check_if_installed("nFactors") # Model if (tolower(type) %in% c("fa", "factor", "efa")) { model <- "factors" } else { model <- "components" } # Compute all if (safe) { out <- rbind( out, tryCatch( .n_factors_bartlett(eigen_values, model, n_obs), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch( .n_factors_bentler(eigen_values, model, n_obs), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch( .n_factors_cng(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch( .n_factors_mreg(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch( .n_factors_scree(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) out <- rbind( out, tryCatch( .n_factors_sescree(eigen_values, model), warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind(out, .n_factors_bartlett(eigen_values, model, n_obs)) out <- rbind(out, .n_factors_bentler(eigen_values, model, n_obs)) out <- rbind(out, .n_factors_cng(eigen_values, model)) out <- rbind(out, .n_factors_mreg(eigen_values, model)) out <- rbind(out, .n_factors_scree(eigen_values, model)) out <- rbind(out, .n_factors_sescree(eigen_values, model)) } } # EGAnet ------------------------------------------- if ("EGAnet" %in% package) { insight::check_if_installed("EGAnet") if (safe) { out <- rbind( out, tryCatch( .n_factors_ega(x, correlation_matrix, n_obs, eigen_values, type), # warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind(out, .n_factors_ega(x, correlation_matrix, n_obs, eigen_values, type)) } } # psych ------------------------------------------- if ("psych" %in% package) { insight::check_if_installed("psych") if (safe) { out <- rbind( out, tryCatch( .n_factors_vss(x, correlation_matrix, n_obs, type, rotation, algorithm), # warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind(out, .n_factors_vss(x, correlation_matrix, n_obs, type, rotation, algorithm)) } } # fit ------------------------------------------- if ("fit" %in% package) { insight::check_if_installed("psych") if (safe) { out <- rbind( out, tryCatch( .n_factors_fit(x, correlation_matrix, n_obs, type, rotation, algorithm), warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind(out, .n_factors_fit(x, correlation_matrix, n_obs, type, rotation, algorithm)) } } # pcdimension ------------------------------------------- if ("pcdimension" %in% tolower(package)) { insight::check_if_installed("PCDimension") if (safe) { out <- rbind( out, tryCatch( .n_factors_PCDimension(x, type), warning = function(w) data.frame(), error = function(e) data.frame() ) ) } else { out <- rbind(out, .n_factors_PCDimension(x, type)) } } # OUTPUT ---------------------------------------------- # TODO created weighted composite score out <- out[!is.na(out$n_Factors), ] # Remove empty methods out <- out[order(out$n_Factors), ] # Arrange by n factors row.names(out) <- NULL # Reset row index if (!is.null(n_max)) { out <- out[out$n_Factors <= n_max, ] } # Add summary by_factors <- .data_frame( n_Factors = 
as.numeric(unique(out$n_Factors)), n_Methods = as.numeric(by(out, as.factor(out$n_Factors), function(out) n <- nrow(out))) ) # Add cumulative percentage of variance explained fa <- psych::fa( correlation_matrix, nfactors = max(by_factors$n_Factors), n.obs = nrow(x), rotate = "none" ) varex <- .get_fa_variance_summary(fa) # Extract number of factors from EFA output (usually MR1, ML1, etc.) varex$n_Factors <- as.numeric(gsub("[^\\d]+", "", varex$Component, perl = TRUE)) # Merge (and like that filter out empty methods) by_factors <- merge(by_factors, varex[, c("n_Factors", "Variance_Cumulative")], by = "n_Factors") attr(out, "Variance_Explained") <- varex # We add all the variance explained (for plotting) attr(out, "summary") <- by_factors attr(out, "n") <- min(as.numeric(as.character(by_factors[ by_factors$n_Methods == max(by_factors$n_Methods), "n_Factors" ]))) class(out) <- c("n_factors", "see_n_factors", class(out)) out } #' @rdname n_factors #' @export n_components <- function(x, type = "PCA", rotation = "varimax", algorithm = "default", package = c("nFactors", "psych"), correlation_matrix = NULL, safe = TRUE, ...) { n_factors( x, type = type, rotation = rotation, algorithm = algorithm, package = package, correlation_matrix = correlation_matrix, safe = safe, ... ) } #' @export print.n_factors <- function(x, ...) { results <- attributes(x)$summary # Extract info max_methods <- max(results$n_Methods) best_n <- attributes(x)$n # Extract methods if ("n_Factors" %in% names(x)) { type <- "factor" methods_text <- toString(as.character(x[x$n_Factors == best_n, "Method"])) } else { type <- "cluster" methods_text <- toString(as.character(x[x$n_Clusters == best_n, "Method"])) } # Text msg_text <- paste0( "The choice of ", as.character(best_n), ifelse(type == "factor", " dimensions ", " clusters "), "is supported by ", max_methods, " (", sprintf("%.2f", max_methods / nrow(x) * 100), "%) methods out of ", nrow(x), " (", methods_text, ").\n" ) insight::print_color("# Method Agreement Procedure:\n\n", "blue") cat(msg_text) invisible(x) } #' @export summary.n_factors <- function(object, ...) { attributes(object)$summary } #' @export as.numeric.n_factors <- function(x, ...) 
{ attributes(x)$n } #' @export as.double.n_factors <- as.numeric.n_factors #' @export summary.n_clusters <- summary.n_factors #' @export as.numeric.n_clusters <- as.numeric.n_factors #' @export as.double.n_clusters <- as.double.n_factors #' @export print.n_clusters <- print.n_factors # Methods ----------------------------------------------------------------- #' Bartlett, Anderson and Lawley Procedures #' @keywords internal .n_factors_bartlett <- function(eigen_values = NULL, model = "factors", nobs = NULL) { nfac <- nFactors::nBartlett( eigen_values, N = nobs, cor = TRUE, alpha = 0.05, details = FALSE )$nFactors .data_frame( n_Factors = as.numeric(nfac), Method = insight::format_capitalize(names(nfac)), Family = "Bartlett" ) } #' Bentler and Yuan's Procedure #' @keywords internal .n_factors_bentler <- function(eigen_values = NULL, model = "factors", nobs = NULL) { nfac <- .nBentler( x = eigen_values, N = nobs, model = model, alpha = 0.05, details = FALSE )$nFactors .data_frame( n_Factors = as.numeric(nfac), Method = "Bentler", Family = "Bentler" ) } #' Cattell-Nelson-Gorsuch CNG Indices #' @keywords internal .n_factors_cng <- function(eigen_values = NULL, model = "factors") { if (length(eigen_values) < 6) { nfac <- NA } else { nfac <- nFactors::nCng(x = eigen_values, cor = TRUE, model = model)$nFactors } .data_frame( n_Factors = as.numeric(nfac), Method = "CNG", Family = "CNG" ) } #' Multiple Regression Procedure #' @keywords internal .n_factors_mreg <- function(eigen_values = NULL, model = "factors") { if (length(eigen_values) < 6) { nfac <- NA } else { nfac <- nFactors::nMreg(x = eigen_values, cor = TRUE, model = model)$nFactors } .data_frame( n_Factors = as.numeric(nfac), Method = c("beta", "t", "p"), Family = "Multiple_regression" ) } #' Non-Graphical Cattell's Scree Test #' @keywords internal .n_factors_scree <- function(eigen_values = NULL, model = "factors") { nfac <- unlist(nFactors::nScree(x = eigen_values, cor = TRUE, model = model)$Components) .data_frame( n_Factors = as.numeric(nfac), Method = c("Optimal coordinates", "Acceleration factor", "Parallel analysis", "Kaiser criterion"), Family = "Scree" ) } #' Standard Error Scree and Coefficient of Determination Procedures #' @keywords internal .n_factors_sescree <- function(eigen_values = NULL, model = "factors") { nfac <- nFactors::nSeScree(x = eigen_values, cor = TRUE, model = model)$nFactors .data_frame( n_Factors = as.numeric(nfac), Method = c("Scree (SE)", "Scree (R2)"), Family = "Scree_SE" ) } # EGAnet ------------------------ .n_factors_ega <- function(x = NULL, correlation_matrix = NULL, nobs = NULL, eigen_values = NULL, type = "FA") { # Pass our own correlation matrix instead of the raw data junk <- utils::capture.output(suppressWarnings(suppressMessages( nfac_glasso <- EGAnet::EGA( correlation_matrix, n = nobs, model = "glasso", plot.EGA = FALSE )$n.dim ))) junk <- utils::capture.output(suppressWarnings(suppressMessages( nfac_TMFG <- .safe( EGAnet::EGA(correlation_matrix, n = nobs, model = "TMFG", plot.EGA = FALSE)$n.dim, NA ) ))) .data_frame( n_Factors = as.numeric(c(nfac_glasso, nfac_TMFG)), Method = c("EGA (glasso)", "EGA (TMFG)"), Family = "EGA" ) } # psych ------------------------ #' @keywords internal .n_factors_parallel <- function(x = NULL, correlation_matrix = NULL, nobs = NULL, type = "FA") { # Alternative version of parallel analysis # Not used because already included in nFactors if (tolower(type) %in% c("fa", "factor", "efa")) { fa <- "fa" } else { fa <- "pc" } insight::check_if_installed("psych") out <-
psych::fa.parallel(correlation_matrix, n.obs = nobs, fa = fa, plot = FALSE, fm = "ml") .data_frame( n_Factors = as.numeric(stats::na.omit(c(out$nfact, out$ncomp))), Method = "Parallel", Family = "psych" ) } #' @keywords internal .n_factors_vss <- function(x = NULL, correlation_matrix = NULL, nobs = NULL, type = "FA", rotation = "varimax", algorithm = "default") { if (algorithm == "default") { if (tolower(type) %in% c("fa", "factor", "efa")) { algorithm <- "minres" } else { algorithm <- "pc" } } insight::check_if_installed("psych") # Compute VSS vss <- psych::VSS( correlation_matrix, n = ncol(x) - 1, n.obs = nobs, rotate = rotation, fm = algorithm, plot = FALSE ) # Format results stats <- vss$vss.stats stats$map <- vss$map stats$n_Factors <- seq_len(nrow(stats)) names(stats) <- gsub("cfit.", "VSS_Complexity_", names(stats)) # Indices vss_1 <- which.max(stats$VSS_Complexity_1) vss_2 <- which.max(stats$VSS_Complexity_2) velicer_MAP <- which.min(stats$map) BIC_reg <- which.min(stats$BIC) BIC_adj <- which.min(stats$SABIC) BIC_reg <- ifelse(length(BIC_reg) == 0, NA, BIC_reg) BIC_adj <- ifelse(length(BIC_adj) == 0, NA, BIC_adj) .data_frame( n_Factors = as.numeric(c(vss_1, vss_2, velicer_MAP, BIC_reg, BIC_adj)), Method = c("VSS complexity 1", "VSS complexity 2", "Velicer's MAP", "BIC", "BIC (adjusted)"), Family = c("VSS", "VSS", "Velicers_MAP", "BIC", "BIC") ) } #' @keywords internal .n_factors_fit <- function(x = NULL, correlation_matrix = NULL, nobs = NULL, type = "FA", rotation = "varimax", algorithm = "default", threshold = 0.1) { if (algorithm == "default") { if (tolower(type) %in% c("fa", "factor", "efa")) { algorithm <- "minres" } else { algorithm <- "pc" } } insight::check_if_installed("psych") rez <- data.frame() for (n in 1:(ncol(correlation_matrix) - 1)) { if (tolower(type) %in% c("fa", "factor", "efa")) { factors <- tryCatch( suppressWarnings( psych::fa( correlation_matrix, nfactors = n, n.obs = nobs, rotate = rotation, fm = algorithm ) ), error = function(e) NA ) } else { factors <- tryCatch( suppressWarnings( psych::pca( correlation_matrix, nfactors = n, n.obs = nobs, rotate = rotation ) ), error = function(e) NA ) } if (all(is.na(factors))) { next } rmsea <- ifelse(is.null(factors$RMSEA), NA, factors$RMSEA[1]) rmsr <- ifelse(is.null(factors$rms), NA, factors$rms) crms <- ifelse(is.null(factors$crms), NA, factors$crms) bic <- ifelse(is.null(factors$BIC), NA, factors$BIC) tli <- ifelse(is.null(factors$TLI), NA, factors$TLI) rez <- rbind( rez, .data_frame( n = n, Fit = factors$fit.off, TLI = tli, RMSEA = rmsea, RMSR = rmsr, CRMS = crms, BIC = bic ) ) } # For fit indices that constantly increase / decrease, we need to find # an "elbow"/"knee". Here we take the first value that reaches 90 percent # of the range between the max and the min (when 'threshold = 0.1'). 
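# e.g., with hypothetical Fit values ranging from 0.2 to 0.9 and the default
# threshold of 0.1, the cutoff is 0.9 - 0.1 * (0.9 - 0.2) = 0.83, and the
# smallest number of factors whose Fit reaches 0.83 is retained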
# Fit if (all(is.na(rez$Fit))) { fit_off <- NA } else { target <- max(rez$Fit, na.rm = TRUE) - threshold * diff(range(rez$Fit, na.rm = TRUE)) fit_off <- rez[!is.na(rez$Fit) & rez$Fit >= target, "n"][1] } # TLI if (all(is.na(rez$TLI))) { TLI <- NA } else { target <- max(rez$TLI, na.rm = TRUE) - threshold * diff(range(rez$TLI, na.rm = TRUE)) TLI <- rez[!is.na(rez$TLI) & rez$TLI >= target, "n"][1] } # RMSEA if (all(is.na(rez$RMSEA))) { RMSEA <- NA } else { target <- min(rez$RMSEA, na.rm = TRUE) + threshold * diff(range(rez$RMSEA, na.rm = TRUE)) RMSEA <- rez[!is.na(rez$RMSEA) & rez$RMSEA <= target, "n"][1] } # RMSR if (all(is.na(rez$RMSR))) { RMSR <- NA } else { target <- min(rez$RMSR, na.rm = TRUE) + threshold * diff(range(rez$RMSR, na.rm = TRUE)) RMSR <- rez[!is.na(rez$RMSR) & rez$RMSR <= target, "n"][1] } # CRMS if (all(is.na(rez$CRMS))) { CRMS <- NA } else { target <- min(rez$CRMS, na.rm = TRUE) + threshold * diff(range(rez$CRMS, na.rm = TRUE)) CRMS <- rez[!is.na(rez$CRMS) & rez$CRMS <= target, "n"][1] } # BIC (this is a penalized method so we can just take the one that minimizes it) BayIC <- ifelse(all(is.na(rez$BIC)), NA, rez[!is.na(rez$BIC) & rez$BIC == min(rez$BIC, na.rm = TRUE), "n"]) .data_frame( n_Factors = c(fit_off, TLI, RMSEA, RMSR, CRMS, BayIC), Method = c("Fit_off", "TLI", "RMSEA", "RMSR", "CRMS", "BIC"), Family = c("Fit", "Fit", "Fit", "Fit", "Fit", "Fit") ) } # PCDimension ------------------------ #' @keywords internal .n_factors_PCDimension <- function(x = NULL, type = "PCA") { # ClassDiscovery is a strict dependency of PCDimension, so users who have # PCDimension should also have ClassDiscovery insight::check_if_installed(c("ClassDiscovery", "PCDimension")) # Only applies to PCA with full data if (tolower(type) %in% c("fa", "factor", "efa") || !is.data.frame(x)) { return(data.frame()) } # Randomization-Based Methods rez_rnd <- PCDimension::rndLambdaF(x) # Broken-Stick spca <- ClassDiscovery::SamplePCA(t(x)) lambda <- spca@variances[1:(ncol(x) - 1)] rez_brokenstick <- PCDimension::bsDimension(lambda) # Auer-Gervini ag <- PCDimension::AuerGervini(spca) agfuns <- list( twice = PCDimension::agDimTwiceMean, specc = PCDimension::agDimSpectral, km = PCDimension::agDimKmeans, km3 = PCDimension::agDimKmeans3, # tt=PCDimension::agDimTtest, # known to overestimate # cpm=PCDimension::makeAgCpmFun("Exponential"), # known to overestimate tt2 = PCDimension::agDimTtest2, cpt = PCDimension::agDimCPT ) rez_ag <- PCDimension::compareAgDimMethods(ag, agfuns) .data_frame( n_Factors = as.numeric(c(rez_rnd, rez_brokenstick, rez_ag)), Method = c( "Random (lambda)", "Random (F)", "Broken-Stick", "Auer-Gervini (twice)", "Auer-Gervini (spectral)", "Auer-Gervini (kmeans-2)", "Auer-Gervini (kmeans-3)", "Auer-Gervini (T)", "Auer-Gervini (CPT)" ), Family = "PCDimension" ) } # Re-implementation of nBentler in nFactors ------------------------ #' @keywords internal .nBentler <- function(x, N, model = model, log = TRUE, alpha = 0.05, cor = TRUE, details = TRUE, ...) { insight::check_if_installed("nFactors") lambda <- nFactors::eigenComputes(x, cor = cor, model = model, ...) if (any(lambda < 0)) { insight::format_error( "These indices are only valid with a principal component solution. So, only positive eigenvalues are permitted."
) } minPar <- c(min(lambda) - abs(min(lambda)) + 0.001, 0.001) maxPar <- c(max(lambda), stats::lm(lambda ~ I(rev(seq_along(lambda))))$coef[2]) n <- N significance <- alpha min.k <- 3 LRT <- .data_frame( q = numeric(length(lambda) - min.k), k = numeric(length(lambda) - min.k), LRT = numeric(length(lambda) - min.k), a = numeric(length(lambda) - min.k), b = numeric(length(lambda) - min.k), p = numeric(length(lambda) - min.k), convergence = numeric(length(lambda) - min.k) ) bentler.n <- 0 for (i in 1:(length(lambda) - min.k)) { temp <- nFactors::bentlerParameters( x = lambda, N = n, nFactors = i, log = log, cor = cor, minPar = minPar, maxPar = maxPar, graphic = FALSE ) LRT[i, 3] <- temp$lrt LRT[i, 4] <- ifelse(is.null(temp$coef[1]), NA, temp$coef[1]) LRT[i, 5] <- ifelse(is.null(temp$coef[2]), NA, temp$coef[2]) LRT[i, 6] <- ifelse(is.null(temp$p.value), NA, temp$p.value) LRT[i, 7] <- ifelse(is.null(temp$convergence), NA, temp$convergence) LRT[i, 2] <- i LRT[i, 1] <- length(lambda) - i } # LRT <- LRT[order(LRT[,1],decreasing = TRUE),] for (i in 1:(length(lambda) - min.k)) { if (i == 1) bentler.n <- bentler.n + as.numeric(LRT$p[i] <= significance) if (i > 1 && LRT$p[i - 1] <= 0.05) { bentler.n <- bentler.n + as.numeric(LRT$p[i] <= significance) } } if (bentler.n == 0) { bentler.n <- length(lambda) } if (isTRUE(details)) { details <- LRT } else { details <- NULL } res <- list(detail = details, nFactors = bentler.n) class(res) <- c("nFactors", "list") res } parameters/R/bootstrap_model-emmeans.R0000644000176200001440000000235314317274256017577 0ustar liggesusers#' @keywords emmeans_methods emm_basis.bootstrap_model <- function(object, trms, xlev, grid, ...) { insight::check_if_installed("emmeans") model <- attr(object, "original_model") emb <- emmeans::emm_basis(model, trms, xlev, grid, ...) if (ncol(object) != ncol(emb$V) || !all(colnames(object) == colnames(emb$V))) { insight::format_error( "Oops! Cannot create the reference grid. Please open an issue at {.url https://github.com/easystats/parameters/issues}." ) } emb$post.beta <- as.matrix(object) emb$misc$is_boot <- TRUE emb } #' @keywords emmeans_methods recover_data.bootstrap_model <- function(object, ...) { insight::check_if_installed("emmeans") model <- attr(object, "original_model") emmeans::recover_data(model, ...) } #' @keywords emmeans_methods emm_basis.bootstrap_parameters <- function(object, trms, xlev, grid, ...) { insight::check_if_installed("emmeans") model <- attr(object, "boot_samples") emmeans::emm_basis(model, trms, xlev, grid, ...) } #' @keywords emmeans_methods recover_data.bootstrap_parameters <- function(object, ...) { insight::check_if_installed("emmeans") model <- attr(object, "boot_samples") emmeans::recover_data(model, ...) } parameters/R/methods_cgam.R0000644000176200001440000001267114717111737015413 0ustar liggesusers#' @title Parameters from Generalized Additive (Mixed) Models #' @name model_parameters.cgam #' #' @description Extract and compute indices and measures to describe parameters #' of generalized additive models (GAM(M)s). #' #' @param model A gam/gamm model. #' @inheritParams model_parameters.default #' #' @seealso [insight::standardize_names()] to rename #' columns into a consistent, standardized naming scheme. #' #' @details The reporting of degrees of freedom *for the spline terms* #' slightly differs from the output of `summary(model)`, for example in the #' case of `mgcv::gam()`. 
The *estimated degrees of freedom*, column #' `edf` in the summary output, is named `df` in the returned data #' frame, while the column `df_error` in the returned data frame refers to #' the residual degrees of freedom that are returned by `df.residual()`. #' Hence, the values in the `df_error` column differ from those in the #' `Ref.df` column of the summary. This difference is intentional, because #' the `Ref.df` value \dQuote{is not very interpretable} #' ([web](https://stat.ethz.ch/pipermail/r-help/2019-March/462135.html)). #' #' @return A data frame of indices related to the model's parameters. #' #' @examples #' library(parameters) #' if (require("mgcv")) { #' dat <- gamSim(1, n = 400, dist = "normal", scale = 2) #' model <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat) #' model_parameters(model) #' } #' @export model_parameters.cgam <- function(model, ci = 0.95, ci_method = "residual", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { # validation check, warn if unsupported argument is used. dot_args <- .check_dots( dots = list(...), not_allowed = c("vcov", "vcov_args", "component"), class(model)[1], verbose = verbose ) # Processing if (bootstrap) { params <- bootstrap_parameters( model, iterations = iterations, ci = ci, ... ) } else { fun_args <- list( model, ci = ci, ci_method = ci_method, component = "all", merge_by = c("Parameter", "Component"), standardize = standardize, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, verbose = verbose, vcov = NULL, vcov_args = NULL ) fun_args <- c(fun_args, dot_args) params <- do.call(".extract_parameters_generic", fun_args) } # fix statistic column if ("t" %in% names(params) && !is.null(params$Component) && "smooth_terms" %in% params$Component) { names(params)[names(params) == "t"] <- "t / F" } # fix estimated df column if (inherits(model, c("gam", "cgam", "scam", "rqss")) && "smooth_terms" %in% params$Component && !("df" %in% names(params))) { # nolint params$df <- params$Coefficient params$df[params$Component != "smooth_terms"] <- NA params$df_error[params$Component == "smooth_terms"] <- NA params$Coefficient[params$Component == "smooth_terms"] <- NA # reorder insert_column <- which(names(params) == "df_error") if (!length(insert_column)) { insert_column <- which(names(params) == "p") } if (length(insert_column)) { n_col <- ncol(params) params <- params[c(1:(insert_column - 1), n_col, insert_column:(n_col - 1))] } } else if (all(c("df", "df_error") %in% names(params)) && "smooth_terms" %in% params$Component) { params$df_error[params$Component == "smooth_terms"] <- NA } # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) params <- .add_model_parameters_attributes( params, model, ci, exponentiate, p_adjust = p_adjust, verbose = verbose, ... ) if ("CI" %in% colnames(params)) { params$CI[is.na(params$CI_low)] <- NA } attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export p_value.cgam <- function(model, component = "all", ...)
{ component <- insight::validate_argument( component, c("all", "conditional", "smooth_terms") ) params <- insight::get_parameters(model, component = "all") cs <- summary(model) p <- as.vector(cs$coefficients[, 4]) if (!is.null(cs$coefficients2)) p <- c(p, as.vector(cs$coefficients2[, "p.value"])) out <- .data_frame( Parameter = params$Parameter, Component = params$Component, p = as.vector(p) ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @export standard_error.cgam <- function(model, ...) { sc <- summary(model) se <- as.vector(sc$coefficients[, "StdErr"]) params <- insight::get_parameters(model, component = "all") if (!is.null(sc$coefficients2)) se <- c(se, rep(NA, nrow(sc$coefficients2))) .data_frame( Parameter = params$Parameter, SE = se, Component = params$Component ) } parameters/R/standardize_posteriors.R0000644000176200001440000000632114716604200017544 0ustar liggesusers#' @rdname standardize_parameters #' @export #' @aliases standardise_posteriors standardize_posteriors <- function(model, method = "refit", robust = FALSE, two_sd = FALSE, include_response = TRUE, verbose = TRUE, ...) { object_name <- insight::safe_deparse_symbol(substitute(model)) m_info <- .get_model_info(model, ...) include_response <- include_response && .safe_to_standardize_response(m_info, verbose = verbose) if (method == "refit") { model <- datawizard::standardize( model, robust = robust, two_sd = two_sd, include_response = include_response, verbose = verbose, m_info = m_info ) } pars <- insight::get_parameters(model) if (method %in% c("posthoc", "smart", "basic", "classic", "pseudo")) { pars <- .standardize_posteriors_posthoc(pars, method, model, m_info, robust, two_sd, include_response, verbose) method <- attr(pars, "std_method") robust <- attr(pars, "robust") } ## attributes attr(pars, "std_method") <- method attr(pars, "two_sd") <- two_sd attr(pars, "robust") <- robust attr(pars, "include_response") <- include_response attr(pars, "object_name") <- object_name class(pars) <- c("parameters_standardized", class(pars)) pars } #' @export standardise_posteriors <- standardize_posteriors #' @keywords internal .standardize_posteriors_posthoc <- function(pars, method, model, mi, robust, two_sd, include_response, verbose) { # validation check for "pseudo" method <- .should_pseudo(method, model) method <- .cant_smart_or_posthoc(method, model, mi, pars$Parameter) if (robust && method == "pseudo") { insight::format_alert("`robust` standardization not available for `pseudo` method.") robust <- FALSE } ## Get scaling factors deviations <- standardize_info( model, robust = robust, include_pseudo = method == "pseudo", two_sd = two_sd, model_info = mi ) i <- match(deviations$Parameter, colnames(pars)) pars <- pars[, i] if (method == "basic") { # nolint col_dev_resp <- "Deviation_Response_Basic" col_dev_pred <- "Deviation_Basic" } else if (method == "posthoc") { col_dev_resp <- "Deviation_Response_Basic" col_dev_pred <- "Deviation_Smart" } else if (method == "smart") { col_dev_resp <- "Deviation_Response_Smart" col_dev_pred <- "Deviation_Smart" } else if (method == "pseudo") { col_dev_resp <- "Deviation_Response_Pseudo" col_dev_pred <- "Deviation_Pseudo" } else { insight::format_error("`method` must be one of \"basic\", \"posthoc\", \"smart\" or \"pseudo\".") } .dev_pred <- deviations[[col_dev_pred]] .dev_resp <- deviations[[col_dev_resp]] if (!include_response) .dev_resp <- 1 .dev_factor <- .dev_pred / .dev_resp # Sapply standardization pars <- t(t(pars) * .dev_factor) pars <- 
as.data.frame(pars) attr(pars, "std_method") <- method attr(pars, "two_sd") <- two_sd attr(pars, "robust") <- robust pars } parameters/R/ci_profile_boot.R0000644000176200001440000001143614736731407016120 0ustar liggesusers.ci_profiled <- function(model, ci) { glm_ci <- tryCatch( { out <- as.data.frame( suppressWarnings(stats::confint(model, level = ci)), stringsAsFactors = FALSE ) names(out) <- c("CI_low", "CI_high") out$CI <- ci out$Parameter <- insight::get_parameters(model, effects = "fixed", component = "conditional", verbose = FALSE )$Parameter out <- out[c("Parameter", "CI", "CI_low", "CI_high")] rownames(out) <- NULL out }, error = function(e) { NULL } ) if (is.null(glm_ci)) { glm_ci <- .ci_generic(model, ci = ci) } glm_ci } # we need this function for models where confint and get_parameters return # different length (e.g. as for "polr" models) .ci_profiled2 <- function(model, ci) { glm_ci <- tryCatch( { out <- as.data.frame(stats::confint(model, level = ci), stringsAsFactors = FALSE) names(out) <- c("CI_low", "CI_high") out$CI <- ci out$Parameter <- .remove_backticks_from_string(rownames(out)) out <- out[c("Parameter", "CI", "CI_low", "CI_high")] rownames(out) <- NULL out }, error = function(e) { NULL } ) if (is.null(glm_ci)) { glm_ci <- .ci_generic(model, ci = ci) } glm_ci } #' @keywords internal .ci_profile_merMod <- function(x, ci, profiled, ...) { out <- as.data.frame(suppressWarnings(stats::confint(profiled, level = ci, ...))) rownames(out) <- gsub("`", "", rownames(out), fixed = TRUE) out <- out[rownames(out) %in% insight::find_parameters(x, effects = "fixed")$conditional, ] names(out) <- c("CI_low", "CI_high") # Clean up out$Parameter <- row.names(out) out$CI <- ci out <- out[c("Parameter", "CI", "CI_low", "CI_high")] row.names(out) <- NULL out } #' @keywords internal .ci_profile_glmmTMB <- function(x, ci, profiled, component, ...) { # make sure "..." doesn't pass invalid arguments to package TMB dot_args <- .check_profile_uniroot_args(...) if (is.null(profiled)) { fun_args <- list(x, method = "profile", level = ci, dot_args) out <- as.data.frame(do.call(stats::confint, fun_args)) } else { fun_args <- list(profiled, level = ci, dot_args) out <- .safe(as.data.frame(do.call(stats::confint, fun_args))) if (is.null(out)) { fun_args <- list(x, method = "profile", level = ci, dot_args) out <- as.data.frame(do.call(stats::confint, fun_args)) } } .process_glmmTMB_CI(x, out, ci, component) } #' @keywords internal .ci_uniroot_glmmTMB <- function(x, ci, component, ...) { # make sure "..." doesn't pass invalid arguments to package TMB dot_args <- .check_profile_uniroot_args(...) fun_args <- list(x, level = ci, method = "uniroot", dot_args) out <- as.data.frame(do.call(stats::confint, fun_args)) .process_glmmTMB_CI(x, out, ci, component) } .check_profile_uniroot_args <- function(...) { .profile_formals <- c( "cl", "fitted", "h", "level_max", "lincomb", "maxit", "name", "ncpus", "npts", "obj", "parallel", "parm", "parm.range", "slice", "stderr", "stepfac", "trace", "ystep", "ytol" ) dots <- list(...) 
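# A minimal illustration of the filtering applied below (hypothetical input;
# only names that also occur in `.profile_formals` survive, everything else
# is silently dropped before being passed on):
#   dots <- list(npts = 10, some_unknown_arg = TRUE)
#   dots[intersect(names(dots), c("npts", "parallel"))]
#   #> $npts
#   #> [1] 10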
dot_args <- intersect(names(dots), .profile_formals) out <- dots[dot_args] if (!length(out)) { return(NULL) } out } .process_glmmTMB_CI <- function(x, out, ci, component) { rownames(out) <- gsub("`", "", rownames(out), fixed = TRUE) pars <- insight::get_parameters(x, effects = "fixed", component = component, verbose = FALSE ) param_names <- switch(component, conditional = pars$Parameter, zi = , zero_inflated = paste0("zi~", pars$Parameter), c( pars$Parameter[pars$Component == "conditional"], paste0("zi~", pars$Parameter[pars$Component == "zero_inflated"]) ) ) out <- out[rownames(out) %in% param_names, ] names(out) <- c("CI_low", "CI_high") # Clean up out$Parameter <- pars$Parameter out$CI <- ci out <- out[c("Parameter", "CI", "CI_low", "CI_high")] out$Component <- pars$Component row.names(out) <- NULL out } #' @keywords internal .ci_boot_merMod <- function(x, ci, iterations = 500, effects = "fixed", ...) { insight::check_if_installed("lme4") # Compute out <- suppressWarnings(suppressMessages(as.data.frame( lme4::confint.merMod(x, level = ci, method = "boot", nsim = iterations, ...) ))) rownames(out) <- gsub("`", "", rownames(out), fixed = TRUE) out <- out[rownames(out) %in% insight::find_parameters(x, effects = "fixed")$conditional, ] names(out) <- c("CI_low", "CI_high") # Clean up out$Parameter <- row.names(out) out$CI <- ci out <- out[c("Parameter", "CI", "CI_low", "CI_high")] row.names(out) <- NULL out } parameters/R/methods_model_fit.R0000644000176200001440000000326114736731407016444 0ustar liggesusers## tidymodels (.model_fit) # model parameters --------------------- #' @export model_parameters.model_fit <- function(model, ci = 0.95, effects = "fixed", component = "conditional", ci_method = "profile", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { model_parameters( model$fit, ci = ci, effects = effects, component = component, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, verbose = verbose, ... ) } # ci ------------------ #' @export ci.model_fit <- function(x, ci = 0.95, method = NULL, ...) { ci(x$fit, ci = ci, method = method, ...) } # standard error ------------------ #' @export standard_error.model_fit <- function(model, ...) { standard_error(model$fit, ...) } # p values ------------------ #' @export p_value.model_fit <- function(model, ...) { p_value(model$fit, ...) } # simulate model ------------------ #' @export simulate_model.model_fit <- function(model, iterations = 1000, ...) { simulate_model(model$fit, iterations = iterations, ...) } parameters/R/methods_speedglm.R0000644000176200001440000000032014355513424016265 0ustar liggesusers#' @export p_value.speedlm <- function(model, ...) { p <- p_value.default(model, ...) if (!is.numeric(p$p)) { p$p <- tryCatch(as.numeric(as.character(p$p)), error = function(e) p$p) } p } parameters/R/methods_eflm.R0000644000176200001440000000042414030655331015407 0ustar liggesusers# eflm (.eglm) ----------------- #' @export p_value.eglm <- function(model, ...) { stats <- stats::coef(summary(model)) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.numeric(as.vector(stats[, 4])) ) } parameters/R/sort_parameters.R0000644000176200001440000000275614477616760016207 0ustar liggesusers#' Sort parameters by coefficient values #' #' @param x A data frame or a `parameters_model` object. #' @param ... Arguments passed to or from other methods. 
#' #' @examples #' # creating object to sort (can also be a regular data frame) #' mod <- model_parameters(stats::lm(wt ~ am * cyl, data = mtcars)) #' #' # original output #' mod #' #' # sorted outputs #' sort_parameters(mod, sort = "ascending") #' sort_parameters(mod, sort = "descending") #' #' @return A sorted data frame or original object. #' #' @export sort_parameters <- function(x, ...) { UseMethod("sort_parameters") } #' @rdname sort_parameters #' #' @param sort If `"none"` (default) do not sort, `"ascending"` sort by #' increasing coefficient value, or `"descending"` sort by decreasing #' coefficient value. #' @param column The column containing model parameter estimates. This will be #' `"Coefficient"` (default) in *easystats* packages, `"estimate"` in *broom* #' package, etc. #' #' @export sort_parameters.default <- function(x, sort = "none", column = "Coefficient", ...) { sort <- match.arg(tolower(sort), choices = c("none", "ascending", "descending")) if (sort == "none") { return(x) } # new row indices to use for sorting new_row_order <- switch(sort, ascending = order(x[[column]], decreasing = FALSE), descending = order(x[[column]], decreasing = TRUE) ) x[new_row_order, ] } #' @export sort_parameters.data.frame <- sort_parameters.default parameters/R/methods_logistf.R0000644000176200001440000000212714716604200016135 0ustar liggesusers# model_parameters -------------------- #' @export model_parameters.logistf <- model_parameters.glm #' @export model_parameters.flic <- model_parameters.glm #' @export model_parameters.flac <- model_parameters.glm # ci -------------------- #' @export ci.logistf <- ci.glm #' @export ci.flic <- ci.glm #' @export ci.flac <- ci.glm # SE -------------------- #' @export standard_error.logistf <- function(model, ...) { vc <- insight::get_varcov(model, ...) se <- sqrt(diag(vc)) .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @export standard_error.flic <- standard_error.logistf #' @export standard_error.flac <- standard_error.logistf # p -------------------- #' @export p_value.logistf <- function(model, ...) { utils::capture.output(s <- summary(model)) # nolint .data_frame( Parameter = .remove_backticks_from_string(names(s$coefficients)), p = as.vector(s$prob) ) } #' @export p_value.flic <- p_value.logistf #' @export p_value.flac <- p_value.logistf parameters/R/methods_mlm.R0000644000176200001440000001667614717111737015302 0ustar liggesusers# classes: .mlm #################### .mlm #' Parameters from multinomial or cumulative link models #' #' Parameters from multinomial or cumulative link models #' #' @param model A model with multinomial or categorical response value. #' @inheritParams model_parameters.default #' @inheritParams simulate_model #' #' @details Multinomial or cumulative link models, i.e. models where the #' response value (dependent variable) is categorical and has more than two #' levels, usually return coefficients for each response level. Hence, the #' output from `model_parameters()` will split the coefficient tables #' by the different levels of the model's response. #' #' @seealso [insight::standardize_names()] to rename #' columns into a consistent, standardized naming scheme. 
#' #' @inheritSection model_parameters.zcpglm Model components #' #' @examplesIf require("brglm2", quietly = TRUE) #' data("stemcell", package = "brglm2") #' model <- brglm2::bracl( #' research ~ as.numeric(religion) + gender, #' weights = frequency, #' data = stemcell, #' type = "ML" #' ) #' model_parameters(model) #' @return A data frame of indices related to the model's parameters. #' @inheritParams simulate_model #' @export model_parameters.mlm <- function(model, ci = 0.95, vcov = NULL, vcov_args = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { out <- .model_parameters_generic( model = model, ci = ci, vcov = vcov, vcov_args = vcov_args, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Response"), standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export standard_error.mlm <- function(model, vcov = NULL, vcov_args = NULL, ...) { se <- standard_error.default(model, vcov = vcov, vcov_args = vcov_args, ...) est <- insight::get_parameters(model, ...) # assumes se and est are sorted the same way if (isTRUE(nrow(se) == nrow(est)) && "Parameter" %in% colnames(est) && "Response" %in% colnames(est)) { se$Parameter <- est$Parameter se$Response <- est$Response return(se) } else { # manually if (!is.null(vcov)) { insight::format_warning( "Unable to extract the variance-covariance matrix requested in `vcov`." ) } cs <- stats::coef(summary(model)) se <- lapply(names(cs), function(x) { params <- cs[[x]] .data_frame( Parameter = rownames(params), SE = params[, "Std. Error"], Response = gsub("^Response (.*)", "\\1", x) ) }) se <- insight::text_remove_backticks(do.call(rbind, se), verbose = FALSE) return(se) } } #' @export p_value.mlm <- function(model, vcov = NULL, vcov_args = NULL, ...) { out <- p_value.default(model, vcov = vcov, vcov_args = vcov_args, ...) est <- insight::get_parameters(model, ...) # assumes out and est are sorted the same way if (isTRUE(nrow(out) == nrow(est)) && "Parameter" %in% colnames(est) && "Response" %in% colnames(est)) { out$Parameter <- est$Parameter out$Response <- est$Response # manually } else { if (!is.null(vcov)) { insight::format_warning( "Unable to extract the variance-covariance matrix requested in `vcov`." ) } cs <- stats::coef(summary(model)) p <- lapply(names(cs), function(x) { params <- cs[[x]] .data_frame( Parameter = rownames(params), p = params[, "Pr(>|t|)"], Response = gsub("^Response (.*)", "\\1", x) ) }) out <- insight::text_remove_backticks(do.call(rbind, p), verbose = FALSE) } return(out) } #' @export ci.mlm <- function(x, vcov = NULL, vcov_args = NULL, ci = 0.95, ...) { # .ci_generic may not handle weights properly (not sure) if (is.null(insight::find_weights(x)) && is.null(vcov)) { out <- lapply(ci, function(i) { .ci <- stats::confint(x, level = i, ...) rn <- rownames(.ci) .data_frame( Parameter = gsub("([^\\:]+)(\\:)(.*)", "\\3", rn), CI = i, CI_low = .ci[, 1], CI_high = .ci[, 2], Response = gsub("([^\\:]+)(\\:)(.*)", "\\1", rn) ) }) out <- insight::text_remove_backticks(do.call(rbind, out), verbose = FALSE) # .ci_generic does handle `vcov` correctly. } else { out <- .data_frame( .ci_generic( x, ci = ci, vcov = vcov, vcov_args = vcov_args, ... 
) ) resp <- insight::get_parameters(x)$Response if (!"Response" %in% colnames(out) && nrow(out) == length(resp)) { out[["Response"]] <- resp } else if (!isTRUE(all(out$Response == resp))) { insight::format_error( "Unable to assign labels to the model's parameters.", "Please report this problem to the {.pkg parameters} issue tracker:", "{.url https://github.com/easystats/parameters/issues}" ) } } out } #' @export simulate_model.mlm <- function(model, iterations = 1000, ...) { responses <- insight::find_response(model, combine = FALSE) out <- .simulate_model(model, iterations, component = "conditional", effects = "fixed", ...) cn <- paste0(colnames(out), rep(responses, each = length(colnames(out)) / length(responses))) colnames(out) <- cn class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export simulate_parameters.mlm <- function(model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ...) { sim_data <- simulate_model(model, iterations = iterations, ...) out <- .summary_bootstrap( data = sim_data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) out$Response <- NA responses <- insight::find_response(model, combine = FALSE) for (i in responses) { out$Response[grepl(paste0(i, "$"), out$Parameter)] <- i out$Parameter <- gsub(paste0(i, "$"), "", out$Parameter) } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(out, "object_class") <- class(model) attr(out, "iterations") <- iterations attr(out, "ci") <- ci attr(out, "ci_method") <- ci_method attr(out, "centrality") <- centrality out } parameters/R/methods_mixmod.R0000644000176200001440000000432014717111737015771 0ustar liggesusers#' @export model_parameters.MixMod <- model_parameters.glmmTMB #' @export ci.MixMod <- function(x, ci = 0.95, component = c("all", "conditional", "zi", "zero_inflated"), verbose = TRUE, ...) { component <- match.arg(component) if (is.null(.check_component(x, component, verbose = verbose))) { return(NULL) } .ci_generic( model = x, ci = ci, dof = Inf, component = component, ... ) } #' @export standard_error.MixMod <- function(model, effects = "fixed", component = "all", verbose = TRUE, ...) { component <- match.arg(component, choices = c("all", "conditional", "zi", "zero_inflated")) effects <- match.arg(effects, choices = c("fixed", "random")) if (effects == "random") { insight::check_if_installed("lme4") rand.se <- lme4::ranef(model, post_vars = TRUE) vars.m <- attr(rand.se, "post_vars") all_names <- attributes(rand.se)$dimnames if (dim(vars.m[[1]])[1] == 1) { rand.se <- sqrt(unlist(vars.m)) } else { rand.se <- do.call( rbind, lapply(vars.m, function(.x) t(as.data.frame(sqrt(diag(.x))))) ) rownames(rand.se) <- all_names[[1]] colnames(rand.se) <- all_names[[2]] rand.se <- list(rand.se) names(rand.se) <- insight::find_random(model, flatten = TRUE) } rand.se } else { if (is.null(.check_component(model, component, verbose = verbose))) { return(NULL) } vc <- insight::get_varcov(model, effects = "fixed", component = "all", ...) 
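# The next step is the usual Wald machinery: standard errors are the square
# roots of the diagonal of the variance-covariance matrix. A tiny
# self-contained sketch:
#   vc <- matrix(c(4, 1, 1, 9), nrow = 2)
#   sqrt(diag(vc))
#   #> [1] 2 3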
se <- sqrt(diag(vc)) x <- .data_frame( Parameter = names(se), SE = as.vector(se), Component = "conditional" ) zi_parms <- startsWith(x$Parameter, "zi_") if (any(zi_parms)) { x$Component[zi_parms] <- "zero_inflated" x$Parameter[zi_parms] <- gsub("^zi_(.*)", "\\1", x$Parameter[zi_parms]) } .filter_component(x, component) } } #' @export simulate_model.MixMod <- simulate_model.glmmTMB parameters/R/extract_parameters.R0000644000176200001440000007251515073721627016656 0ustar liggesusers# generic function ------------------------------------------------------ #' @keywords internal .extract_parameters_generic <- function( model, ci, component, merge_by = c("Parameter", "Component"), standardize = NULL, effects = "fixed", ci_method = NULL, p_adjust = NULL, wb_component = FALSE, verbose = TRUE, keep_component_column = FALSE, keep_parameters = NULL, drop_parameters = NULL, include_sigma = TRUE, include_info = FALSE, vcov = NULL, vcov_args = NULL, ... ) { dots <- list(...) # ==== check if standardization is required and package available if (isTRUE(standardize)) { if (verbose) { insight::format_alert( "`standardize` must be one of \"refit\", \"posthoc\", \"basic\", \"smart\" or \"pseudo\"." ) } standardize <- NULL } # ==== model exceptions if (inherits(model, c("crq", "crqs"))) { merge_by <- c("Parameter", "Component") } # ==== for refit, we completely refit the model, then extract parameters, CIs etc. as usual if (isTRUE(standardize == "refit")) { fun_args <- c(list(model, verbose = FALSE), dots) # argument name conflict with deprecated `robust` fun_args[["robust"]] <- NULL fun <- datawizard::standardize model <- do.call(fun, fun_args) } parameters <- insight::get_parameters( model, effects = effects, component = component, verbose = FALSE ) statistic <- insight::get_statistic(model, component = component) # check if all estimates are non-NA parameters <- .check_rank_deficiency(model, parameters) # ==== check if we really have a component column if (!("Component" %in% names(parameters)) && "Component" %in% merge_by) { merge_by <- setdiff(merge_by, "Component") } if (!("Group" %in% names(parameters)) && "Group" %in% merge_by) { merge_by <- setdiff(merge_by, "Group") } # ==== check Degrees of freedom if (!.dof_method_ok(model, ci_method, type = "ci_method")) { ci_method <- NULL } # ==== for ordinal models, first, clean parameter names and then indicate # intercepts (alpha-coefficients) in the component column if (inherits(model, c("polr", "svyolr"))) { intercept_groups <- grep("Intercept:", parameters$Parameter, fixed = TRUE) parameters$Parameter <- gsub("Intercept: ", "", parameters$Parameter, fixed = TRUE) statistic$Parameter <- gsub("Intercept: ", "", statistic$Parameter, fixed = TRUE) } else if (inherits(model, "clm") && !is.null(model$alpha)) { intercept_groups <- rep( c("intercept", "location", "scale"), lengths(model[c("alpha", "beta", "zeta")]) ) } else if (inherits(model, "clm2") && !is.null(model$Alpha)) { intercept_groups <- rep( c("intercept", "location", "scale"), lengths(model[c("Alpha", "beta", "zeta")]) ) } else if (inherits(model, "ordinal_weightit")) { intercept_groups <- rep("conditional", nrow(parameters)) intercept_groups[grep("|", parameters$Parameter, fixed = TRUE)] <- "intercept" } else { intercept_groups <- NULL } original_order <- parameters$.id <- seq_len(nrow(parameters)) # column name for coefficients, non-standardized coef_col <- "Coefficient" # ==== CI - only if we don't already have CI for std.
parameters ci_cols <- NULL if (!is.null(ci)) { # set up arguments for CI function fun_args <- list( model, ci = ci, component = component, vcov = vcov, vcov_args = vcov_args, verbose = verbose ) fun_args <- c(fun_args, dots) # add method argument if provided if (!is.null(ci_method)) { fun_args[["method"]] <- ci_method } ci_df <- suppressMessages(do.call("ci", fun_args)) # success? merge CI into parameters if (!is.null(ci_df)) { # for multiple CI columns, reshape CI-dataframe to match parameters df if (length(ci) > 1) { ci_df <- datawizard::reshape_ci(ci_df) } # remember names of CI columns, used for later sorting of columns ci_cols <- names(ci_df)[!names(ci_df) %in% c("CI", merge_by)] parameters <- merge(parameters, ci_df, by = merge_by, sort = FALSE) } } # ==== p value fun_args <- list( model, method = ci_method, effects = effects, verbose = verbose, component = component, vcov = vcov, vcov_args = vcov_args ) fun_args <- c(fun_args, dots) pval <- do.call("p_value", fun_args) # success? merge p-values into parameters if (!is.null(pval)) { parameters <- merge(parameters, pval, by = merge_by, sort = FALSE) } # ==== standard error - only if we don't already have SE for std. parameters std_err <- NULL fun_args <- list( model, effects = effects, component = component, verbose = verbose, vcov = vcov, vcov_args = vcov_args ) fun_args <- c(fun_args, dots) if (!is.null(ci_method)) { fun_args[["method"]] <- ci_method } std_err <- do.call("standard_error", fun_args) # success? merge SE into parameters if (!is.null(std_err)) { parameters <- merge(parameters, std_err, by = merge_by, sort = FALSE) } # ==== test statistic - fix values for robust vcov if (!is.null(vcov)) { parameters$Statistic <- parameters$Estimate / parameters$SE } else if (!is.null(statistic)) { parameters <- merge(parameters, statistic, by = merge_by, sort = FALSE) } # ==== degrees of freedom if (is.null(ci_method)) { df_error <- insight::get_df(x = model, type = "wald", verbose = FALSE) } else { df_error <- insight::get_df(x = model, type = ci_method, verbose = FALSE) } if ( !is.null(df_error) && (length(df_error) == 1 || length(df_error) == nrow(parameters)) ) { if (length(df_error) == 1) { parameters$df_error <- df_error } else { # order may have changed due to merging, so make sure # df are in correct order. 
parameters$df_error <- df_error[order(parameters$.id)] } } # ==== Rematch order after merging parameters <- parameters[match(original_order, parameters$.id), ] # ==== Renaming if ("Statistic" %in% colnames(parameters)) { stat_type <- attr(statistic, "statistic", exact = TRUE) if (!is.null(stat_type)) { colnames(parameters) <- gsub( "Statistic", gsub("(-|\\s)statistic", "", stat_type), colnames(parameters), fixed = TRUE ) colnames(parameters) <- gsub( "chi-squared", "Chi2", colnames(parameters), fixed = TRUE ) } } colnames(parameters) <- gsub("(c|C)hisq", "Chi2", colnames(parameters)) colnames(parameters) <- gsub( "Estimate", "Coefficient", colnames(parameters), fixed = TRUE ) # ==== add intercept groups for ordinal models if (inherits(model, c("polr", "svyolr")) && !is.null(intercept_groups)) { parameters$Component <- "beta" parameters$Component[intercept_groups] <- "alpha" } else if ( inherits(model, c("clm", "clm2", "ordinal_weightit")) && !is.null(intercept_groups) ) { parameters$Component <- intercept_groups } # ==== remove Component / Effects column if not needed if ( !is.null(parameters$Component) && insight::has_single_value(parameters$Component, remove_na = TRUE) && !keep_component_column ) { parameters$Component <- NULL } if ( (!is.null(parameters$Effects) && insight::n_unique(parameters$Effects) == 1) || effects == "fixed" ) { parameters$Effects <- NULL } # ==== filter parameters, if requested if (!is.null(keep_parameters) || !is.null(drop_parameters)) { parameters <- .filter_parameters( parameters, keep = keep_parameters, drop = drop_parameters, verbose = verbose ) } # ==== adjust p-values? if (!is.null(p_adjust)) { parameters <- .p_adjust(parameters, p_adjust, model, verbose) } # ==== remove all complete-missing cases parameters <- parameters[apply(parameters, 1, function(i) !all(is.na(i))), ] # ==== add within/between attributes if (inherits(model, c("glmmTMB", "MixMod")) && isTRUE(wb_component)) { parameters <- .add_within_between_effects(model, parameters) } # ==== Std Coefficients for other methods than "refit" if (!is.null(standardize) && !isTRUE(standardize == "refit")) { # give minimal attributes required for standardization temp_pars <- parameters class(temp_pars) <- c("parameters_model", class(temp_pars)) attr(temp_pars, "ci") <- ci attr(temp_pars, "object_name") <- model # pass the model as is (this is a cheat - teehee!) 
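# Note on the "cheat" above: `object_name` normally stores the *name* of the
# model object (see `insight::safe_deparse_symbol()` elsewhere in this file),
# but here the fitted model itself is stored -- presumably so that
# `standardize_parameters()` can recover the model directly from the
# attribute instead of re-evaluating a symbol in the caller's environment.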
std_parms <- standardize_parameters(temp_pars, method = standardize) parameters$Std_Coefficient <- std_parms$Std_Coefficient parameters$SE <- attr(std_parms, "standard_error") if (!is.null(ci)) { parameters$CI_low <- std_parms$CI_low parameters$CI_high <- std_parms$CI_high } coef_col <- "Std_Coefficient" } # ==== Reorder # fmt: skip col_order <- c( "Parameter", coef_col, "SE", ci_cols, "t", "z", "t / F", "t/F", "z / Chisq", "z/Chisq", "z / Chi2", "z/Chi2", "F", "Chi2", "chisq", "chi-squared", "Statistic", "df", "df_error", "p", "Component", "Response", "Effects" ) parameters <- parameters[col_order[col_order %in% names(parameters)]] # ==== add sigma and residual df if (isTRUE(include_sigma) || isTRUE(include_info)) { parameters <- .add_sigma_residual_df(parameters, model) } rownames(parameters) <- NULL parameters } # helper ---------------- .add_sigma_residual_df <- function(params, model) { if (is.null(params$Component) || !"sigma" %in% params$Component) { sig <- .safe(suppressWarnings(insight::get_sigma(model, ci = NULL, verbose = FALSE))) attr(params, "sigma") <- as.numeric(sig) resdf <- .safe(suppressWarnings(insight::get_df(x = model, type = "residual"))) attr(params, "residual_df") <- as.numeric(resdf) } params } .filter_parameters <- function(params, keep = NULL, drop = NULL, verbose = TRUE) { if (!is.null(keep) && is.list(keep)) { for (i in names(keep)) { params <- .filter_parameters_vector( params, keep[[i]], drop = NULL, column = i, verbose = verbose ) } } else { params <- .filter_parameters_vector( params, keep, drop, column = NULL, verbose = verbose ) } params } .filter_parameters_vector <- function( params, keep = NULL, drop = NULL, column = NULL, verbose = TRUE ) { # check pattern if (!is.null(keep) && length(keep) > 1) { keep <- paste0("(", paste(keep, collapse = "|"), ")") if (verbose) { insight::format_alert(sprintf( "The `keep` argument has more than 1 element. Merging into following regular expression: `%s`.", keep )) } } # check pattern if (!is.null(drop) && length(drop) > 1) { drop <- paste0("(", paste(drop, collapse = "|"), ")") if (verbose) { insight::format_alert(sprintf( "The `drop` argument has more than 1 element. Merging into following regular expression: `%s`.", drop )) } } if (is.null(column) || !column %in% colnames(params)) { if ("Parameter" %in% colnames(params)) { column <- "Parameter" } else { column <- 1 } } # row to keep and drop if (is.null(keep)) { rows_to_keep <- rep_len(TRUE, nrow(params)) } else { rows_to_keep <- grepl(keep, params[[column]], perl = TRUE) } if (is.null(drop)) { rows_to_drop <- rep_len(TRUE, nrow(params)) } else { rows_to_drop <- !grepl(drop, params[[column]], perl = TRUE) } out <- params[rows_to_keep & rows_to_drop, ] if (nrow(out) == 0) { if (verbose) { insight::format_alert( "The pattern defined in the `keep` (and `drop`) arguments would remove all parameters from the output. Thus, selecting specific parameters will be ignored." # nolint ) } return(params) } out } # mixed models function ------------------------------------------------------ #' @keywords internal .extract_parameters_mixed <- function( model, ci = 0.95, ci_method = "wald", standardize = NULL, p_adjust = NULL, wb_component = FALSE, keep_parameters = NULL, drop_parameters = NULL, include_sigma = FALSE, include_info = FALSE, vcov = NULL, vcov_args = NULL, verbose = TRUE, ... ) { dots <- list(...) 
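# The methods collected below are `ci_method` options with parameter-specific
# degrees of freedom (and, for Kenward-Roger, adjusted standard errors), so
# they bypass the default Wald computations further down. Hedged usage sketch
# (assumes `m` is a fitted lme4 model):
#   model_parameters(m, ci_method = "kenward")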
special_ci_methods <- c("betwithin", "satterthwaite", "ml1", "kenward", "kr") # get parameters and statistic parameters <- insight::get_parameters( model, effects = "fixed", component = "all", verbose = FALSE ) statistic <- insight::get_statistic(model, component = "all") # check if all estimates are non-NA parameters <- .check_rank_deficiency(model, parameters) # sometimes, due to merge(), row-order messes up, so we save this here original_order <- parameters$.id <- seq_len(nrow(parameters)) # remove SE column parameters <- datawizard::data_remove( parameters, c("SE", "Std. Error"), verbose = FALSE ) # column name for coefficients, non-standardized coef_col <- "Coefficient" # Degrees of freedom if (.dof_method_ok(model, ci_method)) { dof <- insight::get_df(x = model, type = ci_method, verbose = FALSE) } else { dof <- Inf } df_error <- data.frame( Parameter = parameters$Parameter, df_error = as.vector(dof), stringsAsFactors = FALSE ) # for KR-dof, we have the SE as well, to save computation time df_error$SE <- attr(dof, "se", exact = TRUE) # CI - only if we don't already have CI for std. parameters ci_cols <- NULL if (!is.null(ci)) { # HC vcov? if (!is.null(vcov)) { fun_args <- list( model, ci = ci, vcov = vcov, vcov_args = vcov_args, verbose = verbose ) fun_args <- c(fun_args, dots) ci_df <- suppressMessages(do.call("ci", fun_args)) } else if (ci_method %in% c("kenward", "kr")) { # special handling for KR-CIs, where we already have computed SE ci_df <- .ci_kenward_dof(model, ci = ci, df_kr = df_error) } else { ci_df <- ci(model, ci = ci, method = ci_method, effects = "fixed") } if (length(ci) > 1) { ci_df <- datawizard::reshape_ci(ci_df) } ci_cols <- names(ci_df)[!names(ci_df) %in% c("CI", "Parameter")] parameters <- merge(parameters, ci_df, by = "Parameter", sort = FALSE) } # standard error - only if we don't already have SE for std. 
parameters if (!"SE" %in% colnames(parameters)) { if (!is.null(vcov)) { fun_args <- list(model, vcov = vcov, vcov_args = vcov_args, verbose = verbose) fun_args <- c(fun_args, dots) parameters <- merge( parameters, do.call("standard_error", fun_args), by = "Parameter", sort = FALSE ) # special handling for KR-SEs, which we already have computed from dof } else if ("SE" %in% colnames(df_error)) { se_kr <- df_error se_kr$df_error <- NULL parameters <- merge(parameters, se_kr, by = "Parameter", sort = FALSE) } else { parameters <- merge( parameters, standard_error(model, method = ci_method, effects = "fixed"), by = "Parameter", sort = FALSE ) } } # p value if (!is.null(vcov)) { fun_args <- list(model, vcov = vcov, vcov_args = vcov_args, verbose = verbose) fun_args <- c(fun_args, dots) parameters <- merge( parameters, do.call("p_value", fun_args), by = "Parameter", sort = FALSE ) } else if ("Pr(>|z|)" %in% names(parameters)) { names(parameters)[grepl("Pr(>|z|)", names(parameters), fixed = TRUE)] <- "p" } else if (ci_method %in% special_ci_methods) { # special handling for KR-p, which we already have computed from dof parameters <- merge( parameters, .p_value_dof(model, dof = df_error$df_error, method = ci_method, se = df_error$SE), by = "Parameter", sort = FALSE ) } else { parameters <- merge( parameters, p_value(model, dof = dof, effects = "fixed"), by = "Parameter", sort = FALSE ) } # adjust standard errors and test-statistic as well if ((!is.null(vcov) || ci_method %in% special_ci_methods)) { parameters$Statistic <- parameters$Estimate / parameters$SE } else { parameters <- merge(parameters, statistic, by = "Parameter", sort = FALSE) } # dof if (!"df" %in% names(parameters)) { if (!ci_method %in% special_ci_methods) { df_error <- data.frame( Parameter = parameters$Parameter, df_error = insight::get_df(x = model, type = "wald"), stringsAsFactors = FALSE ) } if (!is.null(df_error) && nrow(df_error) == nrow(parameters)) { if ("SE" %in% colnames(df_error)) { df_error$SE <- NULL } parameters <- merge(parameters, df_error, by = "Parameter", sort = FALSE) } } # Rematch order after merging parameters <- parameters[match(original_order, parameters$.id), ] # Renaming colnames(parameters) <- gsub( "Statistic", gsub("-statistic", "", attr(statistic, "statistic", exact = TRUE), fixed = TRUE), colnames(parameters), fixed = TRUE ) colnames(parameters) <- gsub("Std. Error", "SE", colnames(parameters), fixed = TRUE) colnames(parameters) <- gsub( "Estimate", "Coefficient", colnames(parameters), fixed = TRUE ) colnames(parameters) <- gsub("t value", "t", colnames(parameters), fixed = TRUE) colnames(parameters) <- gsub("z value", "z", colnames(parameters), fixed = TRUE) # filter parameters, if requested if (!is.null(keep_parameters) || !is.null(drop_parameters)) { parameters <- .filter_parameters( parameters, keep = keep_parameters, drop = drop_parameters, verbose = verbose ) } # adjust p-values? if (!is.null(p_adjust)) { parameters <- .p_adjust(parameters, p_adjust, model, verbose) } # if we have within/between effects (from demean()), we can add a component # column for nicer printing... if (isTRUE(wb_component)) { parameters <- .add_within_between_effects(model, parameters) } # Std Coefficients for other methods than "refit" if (!is.null(standardize)) { temp_pars <- parameters class(temp_pars) <- c("parameters_model", class(temp_pars)) attr(temp_pars, "ci") <- ci attr(temp_pars, "object_name") <- model # pass the model as is (this is a cheat - teehee!) 
std_parms <- standardize_parameters(temp_pars, method = standardize) parameters$Std_Coefficient <- std_parms$Std_Coefficient parameters$SE <- attr(std_parms, "standard_error") if (!is.null(ci)) { parameters$CI_low <- std_parms$CI_low parameters$CI_high <- std_parms$CI_high } coef_col <- "Std_Coefficient" } # Reorder # fmt: skip col_order <- c( "Parameter", coef_col, "SE", ci_cols, "t", "z", "df", "df_error", "p", "Component" ) parameters <- parameters[col_order[col_order %in% colnames(parameters)]] # add sigma if (isTRUE(include_sigma) || isTRUE(include_info)) { parameters <- .add_sigma_residual_df(parameters, model) } rownames(parameters) <- NULL parameters } .add_within_between_effects <- function(model, parameters) { # This function checks whether the model contains predictors that were # "demeaned" using the "demean()" function. If so, these columns have an # attribute indicating the within or between effect, and in such cases, # this effect is used as "Component" value. by this, we get a nicer print # for model parameters... # extract attributes that indicate within and between effects within_effects <- .find_within_between(model, "within-effect") between_effects <- .find_within_between(model, "between-effect") # if there are no attributes, return if (is.null(within_effects) && is.null(between_effects)) { return(parameters) } if (is.null(parameters$Component)) { parameters$Component <- "rewb-contextual" } if (!is.null(within_effects)) { index <- unique(unlist( sapply(within_effects, grep, x = parameters$Parameter, fixed = TRUE), use.names = FALSE )) parameters$Component[index] <- "within" } if (!is.null(between_effects)) { index <- unique(unlist( sapply(between_effects, grep, x = parameters$Parameter, fixed = TRUE), use.names = FALSE )) parameters$Component[index] <- "between" } interactions <- grep(":", parameters$Parameter, fixed = TRUE) if (length(interactions)) { parameters$Component[interactions] <- "interactions" } if ( ((!all(c("within", "between") %in% parameters$Component)) && inherits(model, "merMod")) || all(parameters$Component == "rewb-contextual") ) { parameters$Component <- NULL } parameters } .find_within_between <- function(model, which_effect) { mf <- stats::model.frame(model) unlist( sapply(names(mf), function(i) { if (!is.null(attr(mf[[i]], which_effect, exact = TRUE))) { i } }), use.names = FALSE ) } # Bayes function ------------------------------------------------------ #' @keywords internal .extract_parameters_bayesian <- function( model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = "pd", rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = FALSE, standardize = NULL, keep_parameters = NULL, drop_parameters = NULL, verbose = TRUE, ... ) { # no ROPE for multi-response models if (insight::is_multivariate(model) && any(c("rope", "p_rope") %in% test)) { test <- setdiff(test, c("rope", "p_rope")) if (verbose) { insight::format_alert( "Multivariate response models are not yet supported for tests `rope` and `p_rope`." ) } } # MCMCglmm need special handling if (inherits(model, "MCMCglmm")) { parameters <- bayestestR::describe_posterior( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, diagnostic = "ESS", verbose = verbose, ... 
) } else { parameters <- bayestestR::describe_posterior( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, verbose = verbose, ... ) if (!is.null(standardize)) { # Don't test BF on standardized params test_no_BF <- test[!test %in% c("bf", "bayesfactor", "bayes_factor")] if (length(test_no_BF) == 0) { test_no_BF <- NULL } std_post <- standardize_posteriors(model, method = standardize) std_parameters <- bayestestR::describe_posterior( std_post, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test_no_BF, rope_range = rope_range, rope_ci = rope_ci, verbose = verbose, ... ) parameters <- merge( std_parameters, parameters[c( "Parameter", setdiff(colnames(parameters), colnames(std_parameters)) )], sort = FALSE ) } } if (length(ci) > 1) { parameters <- datawizard::reshape_ci(parameters) } # Remove unnecessary columns if ( "CI" %in% names(parameters) && insight::has_single_value(parameters$CI, remove_na = TRUE) ) { parameters$CI <- NULL } if ( "ROPE_CI" %in% names(parameters) && insight::has_single_value(parameters$ROPE_CI, remove_na = TRUE) ) { parameters$ROPE_CI <- NULL } if ("ROPE_low" %in% names(parameters) && "ROPE_high" %in% names(parameters)) { parameters$ROPE_low <- NULL parameters$ROPE_high <- NULL } # filter parameters, if requested if (!is.null(keep_parameters) || !is.null(drop_parameters)) { parameters <- .filter_parameters( parameters, keep = keep_parameters, drop = drop_parameters, verbose = verbose ) } rownames(parameters) <- NULL # indicate it's a Bayesian model attr(parameters, "is_bayesian") <- TRUE parameters } # SEM function ------------------------------------------------------ #' @keywords internal .extract_parameters_lavaan <- function( model, ci = 0.95, standardize = FALSE, keep_parameters = NULL, drop_parameters = NULL, verbose = TRUE, ... ) { insight::check_if_installed("lavaan") # lavaan::parameterEstimates does not accept NULL `level`, but a lot of our # other methods do. It is often useful to pass `NULL` to speed things up, # but it doesn't work here. if (is.null(ci)) { ci <- 0.95 } # set proper default if (is.null(standardize)) { standardize <- FALSE } # check for valid parameters valid_std_options <- c("all", "std.all", "latent", "std.lv", "no_exogenous", "std.nox") if (!is.logical(standardize) && !(standardize %in% valid_std_options)) { if (verbose) { insight::format_alert( "`standardize` should be one of `TRUE`, \"all\", \"std.all\", \"latent\", \"std.lv\", \"no_exogenous\" or \"std.nox\".", # nolint "Returning unstandardized solution." ) } standardize <- FALSE } # CI if (length(ci) > 1L) { ci <- ci[1] if (verbose) { insight::format_alert(paste0( "lavaan models only accept one level of CI. Keeping the first one: `ci = ", ci, "`." )) } } # collect dots dot_args <- list(...) 
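# The allow-list right below means that extra arguments supplied via `...`
# are forwarded to `lavaan::parameterEstimates()` only if that function
# accepts them; everything else is dropped. Hedged usage sketch (assumes
# `fit` is a fitted lavaan model):
#   model_parameters(fit, rsquare = TRUE)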
# list all argument names from the `lavaan` function # fmt: skip dot_args <- dot_args[names(dot_args) %in% c( "zstat", "pvalue", "standardized", "fmi", "level", "boot.ci.type", "cov.std", "fmi.options", "rsquare", "remove.system.eq", "remove.eq", "remove.ineq", "remove.def", "remove.nonfree", "add.attributes", "output", "header" )] # Get estimates sem_data <- do.call( lavaan::parameterEstimates, c(list(object = model, se = TRUE, ci = TRUE, level = ci), dot_args) ) label <- sem_data$label # check if standardized estimates are requested, and if so, which type if (isTRUE(standardize) || !is.logical(standardize)) { if (is.logical(standardize)) { standardize <- "all" } type <- switch( standardize, all = , std.all = "std.all", latent = , std.lv = "std.lv", no_exogenous = , std.nox = "std.nox", "std.all" ) # this function errors on unknown arguments valid <- names(formals(lavaan::standardizedsolution)) dots <- list(...) dots <- dots[names(dots) %in% valid] fun_args <- c(list(model, se = TRUE, level = ci, type = type), dots) f <- utils::getFromNamespace("standardizedsolution", "lavaan") sem_data <- do.call("f", fun_args) names(sem_data)[names(sem_data) == "est.std"] <- "est" } params <- data.frame( To = sem_data$lhs, Operator = sem_data$op, From = sem_data$rhs, Coefficient = sem_data$est, SE = sem_data$se, CI_low = sem_data$ci.lower, CI_high = sem_data$ci.upper, z = sem_data$z, p = sem_data$pvalue, stringsAsFactors = FALSE ) if (!is.null(label)) { params$Label <- label } params$Component <- NA_character_ params$Component[params$Operator == "=~"] <- "Loading" params$Component[params$Operator == "~"] <- "Regression" params$Component[params$Operator == "~~"] <- "Correlation" params$Component[params$Operator == ":="] <- "Defined" params$Component[params$Operator == "~1"] <- "Mean" params$Component[as.character(params$From) == as.character(params$To)] <- "Variance" if ("p" %in% colnames(params)) { params$p[is.na(params$p)] <- 0 } if ("group" %in% names(sem_data)) { params$Group <- sem_data$group } # filter parameters, if requested if (!is.null(keep_parameters) || !is.null(drop_parameters)) { params <- .filter_parameters( params, keep = keep_parameters, drop = drop_parameters, verbose = verbose ) } params } # tools ------------------------- .check_rank_deficiency <- function(model, p, verbose = TRUE) { # for cox-panel models, we have non-linear parameters with NA coefficient, # but test statistic and p-value - don't check for NA estimates in this case if (!is.null(model) && inherits(model, "coxph.penal")) { return(p) } if (anyNA(p$Estimate)) { if (isTRUE(verbose)) { insight::format_alert(sprintf( "Model matrix is rank deficient. Parameters `%s` were not estimable.", toString(p$Parameter[is.na(p$Estimate)]) )) } p <- p[!is.na(p$Estimate), ] } p } parameters/R/methods_mvord.R0000644000176200001440000000627114775505314015634 0ustar liggesusers# classes: .mvord #################### .mvord #' @export model_parameters.mvord <- function(model, ci = 0.95, component = "all", standardize = NULL, exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) 
{ component <- insight::validate_argument(component, c("all", "conditional", "thresholds", "correlation")) out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = FALSE, iterations = 10, merge_by = c("Parameter", "Component", "Response"), standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, include_info = include_info, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export standard_error.mvord <- function(model, component = c("all", "conditional", "thresholds", "correlation"), ...) { component <- match.arg(component) params <- insight::get_parameters(model, component = "all") junk <- utils::capture.output({ s <- summary(model) }) params$SE <- c( unname(s$thresholds[, "Std. Error"]), unname(s$coefficients[, "Std. Error"]), unname(s$error.structure[, "Std. Error"]) ) params <- params[c("Parameter", "SE", "Component", "Response")] if (insight::has_single_value(params$Response, remove_na = TRUE)) { params$Response <- NULL } if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } insight::text_remove_backticks(params, verbose = FALSE) } #' @export p_value.mvord <- function(model, component = c("all", "conditional", "thresholds", "correlation"), ...) { component <- match.arg(component) params <- insight::get_parameters(model, component = "all") junk <- utils::capture.output({ s <- summary(model) }) params$p <- c( unname(s$thresholds[, "Pr(>|z|)"]), unname(s$coefficients[, "Pr(>|z|)"]), unname(s$error.structure[, "Pr(>|z|)"]) ) params <- params[c("Parameter", "p", "Component", "Response")] if (insight::has_single_value(params$Response, remove_na = TRUE)) { params$Response <- NULL } if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } insight::text_remove_backticks(params, verbose = FALSE) } #' @export simulate_model.mvord <- function(model, iterations = 1000, component = c("all", "conditional", "thresholds", "correlation"), ...) { component <- match.arg(component) out <- .simulate_model(model, iterations, component = component, ...) 
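# `.simulate_model()` is assumed to return a data frame with one column per
# coefficient and `iterations` rows of simulated draws; the class and
# `object_name` attribute set below merely make the result work with the
# usual `parameters` print methods.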
class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } parameters/R/utils_format.R0000644000176200001440000011427115053035103015452 0ustar liggesusers# output-format helper ------------------------- .convert_to_glue_syntax <- function(style, linesep = NULL) { # set default if (is.null(linesep)) { linesep <- " " } # default if (is.null(style)) { style <- paste0("{estimate}", linesep, "({ci})|{p}") # style: estimate and CI, p-value in separate column (currently identical to "ci_p2") } else if (style %in% c("minimal", "ci_p2")) { style <- paste0("{estimate}", linesep, "({ci})|{p}") # style: estimate and CI, no p } else if (style == "ci") { style <- paste0("{estimate}", linesep, "({ci})") # style: estimate, p-stars and CI } else if (style == "ci_p") { style <- paste0("{estimate}{stars}", linesep, "({ci})") # style: estimate and SE, no p } else if (style == "se") { style <- paste0("{estimate}", linesep, "({se})") # style: estimate, p-stars and SE } else if (style == "se_p") { style <- paste0("{estimate}{stars}", linesep, "({se})") # style: estimate and SE, p-value in separate column } else if (style %in% c("short", "se_p2")) { style <- paste0("{estimate}", linesep, "({se})|{p}") # style: only estimate } else if (style %in% c("est", "coef")) { style <- "{estimate}" } # replace \n for now with default line-separators gsub("\n", linesep, style, fixed = TRUE) } # global definition of valid "style" shortcuts .style_shortcuts <- c("ci_p2", "ci", "ci_p", "se", "se_p", "se_p2", "est", "coef") .select_shortcuts <- c("minimal", "short") .add_obs_row <- function(x, att, style) { observations <- unlist(lapply(att, function(i) { if (is.null(i$n_obs)) { NA } else { i$n_obs } })) weighted_observations <- unlist(lapply(att, function(i) { if (is.null(i$weighted_nobs)) { NA } else { i$weighted_nobs } })) # check if model had weights, and if due to missing values n of weighted # observations differs from "raw" observations if (!all(is.na(weighted_observations)) && !all(is.na(observations))) { if (!isTRUE(all.equal(as.vector(weighted_observations), as.vector(observations)))) { insight::format_alert("Number of weighted observations differs from number of unweighted observations.") } observations <- weighted_observations } if (!all(is.na(observations))) { # add empty row, as separator empty_row <- do.call(data.frame, as.list(rep(NA, ncol(x)))) colnames(empty_row) <- colnames(x) x <- rbind(x, empty_row) # add observations steps <- (ncol(x) - 1) / length(observations) empty_row[[1]] <- "Observations" insert_at <- seq(2, ncol(x), by = steps) for (i in seq_along(insert_at)) { empty_row[[insert_at[i]]] <- observations[i] } x <- rbind(x, empty_row) } x } # other helper ------------------------ .format_columns_single_component <- function(x, pretty_names, digits = 2, ci_digits = digits, p_digits = 3, ci_width = "auto", ci_brackets = TRUE, format = NULL, coef_name = NULL, zap_small = FALSE, include_reference = FALSE, style = NULL, ...) 
{ # default brackets are parenthesis for HTML / MD if ((is.null(ci_brackets) || isTRUE(ci_brackets)) && (identical(format, "html") || identical(format, "markdown"))) { brackets <- c("(", ")") } else if (is.null(ci_brackets) || isTRUE(ci_brackets)) { brackets <- c("[", "]") } else { brackets <- ci_brackets } # fix coefficient column name for random effects if (!is.null(x$Effects) && all(x$Effects == "random") && any(colnames(x) %in% .all_coefficient_types)) { colnames(x)[colnames(x) %in% .all_coefficient_types] <- "Coefficient" } # fix coefficient column name for mixed count and zi pars if (!is.null(x$Component) && sum(c("conditional", "zero_inflated", "dispersion") %in% x$Component) >= 2 && any(colnames(x) %in% .all_coefficient_types)) { colnames(x)[colnames(x) %in% .all_coefficient_types] <- "Coefficient" } # random pars with level? combine into parameter column if (all(c("Parameter", "Level") %in% colnames(x))) { x$Parameter <- paste0(x$Parameter, " ", brackets[1], x$Level, brackets[2]) x$Level <- NULL } # add the coefficient for the base-(reference)-level of factors? if (include_reference) { x <- .add_reference_level(x) } insight::format_table( x, pretty_names = pretty_names, digits = digits, ci_width = ci_width, ci_brackets = ci_brackets, ci_digits = ci_digits, p_digits = p_digits, zap_small = zap_small, select = style, ... ) } .format_ranef_parameters <- function(x) { if (!is.null(x$Group) && !is.null(x$Effects)) { ran_pars <- which(x$Effects == "random") stddevs <- startsWith(x$Parameter[ran_pars], "SD (") x$Parameter[ran_pars[stddevs]] <- paste0( gsub("(.*)\\)", "\\1", x$Parameter[ran_pars[stddevs]]), ": ", x$Group[ran_pars[stddevs]], ")" ) corrs <- startsWith(x$Parameter[ran_pars], "Cor (") x$Parameter[ran_pars[corrs]] <- paste0( gsub("(.*)\\)", "\\1", x$Parameter[ran_pars[corrs]]), ": ", x$Group[ran_pars[corrs]], ")" ) x$Parameter[x$Parameter == "SD (Observations: Residual)"] <- "SD (Residual)" x$Group <- NULL } x } .add_reference_level <- function(params, model = NULL) { if (is.null(model)) { # check if we have a model object, if not provided by user model <- .get_object(params) } # no model object provided? Try to get data from model call if (is.null(model)) { # get data from model call model_data <- .safe(eval(attributes(params)$model_call$data)) } else { # get data from model object model_data <- insight::get_data(model, verbose = FALSE) } # check if we have model data, else return parameter table if (is.null(model_data)) { return(params) } # find factors and factor levels and check if we have any factors in the data factors <- .find_factor_levels(model_data, model, model_call = attributes(params)$model_call) if (!length(factors)) { # in case of "on-the-fly" factors, e.g.: # m <- lm(mpg ~ cut(wt, c(0, 2.5, 3, 5)), data = mtcars) # we need to receive the data from the model frame, in order to find factors model_data <- insight::get_data(model, source = "mf", verbose = FALSE) if (!is.null(model_data)) { factors <- .find_factor_levels(model_data, model, model_call = attributes(params)$model_call) } # if we still didn't find anything, quit... if (!length(factors)) { return(params) } } # next, check contrasts of factors. 
  # next, check contrasts of factors: including the reference level only makes
  # sense if there are contrasts that are all zeros, which means that the
  # reference level is not included in the model matrix
  remove_contrasts <- .remove_reference_contrasts(model)

  # keep only factors with valid contrasts
  if (!is.null(remove_contrasts) && length(remove_contrasts)) {
    factors <- factors[setdiff(names(factors), remove_contrasts)]
  }

  # we need some more information about prettified labels etc.
  pretty_names <- attributes(params)$pretty_names
  coef_name <- attributes(params)$coefficient_name
  if (is.null(coef_name)) {
    coef_name <- "Coefficient"
  }
  zi_coef_name <- attributes(params)$zi_coefficient_name
  if (is.null(zi_coef_name)) {
    zi_coef_name <- "Coefficient"
  }

  # copy object, so we save original data
  out <- params

  # sanity check - is pretty_names NULL? If so, use Parameters as pretty_names
  if (is.null(pretty_names)) {
    pretty_names <- stats::setNames(params$Parameter, params$Parameter)
  }

  # if we use "include_reference" and set "pretty_names = FALSE", pretty_names
  # is not a named vector. So we need to make sure we have a named vector
  if (is.null(names(pretty_names))) {
    pretty_names <- stats::setNames(pretty_names, params$Parameter)
  }

  # if we use "keep" or "drop", we have fewer parameters in our data frame,
  # so we need to make sure we only have those pretty_names whose names match
  # the parameters in the data frame
  pretty_names <- pretty_names[names(pretty_names) %in% params$Parameter]

  # iterate all factors in the data and check if any factor was used in the model
  for (fn in names(factors)) {
    f <- factors[[fn]]
    # "f" contains all combinations of factor name and levels from the data,
    # which we can match with the names of the pretty_names vector
    found <- which(names(pretty_names) %in% f)
    # if we have a match, we add the reference level to the pretty_names vector
    if (length(found)) {
      # the reference level is *not* in the pretty names yet
      reference_level <- f[!f %in% names(pretty_names)]
      # for on-the-fly conversion of factors, the names of the factors can
      # also contain "factor()" or "as.factor()" - we need to remove these
      if (any(grepl("(as\\.factor|factor|as\\.character)", fn))) {
        fn_clean <- gsub("(as\\.factor|factor|as\\.character)\\((.*)\\)", "\\2", fn)
      } else {
        fn_clean <- fn
      }
      # create a pretty level for the reference category
      pretty_level <- paste0(fn_clean, " [", sub(fn, "", reference_level, fixed = TRUE), "]")
      pretty_level <- gsub("_", " ", pretty_level, fixed = TRUE)
      # special handling for "cut()"
      pattern_cut_right <- "(.*)\\((.*),(.*)\\]\\]$"
      pattern_cut_left <- "(.*)\\[(.*),(.*)\\)\\]$"
      if (all(grepl(pattern_cut_right, pretty_level))) {
        lower_bounds <- gsub(pattern_cut_right, "\\2", pretty_level)
        upper_bounds <- gsub(pattern_cut_right, "\\3", pretty_level)
        pretty_level <- gsub(
          pattern_cut_right,
          paste0("\\1>", as.numeric(lower_bounds), "-", upper_bounds, "]"),
          pretty_level
        )
      } else if (all(grepl(pattern_cut_left, pretty_level))) {
        lower_bounds <- gsub(pattern_cut_left, "\\2", pretty_level)
        upper_bounds <- gsub(pattern_cut_left, "\\3", pretty_level)
        pretty_level <- gsub(
          pattern_cut_left,
          paste0("\\1", as.numeric(lower_bounds), "-<", upper_bounds, "]"),
          pretty_level
        )
      }
      # insert new pretty level at the correct position in "pretty_names"
      pretty_names <- .insert_element_at(
        pretty_names,
        stats::setNames(pretty_level, reference_level),
        min(found)
      )
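      # Illustrative sketch of the cut() handling above (hypothetical model,
      # not taken from the package tests): for
      #   m <- lm(mpg ~ cut(wt, c(0, 2.5, 3, 5)), data = mtcars)
      # the reference level "(0,2.5]" is prettified along the lines of
      #   "cut(wt, c(0, 2.5, 3, 5)) [>0-2.5]"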
      # now we need to update the data as well (i.e. the parameters table)
      row_data <- data.frame(
        Parameter = reference_level,
        Coefficient = as.numeric(attributes(params)$exponentiate),
        stringsAsFactors = FALSE
      )
      # coefficient name can also be "Odds Ratio" etc., so make sure we
      # have the correct column name in the data row we want to insert
      if (coef_name %in% colnames(out)) {
        colnames(row_data)[2] <- coef_name
      } else if (zi_coef_name %in% colnames(out)) {
        colnames(row_data)[2] <- zi_coef_name
      }
      out <- .insert_row_at(out, row_data, min(found))
    }
  }

  if (length(pretty_names)) {
    # update pretty_names attribute
    attr(out, "pretty_names") <- pretty_names
    # update pretty_labels attribute - for mixed models, we need to add the random
    # effects stuff from pretty_labels to pretty_names first, else matching will
    # fail
    pretty_labels <- attributes(out)$pretty_labels
    if (!is.null(pretty_labels)) {
      re_labels <- startsWith(names(pretty_labels), "SD (") | startsWith(names(pretty_labels), "Cor (")
      if (any(re_labels)) {
        pretty_names <- c(pretty_names, pretty_labels[re_labels])
      }
      pretty_names[stats::na.omit(match(names(pretty_labels), names(pretty_names)))] <- pretty_labels
      pretty_names <- pretty_names[!re_labels]
    }
    attr(out, "pretty_labels") <- pretty_names
  }

  out
}

# The coefficient column in the printed output is renamed, based on the model.
# For random effects, however, which are on a different scale, we want a
# different name for this column. Since print.parameters_model() splits
# components into different tables, we change the column name for those "tables"
# that contain the random effects or zero-inflation parameters
.all_coefficient_types <- c(
  "Odds Ratio", "Risk Ratio", "Prevalence Ratio", "IRR", "Log-Odds",
  "Log-Mean", "Log-Ratio", "Log-Prevalence", "Probability", "Marginal Means",
  "Estimated Counts", "Ratio", "Z-Score", "exp(Z-Score)"
)

.all_coefficient_names <- c("Coefficient", "Std_Coefficient", "Estimate", "Median", "Mean", "MAP")

.format_stan_parameters <- function(out, dist_params = NULL) {
  has_component <- !is.null(out$Component)
  # brms random intercepts or random slope variances
  ran_sd <- startsWith(out$Parameter, "sd_") & out$Effects == "random"
  if (any(ran_sd)) {
    out$Parameter[ran_sd] <- gsub("^sd_(.*?)__(.*)", "SD \\(\\2\\)", out$Parameter[ran_sd])
    if (has_component && !is.null(dist_params)) {
      for (dp in dist_params) {
        ran_dpars_sd <- ran_sd & out$Component == dp
        if (any(ran_dpars_sd)) {
          out$Parameter[ran_dpars_sd] <- gsub(
            paste0(dp, "_"), "", out$Parameter[ran_dpars_sd],
            fixed = TRUE
          )
        }
      }
    }
  }
  # brms random slope-intercepts correlation
  ran_cor <- startsWith(out$Parameter, "cor_") & out$Effects == "random"
  if (any(ran_cor)) {
    out$Parameter[ran_cor] <- gsub("^cor_(.*?)__(.*)__(.*)", "Cor \\(\\2~\\3\\)", out$Parameter[ran_cor])
    if (has_component && !is.null(dist_params)) {
      for (dp in dist_params) {
        ran_dpars_cor <- ran_cor & out$Component == dp
        if (any(ran_dpars_cor)) {
          out$Parameter[ran_dpars_cor] <- gsub(
            paste0(dp, "_"), "", out$Parameter[ran_dpars_cor],
            fixed = TRUE
          )
        }
      }
    }
  }
  # stanreg random effects variances
  ran_sd_cor <- startsWith(out$Parameter, "Sigma[")
  if (any(ran_sd_cor)) {
    out$Parameter[ran_sd_cor] <- gsub("(Intercept)", "Intercept", out$Parameter[ran_sd_cor], fixed = TRUE)
    parm1 <- gsub("^Sigma\\[(.*):(.*),(.*)\\]", "\\2", out$Parameter[ran_sd_cor])
    parm2 <- gsub("^Sigma\\[(.*):(.*),(.*)\\]", "\\3", out$Parameter[ran_sd_cor])
    # for random intercepts or slopes, parm1 and parm2 are identical
    ran_sd <- parm1 == parm2
    ran_cor <- parm1 != parm2
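    # Illustrative sketch of the renaming below (hypothetical stanreg parameter
    # names, not taken from a fitted model):
    #   "Sigma[Subject:(Intercept),(Intercept)]" -> "Sigma (Intercept)"
    #   "Sigma[Subject:Days,(Intercept)]"        -> "Sigma (Days~Intercept)"
    if (any(ran_sd)) {
      out$Parameter[which(ran_sd_cor)[ran_sd]] <-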
paste0("Sigma (", parm1[ran_sd], ")") } if (any(ran_cor)) { out$Parameter[which(ran_sd_cor)[ran_cor]] <- paste0("Sigma (", parm1[ran_cor], "~", parm2[ran_cor], ")") } } out } # helper to format the header / subheader of different model components -------------- .format_model_component_header <- function(x, type, split_column, is_zero_inflated, is_ordinal_model, is_multivariate = FALSE, ran_pars, # nolint formatted_table = NULL) { # prepare component names .conditional_fixed_text <- if (is_zero_inflated) { "Fixed Effects (Count Model)" } else { "Fixed Effects" } .conditional_random_text <- if (ran_pars) { "Random Effects Variances" } else if (is_zero_inflated) { "Random Effects (Count Model)" } else { "Random Effects" } # remove trailing dots if (endsWith(type, ".")) { type <- gsub("\\.$", "", type) } component_name <- NULL # Do we have any distributional parameters? # this is only relevant for models from brms if (identical(attributes(x)$model_class, "brmsfit")) { # check if we can access the model model <- .get_object(x) # if yes, extract distributional parameters if (!is.null(model)) { dpars <- insight::find_auxiliary(model, verbose = FALSE) # if model has any distributional parameters, check if it's fixed or random # and create component header if (!is.null(dpars)) { type_parts <- unlist(strsplit(type, ".", fixed = TRUE)) if (type_parts[1] %in% dpars) { if (identical(type_parts[2], "random")) { component_name <- paste(type_parts[1], "Random Effects") } else if (identical(type_parts[2], "fixed") || length(type_parts) < 2) { component_name <- paste(type_parts[1], "Parameters") } } } } } if (is.null(component_name)) { component_name <- switch(type, mu = , fixed = , fixed. = , conditional = "Fixed Effects", random. = , random = "Random Effects", conditional.fixed = .conditional_fixed_text, conditional.random = .conditional_random_text, zero_inflated = "Zero-Inflation", zero_inflated.fixed = "Fixed Effects (Zero-Inflation Component)", zero_inflated.random = "Random Effects (Zero-Inflation Component)", survival = , survival.fixed = "Survival", dispersion.fixed = , dispersion = "Dispersion", marginal = "Marginal Effects", emmeans = "Estimated Marginal Means", contrasts = "Contrasts", simplex.fixed = , simplex = "Monotonic Effects", smooth_sd = "Smooth Terms (SD)", smooth_terms = "Smooth Terms", sigma.fixed = , sigma = "Sigma", thresholds = "Thresholds", correlation = "Correlation", `SD/Cor` = "SD / Correlation", Loading = "Loading", location = , location.fixed = "Location Parameters", scale = , scale.fixed = "Scale Parameters", extra = , extra.fixed = "Extra Parameters", nu = "Nu", tau = "Tau", meta = "Meta-Parameters", studies = "Studies", within = "Within-Effects", between = "Between-Effects", interactions = "(Cross-Level) Interactions", precision = "Precision", infrequent_purchase = "Infrequent Purchase", auxiliary = "Auxiliary", residual = "Residual", intercept = "Intercept", regression = "Regression", latent = "Latent", time_dummies = "Time Dummies", type ) } # handle exceptions if (grepl("^conditional\\.(r|R)andom_variances", component_name)) { component_name <- insight::trim_ws(gsub("^conditional\\.(r|R)andom_variances(\\.)*", "", component_name)) if (nzchar(component_name, keepNA = TRUE)) { component_name <- paste0("Random Effects Variances: ", component_name) } else { component_name <- "Random Effects Variances" } } if (grepl("^conditional\\.(r|R)andom", component_name)) { component_name <- insight::trim_ws(gsub("^conditional\\.(r|R)andom(\\.)*", "", component_name)) if 
(nzchar(component_name, keepNA = TRUE)) {
      component_name <- paste0("Random Effects (Count Model): ", component_name)
    } else {
      component_name <- ifelse(ran_pars, "Random Effects Variances", "Random Effects (Count Model)")
    }
  }
  if (grepl("^zero_inflated\\.(r|R)andom", component_name)) {
    component_name <- insight::trim_ws(gsub("^zero_inflated\\.(r|R)andom(\\.)*", "", component_name))
    if (nzchar(component_name, keepNA = TRUE)) {
      component_name <- paste0("Random Effects (Zero-Inflation Component): ", component_name)
    } else {
      component_name <- "Random Effects (Zero-Inflation Component)"
    }
  }
  if (startsWith(component_name, "random.")) {
    component_name <- paste0("Random Effects: ", gsub("^random\\.", "", component_name))
  }

  # clean some special parameter names
  component_name <- gsub("zi", "Zero-Inflation", component_name, fixed = TRUE)
  component_name <- gsub("zoi", "Zero-One-Inflation", component_name, fixed = TRUE)
  component_name <- gsub("coi", "Conditional-One-Inflation", component_name, fixed = TRUE)

  # if we show ZI component only, make sure this appears in header
  if (!grepl("(Zero-Inflation Component)", component_name, fixed = TRUE) &&
    !is.null(formatted_table$Component) &&
    all(formatted_table$Component == "zero_inflated")) {
    component_name <- paste0(component_name, " (Zero-Inflation Component)")
  }

  # tweaking of sub headers
  if ("DirichletRegModel" %in% attributes(x)$model_class) {
    if (startsWith(component_name, "conditional.") || split_column == "Response") {
      s1 <- "Response level:"
      s2 <- gsub("^conditional\\.(.*)", "\\1", component_name)
    } else {
      s1 <- component_name
      s2 <- ""
    }
  } else if (length(split_column) > 1 && "Response" %in% split_column && is_multivariate) {
    # This here only applies to brms multivariate response models
    component_name <- gsub("^conditional\\.(.*)", "Response level: \\1", component_name)
    component_name <- gsub("^sigma\\.(.*)", "Auxiliary parameters, response level: \\1", component_name)
    component_name <- gsub("(.*)fixed\\.(.*)", "\\1\\2", component_name)
    component_name <- gsub("(.*)random\\.(.*)", "Random effects, \\1\\2", component_name)
    s1 <- component_name
    s2 <- ""
  } else if (length(split_column) > 1 ||
    split_column %in% c("Subgroup", "Type", "Group") ||
    grepl(tolower(split_column), tolower(component_name), fixed = TRUE) ||
    component_name %in% c("Within-Effects", "Between-Effects", "(Cross-Level) Interactions")) {
    s1 <- component_name
    s2 <- ""
  } else if (split_column == "Response" && is_ordinal_model) {
    s1 <- "Response level:"
    s2 <- component_name
  } else {
    s1 <- component_name
    if (tolower(split_column) == "component") {
      s2 <- ""
    } else {
      s2 <- split_column
    }
  }

  list(name = component_name, subheader1 = s1, subheader2 = s2)
}

# .insert_row <- function(x, newrow, r) {
#   existingDF[seq(r+1,nrow(existingDF)+1),] <- existingDF[seq(r,nrow(existingDF)),]
#   existingDF[r,] <- newrow
#   existingDF
# }

.prepare_x_for_print <- function(x, select, coef_name, s_value) {
  # minor fix for nested Anovas
  if ("Group" %in% colnames(x) && sum(x$Parameter == "Residuals") > 1) {
    colnames(x)[which(colnames(x) == "Group")] <- "Subgroup"
  }

  # check which columns are to be printed
  if (!is.null(select)) {
    if (all(select == "minimal")) {
      select <- c("Parameter", "Coefficient", "Std_Coefficient", "CI", "CI_low", "CI_high", "p")
    } else if (all(select == "short")) {
      select <- c("Parameter", "Coefficient", "Std_Coefficient", "SE", "p")
    } else if (is.numeric(select)) {
      select <- colnames(x)[select]
    }
    select <- union(select, c("Parameter", "Component", "Effects", "Response", "Subgroup"))
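    # explanatory note: e.g. select = "minimal" expands to the Parameter,
    # Coefficient / Std_Coefficient, CI and p columns; grouping columns such
    # as Component, Effects or Response are always kept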
    # for emmGrid objects, we save specific parameter names as attribute
    parameter_names <- attributes(x)$parameter_names
    if (!is.null(parameter_names)) {
      select <- c(parameter_names, select)
    }
    to_remove <- setdiff(colnames(x), select)
    x[to_remove] <- NULL
  }

  # remove columns that have only NA or Inf
  to_remove <- vapply(colnames(x), function(col) {
    all(is.na(x[[col]]) | is.infinite(x[[col]])) & !grepl("CI_", col, fixed = TRUE)
  }, TRUE)
  if (any(to_remove)) x[to_remove] <- NULL

  # For Bayesian models, we need to prettify parameter names here...
  mc <- attributes(x)$model_class
  cp <- attributes(x)$cleaned_parameters
  if (!is.null(mc) && !is.null(cp) && any(mc %in% c("stanreg", "stanmvreg", "brmsfit"))) {
    match_params <- stats::na.omit(match(names(cp), x$Parameter))
    if (any(match_params)) {
      x$Parameter[match_params] <- cp[x$Parameter[match_params]]
    }
    attr(x, "pretty_names") <- FALSE
    attr(x, "cleaned_parameters") <- NULL
  }

  # for Bayesian meta-analysis, remove ROPE_CI
  if (isTRUE(attributes(x)$is_bayes_meta)) {
    x$CI <- NULL
    x$ROPE_CI <- NULL
    x$ROPE_low <- NULL
    x$ROPE_high <- NULL
  }

  if (!is.null(coef_name)) {
    colnames(x)[which(colnames(x) == "Coefficient")] <- coef_name
    colnames(x)[which(colnames(x) == "Std_Coefficient")] <- paste0("Std_", coef_name)
  }

  # compute s- instead of p-value? (s = -log2(p), the binary surprisal;
  # e.g. p = 0.05 corresponds to s of about 4.3)
  # see 10.1186/s12874-020-01105-9
  if (isTRUE(s_value) && "p" %in% colnames(x)) {
    colnames(x)[colnames(x) == "p"] <- "s"
    x[["s"]] <- log2(1 / x[["s"]])
  }
  x
}

.prepare_splitby_for_print <- function(x) {
  if (!is.null(attributes(x)$model_class) && any(attributes(x)$model_class == "mvord")) {
    x$Response <- NULL
  }
  split_by <- ""
  if ("Component" %in% names(x) && insight::n_unique(x$Component) > 1) {
    split_by <- c(split_by, "Component")
  }
  if ("Effects" %in% names(x) && insight::n_unique(x$Effects) > 1) {
    split_by <- c(split_by, "Effects")
  }
  if ("Response" %in% names(x) && insight::n_unique(x$Response) > 1) {
    split_by <- c(split_by, "Response")
  }
  if ("Group" %in% names(x) && insight::n_unique(x$Group) > 1) {
    split_by <- c(split_by, "Group")
  }
  if ("Subgroup" %in% names(x) && insight::n_unique(x$Subgroup) > 1) {
    split_by <- c(split_by, "Subgroup")
  }
  split_by <- split_by[nzchar(split_by, keepNA = TRUE)]
  split_by
}

# this function is actually similar to "insight::print_parameters()", but more
# sophisticated, to ensure nice output even for complicated or complex models,
# or edge cases...

#' @keywords internal
.format_columns_multiple_components <- function(x,
                                                pretty_names,
                                                split_column = "Component",
                                                digits = 2,
                                                ci_digits = digits,
                                                p_digits = 3,
                                                coef_column = NULL,
                                                format = NULL,
                                                ci_width = "auto",
                                                ci_brackets = TRUE,
                                                zap_small = FALSE,
                                                include_reference = FALSE,
                                                style = NULL,
                                                ...) {
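  # explanatory note on the return shape: for HTML output this function
  # returns a single combined data frame; for text and markdown it returns a
  # list of formatted sub-tables, one per model component (see the end of the
  # function body)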
  final_table <- list()
  ignore_group <- isTRUE(attributes(x)$ignore_group)
  ran_pars <- isTRUE(attributes(x)$ran_pars)
  is_fixest_multi <- identical(attributes(x)$model_class, "fixest_multi")

  # name of "Parameter" column - usually the first column
  parameter_column <- "Parameter"

  # default brackets are parentheses for HTML / MD
  if ((is.null(ci_brackets) || isTRUE(ci_brackets)) &&
    (identical(format, "html") || identical(format, "markdown"))) {
    ci_brackets <- c("(", ")")
  } else if (is.null(ci_brackets) || isTRUE(ci_brackets)) {
    ci_brackets <- c("[", "]")
  }

  # check ordinal / multivariate
  is_ordinal_model <- isTRUE(attributes(x)$ordinal_model)
  is_multivariate <- isTRUE(attributes(x)$multivariate_response)

  # zero-inflation stuff
  is_zero_inflated <- (!is.null(x$Component) & "zero_inflated" %in% x$Component)
  zi_coef_name <- attributes(x)$zi_coefficient_name

  # other special model-components, like emm_list
  coef_name2 <- attributes(x)$coefficient_name2

  # make sure we have correct order of levels from split-factor
  if (!is.null(attributes(x)$model_class) && all(attributes(x)$model_class == "mediate")) {
    x$Component <- factor(x$Component, levels = c("control", "treated", "average", "Total Effect"))
    x$Parameter <- insight::trim_ws(gsub("(.*)\\((.*)\\)$", "\\1", x$Parameter))
  } else {
    x[split_column] <- lapply(x[split_column], function(i) {
      if (!is.factor(i)) i <- factor(i, levels = unique(i))
      i
    })
  }

  # fix column output
  if (inherits(attributes(x)[["model"]], c("lavaan", "blavaan")) && "Label" %in% colnames(x)) {
    x$From <- ifelse(!nzchar(as.character(x$Label), keepNA = TRUE) | x$Label == x$To, x$From, paste0(x$From, " (", x$Label, ")")) # nolint
    x$Label <- NULL
  }

  if (inherits(attributes(x)[["model"]], c("lavaan", "blavaan")) && !"Parameter" %in% colnames(x)) {
    parameter_column <- colnames(x)[1]
  }

  if (inherits(attributes(x)[["model"]], c("lavaan", "blavaan")) && "Defined" %in% x$Component) {
    x$From[x$Component == "Defined"] <- ""
    x$Operator[x$Component == "Defined"] <- ""
    x$To <- ifelse(x$Component == "Defined", paste0("(", x$To, ")"), x$To)
  }

  # set up split-factor
  if (length(split_column) > 1) {
    split_by <- lapply(split_column, function(i) x[[i]])
  } else {
    split_by <- list(x[[split_column]])
  }
  names(split_by) <- split_column

  # make sure we have correct sorting here...
  tables <- split(x, f = split_by)

  # validation check - only preserve tables with any data in data frames
  tables <- tables[vapply(tables, nrow, numeric(1)) > 0]

  # fix table names for random effects, when we only have random effects. In
  # such cases, the wrong header (fixed effects) is chosen - to prevent this,
  # we "fake" the names of the split components by prefixing them with "random."
  if (!is.null(x$Effects) && all(x$Effects == "random") && !all(startsWith(names(tables), "random."))) {
    wrong_names <- !startsWith(names(tables), "random.")
    names(tables)[wrong_names] <- paste0("random.", names(tables)[wrong_names])
  }

  # fixest_multi models can have a special structure, with multiple responses
  # and multiple rhs of formulas. We fix headers here
  if (is_fixest_multi && length(split_column) > 1) {
    old_names <- unique(paste0(x$Response, ".", x$Group))
    new_names <- unique(paste0(x$Response, " ~ ", x$Group))
    names(tables) <- new_names[match(names(tables), old_names)]
  }

  for (type in names(tables)) {
    # do we have emmeans emlist? and contrasts?
model_class <- attributes(tables[[type]])$model_class em_list_coef_name <- (!is.null(model_class) && "emm_list" %in% model_class && "contrasts" %in% tables[[type]]$Component) # Don't print Component column for (i in split_column) { tables[[type]][[i]] <- NULL } # Smooth terms statistics if ("t / F" %in% names(tables[[type]])) { if (type == "smooth_terms") { names(tables[[type]])[names(tables[[type]]) == "t / F"] <- "F" } if (type == "conditional") { names(tables[[type]])[names(tables[[type]]) == "t / F"] <- "t" } } else if (type == "smooth_terms" && "t" %in% names(tables[[type]])) { names(tables[[type]])[names(tables[[type]]) == "t"] <- "F" } if ("z / Chi2" %in% names(tables[[type]])) { if (type == "smooth_terms") { names(tables[[type]])[names(tables[[type]]) == "z / Chi2"] <- "Chi2" } if (type == "conditional") { names(tables[[type]])[names(tables[[type]]) == "z / Chi2"] <- "z" } } # Don't print se and ci if all are missing if (all(is.na(tables[[type]]$SE))) tables[[type]]$SE <- NULL if (all(is.na(tables[[type]]$CI_low)) && all(is.na(tables[[type]]$CI_high))) { tables[[type]]$CI_low <- NULL tables[[type]]$CI_high <- NULL } # if (all(is.na(tables[[type]]$CI_low))) tables[[type]]$CI_low <- NULL # if (all(is.na(tables[[type]]$CI_high))) tables[[type]]$CI_high <- NULL # Don't print if empty col tables[[type]][vapply(colnames(tables[[type]]), function(x) { column <- tables[[type]][[x]] (!any(nzchar(as.character(column), keepNA = TRUE)) | all(is.na(column))) && !grepl("_CI_(high|low)$", x) }, logical(1))] <- NULL attr(tables[[type]], "digits") <- digits attr(tables[[type]], "ci_digits") <- ci_digits attr(tables[[type]], "p_digits") <- p_digits # random pars with level? combine into parameter column if (all(c("Parameter", "Level") %in% colnames(tables[[type]]))) { tables[[type]]$Parameter <- paste0( tables[[type]]$Parameter, " ", ci_brackets[1], tables[[type]]$Level, ci_brackets[2] ) tables[[type]]$Level <- NULL } # rename columns for emmeans contrast part if (em_list_coef_name && !is.null(coef_column)) { colnames(tables[[type]])[which(colnames(tables[[type]]) == coef_column)] <- coef_name2 } # rename columns for zero-inflation part if (startsWith(type, "zero") && !is.null(zi_coef_name) && !is.null(coef_column)) { colnames(tables[[type]])[which(colnames(tables[[type]]) == coef_column)] <- zi_coef_name colnames(tables[[type]])[which(colnames(tables[[type]]) == paste0("Std_", coef_column))] <- paste0("Std_", zi_coef_name) # nolint } # rename columns for correlation, location or scale part if (type %in% c("correlation", "scale", "location") && !is.null(coef_column)) { colnames(tables[[type]])[which(colnames(tables[[type]]) == coef_column)] <- "Estimate" } # rename columns for dispersion part if (startsWith(type, "dispersion") && !is.null(coef_column)) { colnames(tables[[type]])[which(colnames(tables[[type]]) == coef_column)] <- "Coefficient" } # rename columns for random part if (grepl("random", type, fixed = TRUE) && any(colnames(tables[[type]]) %in% .all_coefficient_types)) { colnames(tables[[type]])[colnames(tables[[type]]) %in% .all_coefficient_types] <- "Coefficient" } if (grepl("random", type, fixed = TRUE) && isTRUE(ran_pars)) { tables[[type]]$CI <- NULL } # add the coefficient for the base-(reference)-level of factors? 
    if (include_reference) {
      tables[[type]] <- .add_reference_level(tables[[type]])
    }

    formatted_table <- insight::format_table(
      tables[[type]],
      digits = digits,
      ci_digits = ci_digits,
      p_digits = p_digits,
      pretty_names = pretty_names,
      ci_width = ci_width,
      ci_brackets = ci_brackets,
      zap_small = zap_small,
      select = style,
      ...
    )

    component_header <- .format_model_component_header(
      x,
      type,
      split_column,
      is_zero_inflated,
      is_ordinal_model,
      is_multivariate,
      ran_pars,
      formatted_table
    )

    # exceptions for random effects
    if (insight::has_single_value(formatted_table$Group, remove_na = TRUE)) {
      component_header$subheader1 <- paste0(component_header$subheader1, " (", formatted_table$Group, ")")
      formatted_table$Group <- NULL
    }

    # remove unnecessary columns
    if (insight::has_single_value(formatted_table$Component, remove_na = TRUE)) {
      formatted_table$Component <- NULL
    }

    # no column with CI-level in output
    if (!is.null(formatted_table$CI) && insight::has_single_value(formatted_table$CI, remove_na = TRUE)) {
      formatted_table$CI <- NULL
    }

    table_caption <- NULL
    if (is.null(format) || format %in% c("markdown", "text")) {
      # Print
      if (component_header$name != "rewb-contextual") {
        table_caption <- c(
          sprintf("# %s %s", component_header$subheader1, tolower(component_header$subheader2)),
          "blue"
        )
      }
    } else if (format %in% c("markdown", "html")) {
      # Print
      if (component_header$name != "rewb-contextual") {
        table_caption <- sprintf("%s %s", component_header$subheader1, tolower(component_header$subheader2))
      }
      # replace brackets by parentheses
      if (!is.null(parameter_column) && parameter_column %in% colnames(formatted_table)) {
        formatted_table[[parameter_column]] <- gsub("[", ci_brackets[1], formatted_table[[parameter_column]], fixed = TRUE) # nolint
        formatted_table[[parameter_column]] <- gsub("]", ci_brackets[2], formatted_table[[parameter_column]], fixed = TRUE) # nolint
      }
    }

    if (identical(format, "html")) {
      formatted_table$Component <- table_caption
    } else {
      attr(formatted_table, "table_caption") <- table_caption
    }

    # remove columns with a single unique value
    if (insight::has_single_value(formatted_table$Effects, remove_na = TRUE)) formatted_table$Effects <- NULL
    if (insight::has_single_value(formatted_table$Group, remove_na = TRUE)) formatted_table$Group <- NULL

    final_table <- c(final_table, list(formatted_table))
  }

  if (identical(format, "html")) {
    # fix non-equal length of columns
    final_table <- .fix_nonmatching_columns(
      final_table,
      is_lavaan = inherits(attributes(x)[["model"]], c("lavaan", "blavaan"))
    )
    do.call(rbind, final_table)
  } else {
    insight::compact_list(final_table)
  }
}

# helper to fix unequal number of columns for list of data frames,
# when used for HTML printing
.fix_nonmatching_columns <- function(final_table, is_lavaan = FALSE) {
  # fix for lavaan here
  if (is_lavaan) {
    for (i in seq_along(final_table)) {
      if (!is.null(final_table[[i]]$Link) && !is.null(final_table[[i]]$To) && all(is.na(final_table[[i]]$Link))) {
        final_table[[i]]$Link <- final_table[[i]]$To
        final_table[[i]]$To <- NA
      }
      colnames(final_table[[i]])[1] <- "Parameter"
      if (!is.null(final_table[[i]]$To) && all(is.na(final_table[[i]]$To))) {
        final_table[[i]]$To <- NULL
      }
    }
  }

  # then check for correct column length
  col_len <- vapply(final_table, function(i) length(colnames(i)), numeric(1))

  # pad non-matching columns so all sub-tables have the same columns
  if (!all(col_len == max(col_len))) {
    all_columns <- unique(unlist(lapply(final_table, colnames)))
    for (i in seq_along(final_table)) {
      missing_columns <- setdiff(all_columns, colnames(final_table[[i]]))
      if (length(missing_columns)) {
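        # explanatory note: the attributes are saved first because the column
        # assignment and re-ordering below drop custom attributes; they are
        # restored afterwards via utils::modifyList()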
        a <- attributes(final_table[[i]])
        final_table[[i]][missing_columns] <- NA
        final_table[[i]] <- final_table[[i]][match(all_columns, colnames(final_table[[i]]))]
        attributes(final_table[[i]]) <- utils::modifyList(a, attributes(final_table[[i]]))
      }
    }
  }
  final_table
}
parameters/R/bootstrap_model.R0000644000176200001440000002173515033425412016145 0ustar liggesusers#' Model bootstrapping
#'
#' Bootstrap a statistical model n times to return a data frame of estimates.
#'
#' @param model Statistical model.
#' @param iterations The number of draws to simulate/bootstrap.
#' @param type Character string specifying the type of bootstrap. For mixed models
#'   of class `merMod` or `glmmTMB`, may be `"parametric"` (default) or
#'   `"semiparametric"` (see `?lme4::bootMer` for details). For all
#'   other models, see argument `sim` in `?boot::boot` (defaults to
#'   `"ordinary"`).
#' @param parallel The type of parallel operation to be used (if any).
#' @param n_cpus Number of processes to be used in parallel operation.
#' @param cluster Optional cluster when `parallel = "snow"`. See `?lme4::bootMer`
#'   for details.
#' @param ... Arguments passed to or from other methods.
#' @inheritParams p_value
#'
#' @return A data frame of bootstrapped estimates.
#'
#' @details By default, `boot::boot()` is used to generate bootstraps from
#'   the model data, which are then used to `update()` the model, i.e. refit
#'   the model with the bootstrapped samples. For `merMod` objects (**lme4**)
#'   or models from **glmmTMB**, the `lme4::bootMer()` function is used to
#'   obtain bootstrapped samples. `bootstrap_parameters()` summarizes the
#'   bootstrapped model estimates.
#'
#' @section Using with **emmeans**:
#' The output can be passed directly to the various functions from the
#' **emmeans** package, to obtain bootstrapped estimates, contrasts, simple
#' slopes, etc. and their confidence intervals. These can then be passed to
#' `model_parameters()` to obtain standard errors, p-values, etc. (see
#' example).
#'
#' Note that the p-values returned here are estimated under the assumption of
#' *translation equivariance*: that the shape of the sampling distribution is
#' unaffected by the null being true or not. If this assumption does not hold,
#' p-values can be biased, and it is suggested to use proper permutation tests
#' to obtain non-parametric p-values.
#'
#' @seealso [`bootstrap_parameters()`], [`simulate_model()`], [`simulate_parameters()`]
#'
#' @examplesIf require("boot", quietly = TRUE) && require("emmeans", quietly = TRUE)
#' \donttest{
#' model <- lm(mpg ~ wt + factor(cyl), data = mtcars)
#' b <- bootstrap_model(model)
#' print(head(b))
#'
#' est <- emmeans::emmeans(b, consec ~ cyl)
#' print(model_parameters(est))
#' }
#' @export
bootstrap_model <- function(model, iterations = 1000, ...) {
  UseMethod("bootstrap_model")
}

#' @rdname bootstrap_model
#' @export
bootstrap_model.default <- function(model,
                                    iterations = 1000,
                                    type = "ordinary",
                                    parallel = "no",
                                    n_cpus = 1,
                                    cluster = NULL,
                                    verbose = FALSE,
                                    ...)
{ # check for valid input .is_model_valid(model) insight::check_if_installed("boot") type <- insight::validate_argument( type, c("ordinary", "parametric", "balanced", "permutation", "antithetic") ) parallel <- insight::validate_argument(parallel, c("no", "multicore", "snow")) model_data <- data <- insight::get_data(model, verbose = FALSE) # nolint model_response <- insight::find_response(model) boot_function <- function(model, data, indices) { d <- data[indices, , drop = FALSE] # allows boot to select sample if (inherits(model, "biglm")) { fit <- suppressMessages(stats::update(model, moredata = d)) } else if (verbose) { fit <- stats::update(model, data = d) } else { fit <- suppressMessages(stats::update(model, data = d)) } params <- insight::get_parameters(fit, verbose = FALSE) n_params <- insight::n_parameters(model) if (nrow(params) != n_params) { params <- stats::setNames(rep.int(NA, n_params), params$Parameter) } else { params <- stats::setNames(params$Estimate, params$Parameter) # Transform to named vector } params } if (type == "parametric") { f <- function(x, mle) { out <- model_data resp <- stats::simulate(x, nsim = 1) out[[model_response]] <- resp out } results <- boot::boot( data = data, statistic = boot_function, R = iterations, sim = type, parallel = parallel, ncpus = n_cpus, model = model, ran.gen = f ) } else { results <- boot::boot( data = data, statistic = boot_function, R = iterations, sim = type, parallel = parallel, ncpus = n_cpus, model = model ) } out <- as.data.frame(results$t) out <- out[stats::complete.cases(out), ] names(out) <- insight::get_parameters(model, verbose = FALSE)$Parameter class(out) <- unique(c("bootstrap_model", "see_bootstrap_model", class(out))) attr(out, "original_model") <- model out } #' @export bootstrap_model.merMod <- function(model, iterations = 1000, type = "parametric", parallel = "no", n_cpus = 1, cluster = NULL, verbose = FALSE, ...) 
{ insight::check_if_installed("lme4") type <- insight::validate_argument(type, c("parametric", "semiparametric")) parallel <- insight::validate_argument(parallel, c("no", "multicore", "snow")) boot_function <- function(model) { params <- insight::get_parameters(model, verbose = FALSE) n_params <- insight::n_parameters(model) # for glmmTMB, remove dispersion paramters, if any if (inherits(model, "glmmTMB") && "Component" %in% names(params) && "dispersion" %in% params$Component) { # find number of dispersion parameters n_disp <- sum(params$Component == "dispersion") # remove dispersion parameters params <- params[params$Component != "dispersion", ] # make sure number of parameters is updated n_params <- n_params - n_disp } if (nrow(params) != n_params) { params <- stats::setNames(rep.int(NA, n_params), params$Parameter) } else { params <- stats::setNames(params$Estimate, params$Parameter) # Transform to named vector } params } if (verbose) { results <- lme4::bootMer( model, boot_function, nsim = iterations, type = type, parallel = parallel, ncpus = n_cpus, cl = cluster ) } else { results <- suppressMessages(lme4::bootMer( model, boot_function, nsim = iterations, verbose = FALSE, type = type, parallel = parallel, ncpus = n_cpus, cl = cluster )) } out <- as.data.frame(results$t) out <- out[stats::complete.cases(out), ] names(out) <- insight::find_parameters(model, effects = "fixed")$conditional class(out) <- unique(c("bootstrap_model", "see_bootstrap_model", class(out))) attr(out, "original_model") <- model out } #' @export bootstrap_model.glmmTMB <- bootstrap_model.merMod #' @export bootstrap_model.nestedLogit <- function(model, iterations = 1000, type = "ordinary", parallel = "no", n_cpus = 1, verbose = FALSE, ...) { insight::check_if_installed("boot") type <- insight::validate_argument( type, c("ordinary", "balanced", "permutation", "antithetic") ) parallel <- insight::validate_argument(parallel, c("no", "multicore", "snow")) model_data <- data <- insight::get_data(model, verbose = FALSE) # nolint model_response <- insight::find_response(model) boot_function <- function(model, data, indices) { d <- data[indices, , drop = FALSE] # allows boot to select sample if (verbose) { fit <- stats::update(model, data = d) } else { fit <- suppressMessages(stats::update(model, data = d)) } params <- insight::get_parameters(fit, verbose = FALSE) stats::setNames(params$Estimate, params$Parameter) # Transform to named vector } results <- boot::boot( data = data, statistic = boot_function, R = iterations, sim = type, parallel = parallel, ncpus = n_cpus, model = model ) out <- as.data.frame(results$t) out <- out[stats::complete.cases(out), ] params <- insight::get_parameters(model, verbose = FALSE) names(out) <- paste0(params$Parameter, ".", params$Component) class(out) <- unique(c("bootstrap_model", "see_bootstrap_model", class(out))) attr(out, "original_model") <- model out } parameters/R/methods_vgam.R0000644000176200001440000000436014736731407015435 0ustar liggesusers# classes: .vglm, .vgam ########### .vgam --------------- #' @export model_parameters.vgam <- model_parameters.gam #' @export standard_error.vgam <- function(model, ...) { params <- insight::get_parameters(model) se <- sqrt(diag(insight::get_varcov(model))) # sort se <- se[params$Parameter] .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se), Component = params$Component ) } #' @export p_value.vgam <- function(model, ...) 
{ stat <- insight::get_statistic(model) stat$p <- as.vector(stats::pchisq(stat$Statistic, df = insight::get_df(model), lower.tail = FALSE)) stat[c("Parameter", "p", "Component")] } #' @export simulate_model.vgam <- function(model, iterations = 1000, ...) { out <- .simulate_model(model, iterations, component = "all") class(out) <- c("parameters_simulate_model", class(out)) out } ########### .vglm --------------- #' @export p_value.vglm <- function(model, ...) { insight::check_if_installed("VGAM") cs <- VGAM::summary(model)@coef3 p <- cs[, 4] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export standard_error.vglm <- function(model, ...) { se <- sqrt(diag(insight::get_varcov(model))) .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } # ci.vgam <- function(x, ci = 0.95, component = c("all", "conditional", "smooth"), ...) { # component <- match.arg(component) # # # dof and SE # dof <- degrees_of_freedom(x) # se <- standard_error(x)$SE # params <- insight::get_parameters(x) # # se <- se[!is.na(dof)] # dof <- dof[!is.na(dof)] # params_names <- names(dof) # # # Wald CI for non-chisq parameters # out <- .ci_generic(model = x, ci = ci, dof = Inf) # # chisq_fac <- stats::qchisq(se, df = dof, lower.tail = FALSE) # for (i in 1:length(params_names)) { # out$CI_low[out$Parameter == params_names[i]] <- params$Estimate[params$Parameter == params_names[i]] - se[i] * chisq_fac[i] # out$CI_high[out$Parameter == params_names[i]] <- params$Estimate[params$Parameter == params_names[i]] + se[i] * chisq_fac[i] # } # # out # } parameters/R/methods_rstanarm.R0000644000176200001440000001135515022763445016330 0ustar liggesusers#' @export model_parameters.stanreg <- function(model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = "pd", rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, effects = "fixed", exponentiate = FALSE, standardize = NULL, group_level = FALSE, keep = NULL, drop = NULL, verbose = TRUE, ...) { # for coef(), we don't need all the attributes and just stop here if (effects %in% c("total", "random_total")) { params <- .group_level_total(model) params$Effects <- "total" class(params) <- c("parameters_coef", "see_parameters_coef", class(params)) return(params) } # adjust arguments if (effects == "random" && group_level) { effects <- "grouplevel" } if (effects == "grouplevel") { priors <- FALSE } # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, effects = effects, standardize = standardize, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) ## TODO: can we use the regular pretty-name-formatting? params <- .add_pretty_names(params, model) # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) params <- .add_model_parameters_attributes( params, model, ci, exponentiate, ci_method = ci_method, group_level = group_level, verbose = verbose, ... 
) attr(params, "parameter_info") <- .get_cleaned_parameters(params, model) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export model_parameters.stanmvreg <- function(model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = "pd", rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, effects = "fixed", standardize = NULL, group_level = FALSE, keep = NULL, drop = NULL, verbose = TRUE, ...) { if (utils::packageVersion("insight") > "1.2.0" && effects == "random" && group_level) { effects <- "grouplevel" } # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, effects = effects, standardize = standardize, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) params$Parameter <- gsub("^(.*)\\|(.*)", "\\2", params$Parameter) params <- .add_pretty_names(params, model) attr(params, "ci") <- ci attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export standard_error.stanreg <- standard_error.brmsfit #' @export standard_error.mvstanreg <- standard_error.brmsfit #' @export p_value.stanreg <- p_value.BFBayesFactor parameters/R/ci_ml1.R0000644000176200001440000000057514317274256014127 0ustar liggesusers#' @rdname p_value_ml1 #' @export ci_ml1 <- function(model, ci = 0.95, ...) { df_ml1 <- dof_ml1(model) out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, effects = "fixed", component = "all", dof = df_ml1, method = "ml1", ... ) }) out <- do.call(rbind, out) row.names(out) <- NULL out } parameters/R/methods_margins.R0000644000176200001440000000424514507235543016141 0ustar liggesusers#' @export model_parameters.margins <- function(model, ci = 0.95, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { # Parameters, Estimate and CI params <- insight::get_parameters(model) params <- .data_frame( params, SE = summary(model)$SE ) # CI params <- merge(params, ci(model, ci = ci), by = "Parameter", sort = FALSE) # Statistic statistic <- insight::get_statistic(model) params <- merge(params, statistic, by = "Parameter", sort = FALSE) # p-value params <- .data_frame(params, p = summary(model)$p) # ==== Renaming if ("Statistic" %in% names(params)) { names(params) <- gsub( "Statistic", gsub("(-|\\s)statistic", "", attr(statistic, "statistic", exact = TRUE)), names(params), fixed = TRUE ) names(params) <- gsub("chi-squared", "Chi2", names(params), fixed = TRUE) } names(params) <- gsub("(c|C)hisq", "Chi2", names(params)) names(params) <- gsub("Estimate", "Coefficient", names(params), fixed = TRUE) # ==== adjust p-values? if (!is.null(p_adjust)) { params <- .p_adjust(params, p_adjust, model, verbose) } # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) params <- .add_model_parameters_attributes( params, model, ci, exponentiate, p_adjust = p_adjust, verbose = verbose, ... ) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export ci.margins <- function(x, ci = 0.95, ...) 
{
  .ci_generic(model = x, ci = ci, dof = Inf, ...)
}

#' @export
standard_error.margins <- function(model, ...) {
  params <- insight::get_parameters(model)
  .data_frame(
    Parameter = params$Parameter,
    SE = summary(model)$SE
  )
}

#' @export
p_value.margins <- function(model, ...) {
  params <- insight::get_parameters(model)
  .data_frame(
    Parameter = params$Parameter,
    p = summary(model)$p
  )
}

#' @export
format_parameters.margins <- function(model, ...) {
  NULL
}
parameters/R/methods_pglm.R0000644000176200001440000000030314004234333015423 0ustar liggesusers#' @export
p_value.pglm <- function(model, ...) {
  p <- summary(model)$estimate[, 4]
  .data_frame(
    Parameter = .remove_backticks_from_string(names(p)),
    p = as.vector(p)
  )
}
parameters/R/methods_marginaleffects.R0000644000176200001440000001726615062754403017633 0ustar liggesusers# x <- lm(Sepal.Width ~ Species * Petal.Length, data = iris)
# model <- marginaleffects(x, newdata = insight::get_datagrid(x, at = "Species"), variables = "Petal.Length")

# model_parameters ----------------

#' @export
model_parameters.marginaleffects <- function(
  model,
  ci = 0.95,
  exponentiate = FALSE,
  verbose = TRUE,
  ...
) {
  insight::check_if_installed("marginaleffects", minimum_version = "0.29.0")

  # Bayesian models have posterior draws as attribute
  is_bayesian <- !is.null(suppressWarnings(marginaleffects::get_draws(model, "PxD")))

  if (is_bayesian) {
    # Bayesian
    out <- suppressWarnings(bayestestR::describe_posterior(model, ci = ci, verbose = verbose, ...))
  } else {
    # non-Bayesian
    out <- as.data.frame(model)
    # all columns in data grid and model data, we only want to keep "by" variables
    all_data_cols <- union(
      colnames(marginaleffects::components(model, "newdata")),
      colnames(marginaleffects::components(model, "modeldata"))
    )
    # columns we want to keep
    by_cols <- .keep_me_columns(model)
    # remove redundant columns
    to_remove <- setdiff(all_data_cols, by_cols)
    out <- out[, !colnames(out) %in% to_remove, drop = FALSE]
  }

  out <- .rename_reserved_marginaleffects(out)

  # need to standardize names for non-Bayesian models. Bayesian models have
  # been processed through describe_posterior() already
  if (!is_bayesian) {
    out <- insight::standardize_names(out, style = "easystats")
  }

  # edge case: for avg_comparisons() with custom hypothesis, "term" and "hypothesis"
  # are identical columns, now both named "Parameter" - remove one
  param_cols <- which(colnames(out) == "Parameter")
  if (length(param_cols) > 1) {
    out[param_cols[-1]] <- NULL
  }
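  # Illustrative round-trip of the reserved-word escaping (sketch; "z" here is
  # a hypothetical focal term): .rename_reserved_marginaleffects() appends
  # "#####" to focal terms named "t" or "z", so that standardize_names() does
  # not mistake them for statistic columns; the gsub() below restores them:
  #   gsub("#####$", "", c("Parameter", "z#####"))  #> "Parameter" "z"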
colnames(out) <- gsub("#####$", "", colnames(out)) # contrast_ columns provide indispensable information about the comparisons colnames(out)[colnames(out) == "contrast"] <- "Comparison" colnames(out) <- gsub("^contrast_", "Comparison: ", colnames(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) # do not print or report these columns out <- out[, !colnames(out) %in% c("predicted_lo", "predicted_hi"), drop = FALSE] if (inherits(model, "comparisons")) { attr(out, "coefficient_name") <- "Estimate" attr(out, "title") <- "Contrasts between Adjusted Predictions" if ("Type" %in% colnames(out)) { attr(out, "prediction_type") <- out$Type[1] } } else if (inherits(model, "slopes")) { attr(out, "coefficient_name") <- "Slope" } else if (inherits(model, "predictions")) { attr(out, "coefficient_name") <- "Predicted" } else if (inherits(model, "hypotheses")) { attr(out, "coefficient_name") <- "Estimate" } # exponentiate coefficients and SE/CI, if requested out <- .exponentiate_parameters(out, model = NULL, exponentiate) # add further information as attributes out <- .safe( .add_model_parameters_attributes( out, model = model, ci = ci, exponentiate = exponentiate, verbose = verbose, ... ), out ) class(out) <- c("parameters_model", "see_parameters_model", class(out)) out } #' @export model_parameters.comparisons <- model_parameters.marginaleffects #' @export model_parameters.hypotheses <- model_parameters.marginaleffects #' @export model_parameters.slopes <- model_parameters.marginaleffects #' @export model_parameters.predictions <- function( model, ci = 0.95, exponentiate = FALSE, verbose = TRUE, ... ) { insight::check_if_installed("marginaleffects", minimum_version = "0.29.0") # Bayesian models have posterior draws as attribute is_bayesian <- !is.null(suppressWarnings(marginaleffects::get_draws(model, "PxD"))) if (is_bayesian) { # Bayesian out <- suppressWarnings(bayestestR::describe_posterior(model, ci = ci, verbose = verbose, ...)) } else { # columns we want to keep by_cols <- .keep_me_columns(model) # handle non-Bayesian models out <- .rename_reserved_marginaleffects(model) out <- datawizard::data_rename(out, "estimate", "predicted") out <- datawizard::data_relocate(out, "predicted", before = 1) out <- insight::standardize_names(out, style = "easystats") } out <- insight::standardize_column_order(out, style = "easystats") # in case data grid contained column names that are reserved words, # rename those back now... 
colnames(out) <- gsub("#####$", "", colnames(out)) # remove and reorder some columns out$rowid <- out$Type <- out$rowid_dedup <- NULL # find at-variables at_variables <- insight::compact_character(c( marginaleffects::components(model, "variable_names_by"), marginaleffects::components(model, "variable_names_by_hypothesis") )) # find cofficient name - differs for Bayesian models coef_name <- intersect(c("Predicted", "Coefficient"), colnames(out))[1] if (!is.null(at_variables) && !is.na(coef_name) && all(at_variables %in% colnames(out))) { out <- datawizard::data_relocate(out, select = at_variables, after = coef_name) } # extract response, remove from data frame reg_model <- marginaleffects::components(model, "model") if (!is.null(reg_model) && insight::is_model(reg_model)) { resp <- insight::find_response(reg_model) # check if response could be extracted if (!is.null(resp)) { # for some models, like brms-special response formula, we have multiple # values in "resp", so we iterate all of them separately for (r in resp) { out[[r]] <- NULL } } } out <- .safe( .add_model_parameters_attributes( out, model = model, ci = ci, exponentiate = exponentiate, verbose = verbose, ... ), out ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(out, "coefficient_name") <- "Predicted" attr(out, "no_caption") <- TRUE # exponentiate coefficients and SE/CI, if requested out <- .exponentiate_parameters(out, model = NULL, exponentiate) class(out) <- c("parameters_model", "see_parameters_model", class(out)) out } .rename_reserved_marginaleffects <- function(model) { # get focal terms - we might escape column names where focal terms # equal "reserved" names, like t- or z-statistic focal_terms <- attributes(model)$focal_terms reserved <- c("t", "z") renamed_focal <- NULL # any focal terms equals reserved words? if so, rename if (any(reserved %in% focal_terms)) { renamed_focal <- focal_terms[focal_terms %in% reserved] model <- datawizard::data_rename( model, select = renamed_focal, replacement = paste0(renamed_focal, "#####") ) } model } .fix_duplicated_by_columns <- function(x, by_cols) { duplicated_names <- grep( paste0("(", paste0(by_cols, "\\.\\d+$", collapse = "|"), ")"), colnames(x), value = TRUE ) # if we have duplicated "by" columns, we want to remove those as well if (length(duplicated_names) > 0) { x[duplicated_names] <- NULL } x } .keep_me_columns <- function(model) { # columns we want to keep by_cols <- union( marginaleffects::components(model, "variable_names_by"), marginaleffects::components(model, "variable_names_by_hypothesis") ) # and newdata, if specified if (!is.null(marginaleffects::components(model, "call")$newdata)) { by_cols <- union( by_cols, colnames(marginaleffects::components(model, "newdata")) ) } by_cols } parameters/R/methods_quantreg.R0000644000176200001440000001604014736731407016327 0ustar liggesusers# quantreg: .rq, .rqss, .crq, .nlrq, .rqs # model parameters --------------------- #' @export model_parameters.rqss <- model_parameters.cgam #' @export model_parameters.rqs <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, keep_parameters = keep, drop_parameters = drop, p_adjust = p_adjust, verbose = verbose, ... 
) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } # ci --------------------- #' @export ci.rq <- ci.default #' @export ci.rqss <- ci.default #' @export ci.crq <- ci.default #' @export ci.nlrq <- ci.default #' @export ci.rqs <- ci.default # standard errors --------------------- #' @export standard_error.rq <- function(model, ...) { se <- .get_quantreg_se(model) if (is.null(se)) { vc <- insight::get_varcov(model) se <- as.vector(sqrt(diag(vc))) } params <- insight::get_parameters(model) params$SE <- se params[intersect(colnames(params), c("Parameter", "SE", "Component"))] } #' @export standard_error.rqs <- function(model, ...) { se <- tryCatch( { s <- suppressWarnings(summary(model, covariance = TRUE)) cs <- do.call(rbind, lapply(s, stats::coef)) cs[, "Std. Error"] }, error = function(e) { NULL } ) params <- insight::get_parameters(model) data.frame( Parameter = params$Parameter, SE = se, Component = params$Component, stringsAsFactors = FALSE, row.names = NULL ) } #' @export standard_error.crq <- standard_error.rq #' @export standard_error.nlrq <- standard_error.rq #' @export standard_error.rqss <- function(model, component = c("all", "conditional", "smooth_terms"), ...) { component <- match.arg(component) cs <- summary(model)$coef se_column <- intersect(c("Std Error", "Std. Error"), colnames(cs)) se <- cs[, se_column] params_cond <- insight::get_parameters(model, component = "conditional") params_smooth <- insight::get_parameters(model, component = "smooth_terms") out_cond <- .data_frame( Parameter = params_cond$Parameter, SE = se, Component = "conditional" ) out_smooth <- .data_frame( Parameter = params_smooth$Parameter, SE = NA, Component = "smooth_terms" ) switch(component, all = rbind(out_cond, out_smooth), conditional = out_cond, smooth_terms = out_smooth ) } .get_quantreg_se <- function(model) { se <- tryCatch( { cs <- suppressWarnings(stats::coef(summary(model))) se_column <- intersect(c("Std Error", "Std. Error"), colnames(cs)) if (length(se_column)) { cs[, se_column] } else { vc <- insight::get_varcov(model) as.vector(sqrt(diag(vc))) } }, error = function(e) { NULL } ) if (is.null(se)) { se <- tryCatch( { sc <- summary(model) if (all(unlist(lapply(sc, is.list)))) { list_sc <- lapply(sc, function(i) { .x <- as.data.frame(i) .x$Parameter <- rownames(.x) .x }) out <- do.call(rbind, list_sc) se <- stats::setNames(out$coefficients.Std.Error, sprintf("tau (%g)", out$tau)) } else { se <- stats::setNames(unname(sc$coefficients[, 4]), names(sc$coefficients[, 4])) } }, error = function(e) { NULL } ) } se } # p values --------------------- #' @export p_value.rq <- function(model, ...) { p <- .get_quantreg_p(model) params <- insight::get_parameters(model) params$p <- p params[intersect(colnames(params), c("Parameter", "p", "Component"))] } #' @export p_value.rqs <- function(model, ...) { p <- tryCatch( { s <- suppressWarnings(summary(model, covariance = TRUE)) cs <- do.call(rbind, lapply(s, stats::coef)) cs[, "Pr(>|t|)"] }, error = function(e) { NULL } ) params <- insight::get_parameters(model) data.frame( Parameter = params$Parameter, p = p, Component = params$Component, stringsAsFactors = FALSE, row.names = NULL ) } #' @export p_value.crq <- p_value.rq #' @export p_value.nlrq <- p_value.rq #' @export p_value.rqss <- function(model, component = c("all", "conditional", "smooth_terms"), ...) 
{ component <- match.arg(component) cs <- summary(model)$coef p_column <- intersect(c("Pr(>|t|)", "Pr(>|z|)"), colnames(cs)) p_cond <- cs[, p_column] cs <- summary(model)$qsstab p_smooth <- cs[, "Pr(>F)"] params_cond <- insight::get_parameters(model, component = "conditional") params_smooth <- insight::get_parameters(model, component = "smooth_terms") out_cond <- .data_frame( Parameter = params_cond$Parameter, p = as.vector(p_cond), Component = "conditional" ) out_smooth <- .data_frame( Parameter = params_smooth$Parameter, p = as.vector(p_smooth), Component = "smooth_terms" ) switch(component, all = rbind(out_cond, out_smooth), conditional = out_cond, smooth_terms = out_smooth ) } .get_quantreg_p <- function(model) { p <- tryCatch( { cs <- suppressWarnings(stats::coef(summary(model))) cs[, "Pr(>|t|)"] }, error = function(e) { NULL } ) if (is.null(p)) { p <- tryCatch( { .get_pval_from_summary( model, cs = suppressWarnings(stats::coef(summary(model, covariance = TRUE))) ) }, error = function(e) { NULL } ) } if (is.null(p)) { p <- tryCatch( { sc <- summary(model) if (all(unlist(lapply(sc, is.list)))) { list_sc <- lapply(sc, function(i) { .x <- as.data.frame(i) .x$Parameter <- rownames(.x) .x }) out <- do.call(rbind, list_sc) p <- stats::setNames(out[[grep("^coefficients\\.Pr", colnames(out))]], sprintf("tau (%g)", out$tau)) } else { p <- stats::setNames(unname(sc$coefficients[, 6]), names(sc$coefficients[, 6])) } }, error = function(e) { NULL } ) } p } parameters/R/methods_lmtest.R0000644000176200001440000000076214716604200016001 0ustar liggesusers#' @export ci.coeftest <- ci.default #' @export p_value.coeftest <- function(model, ...) { .data_frame( Parameter = .remove_backticks_from_string(row.names(model)), p = model[, 4] ) } #' @export standard_error.coeftest <- function(model, ...) { .data_frame( Parameter = .remove_backticks_from_string(row.names(model)), SE = model[, "Std. Error"] ) } #' @rdname model_parameters.htest #' @export model_parameters.coeftest <- model_parameters.ivFixed parameters/R/methods_ivfixed.R0000644000176200001440000000217614716604200016130 0ustar liggesusers#' @export ci.ivFixed <- ci.default #' @export standard_error.ivFixed <- standard_error.coxr #' @export p_value.ivFixed <- function(model, method = "wald", ...) { stat <- insight::get_statistic(model) if (!is.null(stat)) { .data_frame( Parameter = stat$Parameter, p = as.vector(2 * stats::pt( abs(stat$Statistic), df = insight::get_df(model, type = method), lower.tail = FALSE )) ) } } #' @export model_parameters.ivFixed <- function(model, ci = 0.95, ci_method = "wald", keep = NULL, drop = NULL, verbose = TRUE, ...) { out <- .model_parameters_generic( model = model, ci = ci, ci_method = ci_method, merge_by = "Parameter", keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } parameters/R/p_significance.R0000644000176200001440000003104715033425412015706 0ustar liggesusers#' @importFrom bayestestR p_significance #' @export bayestestR::p_significance #' @title Practical Significance (ps) #' #' @description Compute the probability of **Practical Significance** (*ps*), #' which can be conceptualized as a unidirectional equivalence test. It returns #' the probability that an effect is above a given threshold corresponding to a #' negligible effect in the median's direction, considering a parameter's _full_ #' confidence interval. 
In other words, it returns the probability of a clear #' direction of an effect, which is larger than the smallest effect size of #' interest (e.g., a minimal important difference). Its theoretical range is #' from zero to one, but the *ps* is typically larger than 0.5 (to indicate #' practical significance). #' #' In comparison to the [`equivalence_test()`] function, where the *SGPV* #' (second generation p-value) describes the proportion of the _full_ confidence #' interval that is _inside_ the ROPE, the value returned by `p_significance()` #' describes the _larger_ proportion of the _full_ confidence interval that is #' _outside_ the ROPE. This makes `p_significance()` comparable to #' [`bayestestR::p_direction()`]; however, while `p_direction()` compares to a #' point-null by default, `p_significance()` compares to a range-null. #' #' @param x A statistical model. #' @inheritParams bayestestR::p_significance #' @inheritParams model_parameters.default #' @param verbose Toggle warnings and messages. #' @param ... Arguments passed to other methods. #' #' @seealso For more details, see [`bayestestR::p_significance()`]. See also #' [`equivalence_test()`], [`p_function()`] and [`bayestestR::p_direction()`] #' for functions related to checking effect existence and significance. #' #' @details `p_significance()` returns the proportion of the _full_ confidence #' interval range (assuming a normally or t-distributed, equal-tailed interval, #' based on the model) that is outside a certain range (the negligible effect, #' or ROPE, see argument `threshold`). If there are values of the distribution #' both below and above the ROPE, `p_significance()` returns the higher #' probability of a value being outside the ROPE. Typically, this value should #' be larger than 0.5 to indicate practical significance. However, if the range #' of the negligible effect is rather large compared to the range of the #' confidence interval, `p_significance()` will be less than 0.5, which #' indicates no clear practical significance. #' #' Note that the assumed interval, which is used to calculate the practical #' significance, is an estimation of the _full interval_ based on the chosen #' confidence level. For example, if the 95% confidence interval of a #' coefficient ranges from -1 to 1, the underlying _full (normally or #' t-distributed) interval_ approximately ranges from -1.9 to 1.9, see also #' the following code: #' #' ``` #' # simulate full normal distribution #' out <- bayestestR::distribution_normal(10000, 0, 0.5) #' # range of "full" distribution #' range(out) #' # range of 95% CI #' round(quantile(out, probs = c(0.025, 0.975)), 2) #' ``` #' #' This ensures that the practical significance always refers to the general #' compatible parameter space of coefficients. Therefore, the _full interval_ is #' similar to a Bayesian posterior distribution of an equivalent Bayesian model, #' see the following code: #' #' ``` #' library(bayestestR) #' library(brms) #' m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars) #' m2 <- brm(mpg ~ gear + wt + cyl + hp, data = mtcars) #' # probability of significance (ps) for frequentist model #' p_significance(m) #' # similar to ps of Bayesian models #' p_significance(m2) #' # similar to ps of simulated draws / bootstrap samples #' p_significance(simulate_model(m)) #' ``` #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/bayestestR.html) #' implemented in the [**see**-package](https://easystats.github.io/see/). 
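#'
#' As a rough sketch of the underlying idea (a hypothetical illustration, not
#' part of the original documentation): the *ps* corresponds to the larger of
#' the two proportions of the simulated full interval that fall outside the
#' threshold range.
#'
#' ```
#' # full interval for a coefficient of 0.3 with SE = 0.15
#' out <- bayestestR::distribution_normal(10000, 0.3, 0.15)
#' # proportion outside a threshold range of +/- 0.1, in the dominant direction
#' max(mean(out > 0.1), mean(out < -0.1))
#' ```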
#' #' @inheritSection model_parameters Statistical inference - how to quantify evidence #' #' @references #' #' - Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is #' flat (p > 0.05): Significance thresholds and the crisis of unreplicable #' research. PeerJ, 5, e3544. \doi{10.7717/peerj.3544} #' #' - Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference, #' Emphasize Unconditional Compatibility Descriptions of Statistics. (2022) #' https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022) #' #' - Lakens, D. (2024). Improving Your Statistical Inferences (Version v1.5.1). #' Retrieved from https://lakens.github.io/statistical_inferences/. #' \doi{10.5281/ZENODO.6409077} #' #' - Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence Testing #' for Psychological Research: A Tutorial. Advances in Methods and Practices #' in Psychological Science, 1(2), 259–269. #' #' - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019). #' Indices of Effect Existence and Significance in the Bayesian Framework. #' Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767} #' #' - Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical #' science: replace confidence and significance by compatibility and surprise. #' BMC Medical Research Methodology (2020) 20:244. #' #' - Schweder T. Confidence is epistemic probability for empirical science. #' Journal of Statistical Planning and Inference (2018) 195:116–125. #' \doi{10.1016/j.jspi.2017.09.016} #' #' - Schweder T, Hjort NL. Frequentist analogues of priors and posteriors. #' In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory #' Data Confrontation in Economics, pp. 285-217. Princeton University Press, #' Princeton, NJ, 2003 #' #' - Vos P, Holbert D. Frequentist statistical inference without repeated sampling. #' Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x} #' #' @return A data frame with columns for the parameter names, the confidence #' intervals and the values for practical significance. Higher values indicate #' more practical significance (upper bound is one). #' #' @examplesIf requireNamespace("bayestestR") && packageVersion("bayestestR") > "0.14.0" && requireNamespace("sandwich") #' data(qol_cancer) #' model <- lm(QoL ~ time + age + education, data = qol_cancer) #' #' p_significance(model) #' p_significance(model, threshold = c(-0.5, 1.5)) #' #' # based on heteroscedasticity-robust standard errors #' p_significance(model, vcov = "HC3") #' #' if (require("see", quietly = TRUE)) { #' result <- p_significance(model) #' plot(result) #' } #' @export p_significance.lm <- function(x, threshold = "default", ci = 0.95, vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { # generate normal distribution based on CI range result <- .posterior_ci(x, ci, vcov = vcov, vcov_args = vcov_args, ...) 
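  # .posterior_ci() (helper defined below) extracts the confidence intervals
  # and simulates, for each parameter, a normal or t-distribution spanning
  # the full compatibility interval; this simulated distribution is then
  # treated like a posterior sample when computing the ps statistic.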
# copy out <- result$out posterior <- result$posterior # calculate the ROPE range - for multiple thresholds, we have to check # each list element for "default", to replace it with the appropriate range if (is.list(threshold)) { threshold <- lapply(threshold, function(i) { if (all(i == "default")) { i <- bayestestR::rope_range(x, verbose = verbose) } i }) } else if (all(threshold == "default")) { threshold <- bayestestR::rope_range(x, verbose = verbose) } # add ps result_ps <- bayestestR::p_significance( posterior, threshold = threshold, verbose = verbose ) out$ps <- as.numeric(result_ps) # for list-thresholds, we have the list as attribute and need to save it as # data.frame if (is.list(threshold)) { # save for later threshold_data <- stats::setNames( as.data.frame(do.call(rbind, attributes(result_ps)$threshold)), c("ROPE_low", "ROPE_high") ) out <- cbind(out, threshold_data) keep <- c("Parameter", "CI", "CI_low", "CI_high", "ROPE_low", "ROPE_high", "ps", "Effects", "Component") } else { keep <- c("Parameter", "CI", "CI_low", "CI_high", "ps", "Effects", "Component") } # for plot, we need to have it numeric if (!is.numeric(threshold) && !is.list(threshold)) { threshold <- 0.1 } # Reorder columns of 'out' to keep only the relevant ones out <- out[intersect(keep, colnames(out))] attr(out, "data") <- posterior attr(out, "threshold") <- threshold class(out) <- c("p_significance_lm", "p_significance", "see_p_significance", "data.frame") out } # helper ---------------------------------------------------------------------- .posterior_ci <- function(x, ci, vcov = NULL, vcov_args = NULL, ...) { # first, we need CIs if (inherits(x, "parameters_model")) { # for model_parameters objects, directly extract CIs out <- as.data.frame(x)[intersect( c("Parameter", "CI_low", "CI_high", "Component", "Effects"), colnames(x) )] ci <- attributes(x)$ci # and extract degrees of freedom df_column <- grep("(df|df_error)", colnames(x)) if (length(df_column) > 0) { dof <- unique(x[[df_column]]) if (length(dof) > 1) { dof <- Inf } } else { dof <- Inf } } else { out <- ci(x, ci = ci, vcov = vcov, vcov_args = vcov_args, ...) dof <- .safe(insight::get_df(x, type = "wald"), Inf) } # we now iterate all confidence intervals and create an approximate normal # distribution that covers the CI-range. posterior <- as.data.frame(lapply(seq_len(nrow(out)), function(i) { ci_range <- as.numeric(out[i, c("CI_low", "CI_high")]) .generate_posterior_from_ci(ci, ci_range, dof = dof) })) colnames(posterior) <- out$Parameter # deal with Effects and Component columns if ("Effects" %in% colnames(out) && insight::has_single_value(out$Effects, remove_na = TRUE)) { out$Effects <- NULL } if ("Component" %in% colnames(out) && insight::has_single_value(out$Component, remove_na = TRUE)) { out$Component <- NULL } # check we don't have duplicated columns in "posterior" we need this for # plotting if (anyDuplicated(colnames(posterior)) > 0 && !is.null(out$Component)) { comps <- .rename_values(out$Component, "zero_inflated", "zi") comps <- .rename_values(comps, "conditional", "cond") colnames(posterior) <- paste0(out$Parameter, "_", comps) out$Parameter <- paste0(out$Parameter, "_", comps) } list(out = out, posterior = posterior) } # methods --------------------------------------------------------------------- #' @export print.p_significance_lm <- function(x, digits = 2, ...) 
{ threshold <- attributes(x)$threshold # Check if threshold is a list, which indicates multiple thresholds if (is.list(threshold)) { caption <- "Practical Significance" } else { # make sure it's numeric if (!is.numeric(threshold)) { threshold <- 0.1 } # make sure we have both bounds for the range if (length(threshold) == 1) { threshold <- c(threshold * -1, threshold) } caption <- sprintf( "Practical Significance (threshold: %s)", toString(insight::format_value(threshold, digits = 2)) ) } x$ps <- insight::format_pd(x$ps, name = NULL) x <- insight::format_table(x, digits = digits) cat(insight::export_table(x, title = caption, ...)) } # other classes -------------------------------------------------------------- #' @export p_significance.glm <- p_significance.lm #' @export p_significance.coxph <- p_significance.lm #' @export p_significance.svyglm <- p_significance.lm #' @export p_significance.glmmTMB <- p_significance.lm #' @export p_significance.merMod <- p_significance.lm #' @export p_significance.wbm <- p_significance.lm #' @export p_significance.lme <- p_significance.lm #' @export p_significance.gee <- p_significance.lm #' @export p_significance.gls <- p_significance.lm #' @export p_significance.feis <- p_significance.lm #' @export p_significance.felm <- p_significance.lm #' @export p_significance.mixed <- p_significance.lm #' @export p_significance.hurdle <- p_significance.lm #' @export p_significance.zeroinfl <- p_significance.lm #' @export p_significance.rma <- p_significance.lm #' @export p_significance.parameters_model <- p_significance.lm parameters/R/utils_pca_efa.R0000644000176200001440000004341615053035103015542 0ustar liggesusers#' Get Scores from Principal Component or Factor Analysis (PCA/FA) #' #' `get_scores()` assigns each item to the component it loads on most #' strongly and then computes the average of the items belonging to each #' component. This results in a sum score for each component from the PCA/FA, #' which is on the same scale as the original, single items that were used to #' compute the PCA/FA. #' #' @param x An object returned by [principal_components()] or [factor_analysis()]. #' @param n_items Number of required (i.e. non-missing) items to build the sum #' score for an observation. If an observation has fewer than `n_items` #' non-missing values across the items of a (sub)scale, `NA` is returned for #' that observation; otherwise, the sum score of all available (sub)scale items #' is calculated. If `NULL`, #' the value is chosen to match half of the number of columns in a data frame, #' i.e. no more than 50% missing values are allowed. #' #' @details #' `get_scores()` takes the results from [`principal_components()`] or #' [`factor_analysis()`] and extracts the variables for each component found by #' the PCA/FA. Then, for each of these "subscales", row means are calculated #' (which equals adding up the single items and dividing by the number of #' items). This results in a sum score for each component from the PCA/FA, which #' is on the same scale as the original, single items that were used to compute #' the PCA/FA. #' #' @return A data frame with subscales, which are average sum scores for all #' items from each component or factor. #' #' @seealso Functions to carry out a PCA ([`principal_components()`]) or #' a FA ([`factor_analysis()`]). [`factor_scores()`] extracts factor scores #' from an FA object. 
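#'
#' A minimal sketch of the scoring rule used internally (a hypothetical
#' two-item subscale; the mean is only computed for rows with at least
#' `n_items` non-missing values, here 1):
#'
#' ```
#' items <- data.frame(a = c(1, 2, NA), b = c(3, NA, NA))
#' apply(items, 1, function(i) {
#'   if (sum(!is.na(i)) >= 1) mean(i, na.rm = TRUE) else NA
#' })
#' #> [1]  2  2 NA
#' ```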
#' #' @examplesIf insight::check_if_installed("psych", quietly = TRUE) #' pca <- principal_components(mtcars[, 1:7], n = 2, rotation = "varimax") #' #' # PCA extracted two components #' pca #' #' # assignment of items to each component #' closest_component(pca) #' #' # now we want to have sum scores for each component #' get_scores(pca) #' #' # compare to manually computed sum score for 2nd component, which #' # consists of items "hp" and "qsec" #' (mtcars$hp + mtcars$qsec) / 2 #' #' @export get_scores <- function(x, n_items = NULL) { subscales <- closest_component(x) dataset <- attributes(x)$dataset out <- lapply(sort(unique(subscales)), function(.subscale) { columns <- names(subscales)[subscales == .subscale] items <- dataset[columns] if (is.null(n_items)) { .n_items <- round(ncol(items) / 2) } else { .n_items <- n_items } apply(items, 1, function(i) { if (sum(!is.na(i)) >= .n_items) { mean(i, na.rm = TRUE) } else { NA } }) }) out <- as.data.frame(do.call(cbind, out)) colnames(out) <- sprintf("Component_%i", seq_len(ncol(out))) out } # model parameters ----------------------------------------------------------------- #' @export model_parameters.parameters_efa <- function(model, ...) { x <- attributes(model)$summary if (inherits(model, "parameters_efa")) { class(x) <- c("parameters_efa_summary", class(model)) } else { class(x) <- c("parameters_pca_summary", class(model)) } x } #' @export model_parameters.parameters_pca <- model_parameters.parameters_efa # summary ----------------------------------------------------------------- #' @export summary.parameters_efa <- function(object, ...) { x <- attributes(object)$summary cols <- intersect( c("Std_Dev", "Eigenvalues", "Variance", "Variance_Cumulative", "Variance_Proportion"), colnames(x) ) x <- as.data.frame(t(x[, cols])) x <- cbind(data.frame(Parameter = row.names(x), stringsAsFactors = FALSE), x) names(x) <- c("Parameter", attributes(object)$summary$Component) row.names(x) <- NULL if (.is_oblique_rotation(attributes(object)$rotation)) { factor_correlations <- attributes(object)$model$Phi if (!is.null(factor_correlations)) { attr(x, "factor_correlations") <- datawizard::rownames_as_column( as.data.frame(factor_correlations), var = "Factor" ) } } if (inherits(object, "parameters_efa")) { class(x) <- c("parameters_efa_summary", class(object)) } else { class(x) <- c("parameters_pca_summary", class(object)) } x } #' @export summary.parameters_pca <- summary.parameters_efa #' @export summary.parameters_omega <- function(object, ...) { class(object) <- c("parameters_omega_summary", "data.frame") object } # predict ----------------------------------------------------------------- #' @rdname principal_components #' @export predict.parameters_efa <- function(object, newdata = NULL, names = NULL, keep_na = TRUE, verbose = TRUE, ...) { attri <- attributes(object) # handle if no data is provided if (is.null(newdata)) { # check if we have scores attribute - these will be returned directly if ("scores" %in% names(attri)) { out <- as.data.frame(attri$scores) if (isTRUE(keep_na)) { out <- .merge_na(object, out, verbose) } } else if ("dataset" %in% names(attri)) { # if we have data, use that for prediction d <- attri$dataset d <- d[vapply(d, is.numeric, logical(1))] out <- as.data.frame(stats::predict(attri$model, newdata = d)) } else { insight::format_error( "Could not retrieve data or model. Please report an issue on {.url https://github.com/easystats/parameters/issues}." 
# nolint ) } } else if (inherits(attri$model, "spca")) { # https://github.com/erichson/spca/issues/7 newdata <- newdata[names(attri$model$center)] if (attri$standardize) { newdata <- sweep(newdata, MARGIN = 2, STATS = attri$model$center, FUN = "-", check.margin = TRUE) newdata <- sweep(newdata, MARGIN = 2, STATS = attri$model$scale, FUN = "/", check.margin = TRUE) } out <- as.matrix(newdata) %*% as.matrix(attri$model$loadings) out <- stats::setNames(as.data.frame(out), paste0("Component", seq_len(ncol(out)))) } else if (inherits(attri$model, c("psych", "fa", "principal"))) { out <- as.data.frame(stats::predict(attri$model, data = newdata[rownames(attri$model$weights)], ...)) } else { out <- as.data.frame(stats::predict(attri$model, newdata = newdata, ...)) } if (!is.null(names)) { names(out)[seq_along(names)] <- names } row.names(out) <- NULL out } #' @export predict.parameters_pca <- predict.parameters_efa .merge_na <- function(object, out, verbose = TRUE) { compl_cases <- attributes(object)$complete_cases if (is.null(compl_cases)) { if (verbose) { insight::format_alert( "Could not retrieve information about missing data. Returning only complete cases." ) } } else { original_data <- data.frame(.parameters_merge_id = seq_along(compl_cases)) out$.parameters_merge_id <- (seq_len(nrow(original_data)))[compl_cases] out <- merge(original_data, out, by = ".parameters_merge_id", all = TRUE, sort = TRUE) out$.parameters_merge_id <- NULL } out } # print ------------------------------------------------------------------- #' @export print.parameters_efa_summary <- function(x, digits = 3, ...) { # we may have factor correlations fc <- attributes(x)$factor_correlations if ("Parameter" %in% names(x)) { x$Parameter <- c( "Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)" ) } else if ("Component" %in% names(x)) { names(x) <- c( "Component", "Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)" ) } cat(insight::export_table( x, digits = digits, caption = c("# (Explained) Variance of Components", "blue"), format = "text", ... )) if (!is.null(fc)) { cat("\n") cat(insight::export_table( fc, digits = digits, caption = c("# Factor Correlations", "blue"), format = "text", ... )) } invisible(x) } #' @export print.parameters_pca_summary <- print.parameters_efa_summary #' @rdname principal_components #' @export print.parameters_efa <- function(x, digits = 2, sort = FALSE, threshold = NULL, labels = NULL, ...) { # extract attributes if (is.null(threshold)) { threshold <- attributes(x)$threshold } cat(.print_parameters_cfa_efa( x, threshold = threshold, sort = sort, format = "text", digits = digits, labels = labels, ... )) invisible(x) } #' @export print.parameters_pca <- print.parameters_efa #' @export print.parameters_omega <- print.parameters_efa #' @export print.parameters_omega_summary <- function(x, ...) 
{ out <- .print_omega_summary(x) cat(insight::export_table(out$tables, caption = out$captions, format = "text", ...)) invisible(x) } # print-helper ---------------------- .print_omega_summary <- function(x, format = "text") { caption1 <- NULL caption2 <- NULL caption3 <- NULL # extract model model <- attributes(x)$model if (!is.null(model)) { stats <- data.frame( Statistic = c("Alpha", "G.6", "Omega (hierarchical)", "Omega (asymptotic H)", "Omega (total)"), Coefficient = c(model$alpha, model$G6, model$omega_h, model$omega.lim, model$omega.tot) ) if (format == "text") { caption1 <- c("# Omega Statistics", "blue") } else { caption1 <- "Omega Statistics" } } # extract summary tables omega_coefficients <- attributes(x)$omega_coefficients variance_summary <- attributes(x)$summary # rename columns if (!is.null(omega_coefficients)) { names(omega_coefficients) <- c( "Composite", "Omega (total)", "Omega (hierarchical)", "Omega (group)" ) if (format == "text") { caption2 <- c("# Omega Coefficients", "blue") } else { caption2 <- "Omega Coefficients" } } if (!is.null(variance_summary)) { names(variance_summary) <- c( "Composite", "Total (%)", "General Factor (%)", "Group Factor (%)" ) if (format == "text") { caption3 <- c("# Variances", "blue") } else { caption3 <- "Variances" } } # list for export out <- insight::compact_list(list(stats, omega_coefficients, variance_summary)) captions <- insight::compact_list(list(caption1, caption2, caption3)) list(tables = out, captions = captions) } .print_parameters_cfa_efa <- function(x, threshold, sort, format, digits, labels, ...) { # html engine? engine <- .check_format_backend(...) # Method if (inherits(x, "parameters_pca")) { method <- "Principal Component Analysis" } else if (inherits(x, "parameters_efa")) { method <- "Factor Analysis" } else { method <- "Omega" } # Rotation rotation_name <- attr(x, "rotation", exact = TRUE) # Labels if (!is.null(labels)) { x$Label <- labels x <- x[c("Variable", "Label", names(x)[!names(x) %in% c("Variable", "Label")])] } # Sorting if (isTRUE(sort)) { x <- .sort_loadings(x) } # Replace by NA all cells below threshold if (!is.null(threshold)) { x <- .filter_loadings(x, threshold = threshold) } # table caption if (is.null(rotation_name) || rotation_name == "none") { if (format %in% c("markdown", "html")) { table_caption <- sprintf("Loadings from %s (no rotation)", method) } else { table_caption <- c(sprintf("# Loadings from %s (no rotation)", method), "blue") } } else if (format %in% c("markdown", "html")) { table_caption <- sprintf("Rotated loadings from %s (%s-rotation)", method, rotation_name) } else { table_caption <- c(sprintf("# Rotated loadings from %s (%s-rotation)", method, rotation_name), "blue") } # footer if (is.null(attributes(x)$type)) { footer <- NULL } else { footer <- c(.text_components_variance(x, sep = ifelse(format %in% c("markdown", "html"), "", "\n")), "yellow") } # alignment? if (is.null(labels)) { alignment <- NULL } else { alignment <- paste(c("ll", rep("r", ncol(x) - 2)), collapse = "") } # set engine for html format if (format == "html" && identical(engine, "tt")) { format <- "tt" } insight::export_table( x, digits = digits, format = format, caption = table_caption, footer = footer, align = alignment, ... 
) } #' @keywords internal .text_components_variance <- function(x, sep = "") { type <- attributes(x)$type if (type %in% c("prcomp", "principal", "pca")) { type <- "principal component" } else if (type == "fa") { type <- "latent factor" } else if (type %in% c("kmeans", "hclust", "pvclust", "dbscan", "mixture", "pam")) { type <- "cluster" } else { type <- paste0(type, " component") } if (type == "cluster") { cluster_summary <- as.data.frame(x) variance <- attributes(x)$variance * 100 } else { cluster_summary <- attributes(x)$summary variance <- max(cluster_summary$Variance_Cumulative) * 100 } if (nrow(cluster_summary) == 1) { text_variance <- paste0("The unique ", type) } else { text_variance <- paste0("The ", nrow(cluster_summary), " ", type, "s") } # rotation if (!is.null(attributes(x)$rotation) && attributes(x)$rotation != "none") { text_variance <- paste0(text_variance, " (", attributes(x)$rotation, " rotation)") } text_variance <- paste0( text_variance, " accounted for ", sprintf("%.2f", variance), "% of the total variance of the original data" ) if (type == "cluster" || nrow(cluster_summary) == 1) { text_variance <- paste0(text_variance, ".") } else { text_variance <- paste0( text_variance, " (", paste0(cluster_summary$Component, " = ", sprintf("%.2f", cluster_summary$Variance * 100), "%", collapse = ", " ), ")." ) } paste0(sep, text_variance, sep) } # sort -------------------------------------------------------------------- #' @rdname principal_components #' @export sort.parameters_efa <- function(x, ...) { .sort_loadings(x) } #' @export sort.parameters_pca <- sort.parameters_efa #' @keywords internal .sort_loadings <- function(loadings, cols = NULL) { if (is.null(cols)) { cols <- attributes(loadings)$loadings_columns } # Remove variable name column x <- loadings[, cols, drop = FALSE] row.names(x) <- NULL # Initialize clusters nitems <- nrow(x) loads <- data.frame(item = seq(1:nitems), cluster = rep(0, nitems)) # first sort them into clusters: Find the maximum for each row and assign it to that cluster loads$cluster <- apply(abs(x), 1, which.max) ord <- sort(loads$cluster, index.return = TRUE) x[1:nitems, ] <- x[ord$ix, ] rownames(x)[1:nitems] <- rownames(x)[ord$ix] total.ord <- ord$ix # now sort column wise so that the loadings that have their highest loading on each cluster items <- table(loads$cluster) # how many items are in each cluster? 
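  # within each cluster, re-order its items by the absolute size of their
  # loading on that cluster's column, so the strongest markers come first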
first <- 1 item <- loads$item for (i in seq_along(items)) { if (items[i] > 0) { last <- first + items[i] - 1 ord <- sort(abs(x[first:last, i]), decreasing = TRUE, index.return = TRUE) x[first:last, ] <- x[item[ord$ix + first - 1], ] loads[first:last, 1] <- item[ord$ix + first - 1] rownames(x)[first:last] <- rownames(x)[ord$ix + first - 1] total.ord[first:last] <- total.ord[ord$ix + first - 1] first <- first + items[i] } } row_order <- row.names(x) loadings <- loadings[as.numeric(as.character(row_order)), ] # Arrange by max row.names(loadings) <- NULL loadings } # Filter -------------------------------------------------------------------- #' @keywords internal .filter_loadings <- function(loadings, threshold = 0.2, loadings_columns = NULL) { if (is.null(loadings_columns)) { loadings_columns <- attributes(loadings)$loadings_columns } if (threshold == "max" || threshold >= 1) { if (threshold == "max") { for (row in seq_len(nrow(loadings))) { maxi <- max(abs(loadings[row, loadings_columns, drop = FALSE])) loadings[row, loadings_columns][abs(loadings[row, loadings_columns]) < maxi] <- NA } } else { for (col in loadings_columns) { loadings[utils::tail(order(abs(loadings[, col]), decreasing = TRUE), -round(threshold)), col] <- NA } } } else { loadings[, loadings_columns][abs(loadings[, loadings_columns]) < threshold] <- NA } loadings } # closest_component ------------------------------------------------------- #' @rdname principal_components #' @export closest_component <- function(x) { if ("closest_component" %in% names(attributes(x))) { attributes(x)$closest_component } else { .closest_component(x) } } .closest_component <- function(loadings, loadings_columns = NULL, variable_names = NULL) { if (is.matrix(loadings)) loadings <- as.data.frame(loadings) if (is.null(loadings_columns)) loadings_columns <- seq_len(ncol(loadings)) if (is.null(variable_names)) variable_names <- row.names(loadings) component_columns <- apply(loadings[loadings_columns], 1, function(i) which.max(abs(i))) stats::setNames(component_columns, variable_names) } parameters/R/dominance_analysis.R0000644000176200001440000005330414736731407016622 0ustar liggesusers#' @title Dominance Analysis #' @name dominance_analysis #' @inheritParams domir::domin #' #' @description Computes Dominance Analysis Statistics and Designations #' #' @param model A model object supported by `performance::r2()`. See 'Details'. #' #' @param sets A (named) list of formula objects with no left hand #' side/response. If the list has names, the name provided each element #' will be used as the label for the set. Unnamed list elements will be #' provided a set number name based on its position among the sets as entered. #' #' Predictors in each formula are bound together as a set in the dominance #' analysis and dominance statistics and designations are computed for #' the predictors together. Predictors in `sets` must be present in the model #' submitted to the `model` argument and cannot be in the `all` argument. #' #' @param all A formula with no left hand side/response. #' #' Predictors in the formula are included in each subset in the dominance #' analysis and the R2 value associated with them is subtracted from the #' overall value. Predictors in `all` must be present in the model #' submitted to the `model` argument and cannot be in the `sets` argument. #' #' @param quote_args A character vector of arguments in the model submitted to #' `model` to `quote()` prior to submitting to the dominance analysis. 
This #' is necessary for data masked arguments (e.g., `weights`) to prevent them #' from being evaluated before being applied to the model and causing an error. #' #' @param contrasts A named list of [`contrasts`] used by the model object. #' This list is required in order for the correct mapping of parameters to #' predictors in the output when the model creates indicator codes for factor #' variables using [`insight::get_modelmatrix()`]. By default, the `contrast` #' element from the model object submitted is used. If the model object does #' not have a `contrast` element, the user can supply this named list. #' #' @param ... Currently not used. #' #' @return Object of class `"parameters_da"`. #' #' An object of class `"parameters_da"` is a list of `data.frame`s composed #' of the following elements: #' \describe{ #' \item{`General`}{A `data.frame` which associates dominance statistics with #' model parameters. The variables in this `data.frame` include: #' \describe{ #' \item{`Parameter`}{Parameter names.} #' \item{`General_Dominance`}{Vector of general dominance statistics. #' The R2 ascribed to variables in the `all` argument is also reported #' here, though it is not a general dominance statistic.} #' \item{`Percent`}{Vector of general dominance statistics normalized #' to sum to 1.} #' \item{`Ranks`}{Vector of ranks applied to the general dominance #' statistics.} #' \item{`Subset`}{Names of the subset to which the parameter belongs in #' the dominance analysis. All other `data.frame`s returned will refer #' to these subset names.}}} #' \item{`Conditional`}{A `data.frame` of conditional dominance #' statistics. Each observation represents a subset and each variable #' represents the average increment to R2 with a specific number of #' subsets in the model. `NULL` if `conditional` argument is `FALSE`.} #' \item{`Complete`}{A `data.frame` of complete dominance #' designations. The subsets in the observations are compared to the #' subsets referenced in each variable. Whether the subset #' in each variable dominates the subset in each observation is #' represented in the logical value. `NULL` if `complete` #' argument is `FALSE`.} #' } #' #' @details Computes two decompositions of the model's R2 and returns #' a matrix of designations from which predictor relative importance #' determinations can be obtained. #' #' Note in the output that the "constant" subset is associated with a #' component of the model that does not directly contribute to the R2, such #' as an intercept. The "all" subset is apportioned a component of the fit #' statistic but is not considered a part of the dominance analysis and #' therefore does not receive a rank, conditional dominance statistics, or #' complete dominance designations. #' #' The input model is parsed using `insight::find_predictors()`; it does not #' yet support interactions, transformations, or offsets applied in the R #' formula, and will fail with an error if any such terms are detected. #' #' The model submitted must accept a formula object as a `formula` #' argument. In addition, the model object must accept the data on which #' the model is estimated as a `data` argument. Formulas submitted #' using object references (e.g., `lm(mtcars$mpg ~ mtcars$vs)`) and #' functions that accept data as a non-`data` argument #' (e.g., `survey::svyglm()` uses `design`) will fail with an error. 
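#'
#' For instance (a hypothetical illustration, not from the package's own
#' documentation), a model with an in-formula transformation should be refit
#' on a pre-computed variable before running the dominance analysis:
#'
#' ```
#' # not supported: lm(mpg ~ log(wt) + hp, data = mtcars)
#' mtcars$log_wt <- log(mtcars$wt)
#' m <- lm(mpg ~ log_wt + hp, data = mtcars)
#' dominance_analysis(m)
#' ```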
#' #' Models that return `TRUE` for the `insight::model_info()` #' function's values "is_bayesian", "is_mixed", "is_gam", #' "is_multivariate", "is_zero_inflated", #' or "is_hurdle" are not currently supported. #' #' When `performance::r2()` returns multiple values, only the first is used #' by default. #' #' @references #' - Azen, R., & Budescu, D. V. (2003). The dominance analysis approach #' for comparing predictors in multiple regression. Psychological Methods, #' 8(2), 129-148. doi:10.1037/1082-989X.8.2.129 #' #' - Budescu, D. V. (1993). Dominance analysis: A new approach to the #' problem of relative importance of predictors in multiple regression. #' Psychological Bulletin, 114(3), 542-551. doi:10.1037/0033-2909.114.3.542 #' #' - Groemping, U. (2007). Estimators of relative importance in linear #' regression based on variance decomposition. The American Statistician, #' 61(2), 139-147. doi:10.1198/000313007X188252 #' #' @seealso [domir::domin()] #' #' @author Joseph Luchman #' #' @examplesIf require("domir") && require("performance") #' data(mtcars) #' #' # Dominance Analysis with Logit Regression #' model <- glm(vs ~ cyl + carb + mpg, data = mtcars, family = binomial()) #' #' performance::r2(model) #' dominance_analysis(model) #' #' # Dominance Analysis with Weighted Logit Regression #' model_wt <- glm(vs ~ cyl + carb + mpg, #' data = mtcars, #' weights = wt, family = quasibinomial() #' ) #' #' dominance_analysis(model_wt, quote_args = "weights") #' @export dominance_analysis <- function(model, sets = NULL, all = NULL, conditional = TRUE, complete = TRUE, quote_args = NULL, contrasts = model$contrasts, ...) { # Exit Conditions ---- insight::check_if_installed("domir") insight::check_if_installed("performance") if (!insight::is_regression_model(model)) { insight::format_error( paste(deparse(substitute(model)), "is not a supported {.pkg insight} model."), "You may be able to dominance analyze this model using the {.pkg domir} package." ) } if (!any(utils::.S3methods("r2", class = class(model)[[1]], envir = getNamespace("performance")) %in% paste0("r2.", class(model)))) { insight::format_error( paste(deparse(substitute(model)), "does not have a {.pkg performance}-supported `r2()` method."), "You may be able to dominance analyze this model using the {.pkg domir} package." ) } model_info <- insight::model_info(model) if (any(unlist(model_info[c("is_bayesian", "is_mixed", "is_gam", "is_multivariate", "is_zero_inflated", "is_hurdle")]))) { insight::format_error( paste0("`dominance_analysis()` does not yet support models of class `", class(model)[[1]], "`."), "You may be able to dominance analyze this model using the {.pkg domir} package." ) } if (length(insight::find_predictors(model, flatten = TRUE)) < 2) { insight::format_error("Too few predictors for a dominance analysis.") } if (!is.null(insight::find_offset(model))) { insight::format_error( "Offsets in the model are not allowed in this version of `dominance_analysis()`.", "Try using package {.pkg domir}." ) } if (!all(insight::find_predictors(model, flatten = TRUE) %in% insight::find_terms(model)$conditional)) { insight::format_error( "Predictors do not match terms.", "This usually occurs when there are in-formula predictor transformations such as `log(x)` or `I(x+z)`.", "`dominance_analysis()` cannot yet accommodate such terms. Reformat your model to ensure all parameters", "match predictors in the data or use the {.pkg domir} package." 
) } if (!is.null(insight::find_interactions(model))) { insight::format_error("Interactions in the model formula are not allowed.") } if (!is.null(sets)) { if (!is.list(sets)) { insight::format_error("`sets` argument must be submitted as a list.") } if (length(sets) != length(unlist(sets))) { insight::format_error("Nested lists are not allowed in `sets`.") } if (!all(sapply(sets, inherits, "formula"))) { insight::format_error("Each element of the list in `sets` must be a formula.") } if (any(sapply(sets, function(x) attr(stats::terms(x), "response") == 1))) { insight::format_error("Formulas in `sets` argument must not have responses/left hand sides.") } } if (!is.null(all)) { if (!inherits(all, "formula")) { insight::format_error("`all` argument must be submitted as a formula.") } if (attr(stats::terms(all), "response") == 1) { insight::format_error("Formula in `all` argument must not have a response/left hand side.") } } if (!is.null(quote_args) && !all(is.character(quote_args))) { insight::format_error("All arguments in `quote_args` must be characters.") } # Collect components for arguments ---- ivs <- insight::find_predictors(model, flatten = TRUE) dv <- insight::find_response(model) # reg <- insight::model_name(model) # insight::get_call + as.list() and take first element? glm.nb doesn't work... reg <- as.list(insight::get_call(model))[[1]] # Process sets ---- if (!is.null(sets)) { # gather predictors from each set sets_processed <- lapply(sets, function(x) attr(stats::terms(x), "term.labels")) # remove predictors from `ivs` list if in sets set_remove_loc <- unlist(lapply(sets_processed, function(x) which(ivs %in% x))) if (length(set_remove_loc) != length(unlist(sets_processed))) { wrong_set_terms <- unlist(sets_processed)[which(!(unlist(sets_processed) %in% ivs))] insight::format_error( "Terms", paste(wrong_set_terms, sep = " "), "in `sets` argument do not match any predictors in model." ) } ivs <- ivs[-set_remove_loc] # apply names to sets set_names <- names(sets) missing_set_names <- which(set_names == "") if (length(missing_set_names) > 0) { set_names[missing_set_names] <- paste0("set", missing_set_names) } if (any(set_names %in% c("all", "constant"))) { insight::format_error( "Names \"all\" and \"constant\" are reserved for subset names in the `dominance_analysis()` function.", "Please rename any sets currently named \"all\" or \"constant\"." ) } if (any(set_names %in% ivs)) { repeat_names <- set_names[which(set_names %in% ivs)] insight::format_error( "Set names", paste(repeat_names, sep = " "), "are also the names of individual predictors.", "Please rename these sets." ) } } else { sets_processed <- NULL } # Process all ---- if (!is.null(all)) { # gather predictors in all all_processed <- attr(stats::terms(all), "term.labels") # remove predictors in all from `ivs` list all_remove_loc <- which(ivs %in% all_processed) if (any(all_processed %in% unlist(sets_processed))) { reused_terms <- all_processed[which(all_processed %in% unlist(sets_processed))] insight::format_error( "Terms", paste(reused_terms, sep = " "), "in `all` argument are also used in `sets` argument." ) } if (length(all_remove_loc) != length(unlist(all_processed))) { wrong_all_terms <- all_processed[which(!(all_processed) %in% ivs)] insight::format_error( "Terms", paste(wrong_all_terms, sep = " "), "in `all` argument do not match any predictors in model." 
) } ivs <- ivs[-all_remove_loc] # update IVs } else { all_processed <- NULL } # name collisions across subsets - exit if (any(ivs %in% c("all", "constant"))) { insight::format_error( "Names 'all' and 'constant' are reserved for subset names in the `dominance_analysis()` function.", "Please rename any predictors currently named 'all' or 'constant.'", "Alternatively, put the predictor in a set by itself." ) } # big DA warning if (length(c(ivs, unlist(sets_processed))) > 15) { insight::format_warning( paste0("Total of ", 2^length(ivs) - 1, " models to be estimated."), "Process may take some time." ) } # Build non-formula model arguments to `domin` ---- if (length(ivs) == 0) ivs <- "1" fml <- stats::reformulate(ivs, response = dv, intercept = insight::has_intercept(model)) data <- insight::get_data(model, verbose = FALSE) args <- as.list(insight::get_call(model), collapse = "") # extract all arguments from call loc <- which(!(names(args) %in% c("formula", "data"))) # find formula and data arguments if (length(which(names(args) %in% c("formula", "data"))) != 2) { # exit if formula and data arguments missing insight::format_error("Model submitted does not have a formula and `data` argument.") } args <- args[loc] # remove formula and data arguments args <- args[-1] # remove function name # quote arguments for domin for (arg in quote_args) { if (arg %in% names(args)) { args[[arg]] <- str2lang(paste0("quote(", deparse(args[[arg]]), ")", collapse = "")) } else { insight::format_error(arg, " in `quote_args` not among arguments in model.") } } # Internal wrapper to ensure r2 values conform to domin ---- .r2_wrap <- function(model, ...) { list(fitstat = performance::r2(model, ...)[[1]]) } # Finalize and implement DA args2domin <- append(list( formula_overall = fml, reg = reg, fitstat = list(.r2_wrap, "fitstat"), data = data, conditional = conditional, complete = complete, sets = sets_processed, all = all_processed ), args) utils::capture.output({ domir_res <- do.call(domir::domin, args2domin) }) # Set up returned data.frames ---- # Apply set names to domin results if (!is.null(sets)) { names(domir_res$General_Dominance) <- c( names(domir_res$General_Dominance)[1:(length(domir_res$General_Dominance) - length(set_names))], set_names ) if (conditional) { rownames(domir_res$Conditional_Dominance) <- names(domir_res$General_Dominance) } } if (complete) { colnames(domir_res$Complete_Dominance) <- paste0("dmn_", names(domir_res$General_Dominance)) dimnames(domir_res$Complete_Dominance) <- list( colnames(domir_res$Complete_Dominance), names(domir_res$General_Dominance) ) domir_res$Complete_Dominance <- t(domir_res$Complete_Dominance) } # Map parameter names to subsets - structure set-up da_df_res <- da_df_cat <- .data_frame(parameter = insight::find_parameters(model, flatten = TRUE)) da_df_cat <- .data_frame(da_df_cat, subset = NA_character_) # if parameter is same as domin name, copy it to 'subset' da_df_cat$subset <- ifelse((da_df_res$parameter %in% names(domir_res$General_Dominance)) & (is.na(da_df_cat$subset)), da_df_res$parameter, da_df_cat$subset ) # Expand contrast names if (!is.null(contrasts)) { contr_names <- lapply( names(contrasts), function(name) { pred_loc <- which(insight::find_predictors(model, flatten = TRUE) == name) pred_names <- colnames(insight::get_modelmatrix(model))[ which(attr(insight::get_modelmatrix(model), "assign") == pred_loc) ] } ) names(contr_names) <- names(contrasts) contr_map <- rep(names(contr_names), lengths(contr_names)) names(contr_map) <- unlist(contr_names) for (subset 
in which(is.na(da_df_cat$subset))) { if ((da_df_res$parameter[[subset]] %in% names(contr_map))) { da_df_cat$subset[[subset]] <- contr_map[[which(names(contr_map) == da_df_res$parameter[[subset]])]] } } } # Apply set names if (!is.null(sets)) { for (set in seq_along(sets)) { set_name <- if (!is.null(names(sets)[[set]])) { names(sets)[[set]] } else { paste0("set", set) } da_df_cat$subset <- replace( da_df_cat$subset, da_df_res$parameter %in% all.vars(sets[[set]]), set_name ) da_df_cat$subset <- replace( da_df_cat$subset, da_df_cat$subset %in% all.vars(sets[[set]]), set_name ) } } # Apply 'all' names if (!is.null(all)) { da_df_cat$subset <- replace( da_df_cat$subset, da_df_res$parameter %in% all.vars(all), "all" ) da_df_cat$subset <- replace( da_df_cat$subset, da_df_cat$subset %in% all.vars(all), "all" ) } # assume remaining parameters are part of 'constant' da_df_cat$subset <- replace( da_df_cat$subset, is.na(da_df_cat$subset), "constant" ) # merge subsets and DA results to parameter names da_df_res <- datawizard::data_merge( da_df_cat, .data_frame( subset = names(domir_res$General_Dominance), general_dominance = domir_res$General_Dominance ) ) # plug in value of 'all' in 'all' subsets/parameters if (!is.null(all)) { da_df_res$general_dominance <- replace( da_df_res$general_dominance, da_df_res$subset == "all", domir_res$Fit_Statistic_All_Subsets ) } # merge standardized general dominance stat values da_df_res <- datawizard::data_merge( da_df_res, .data_frame( subset = names(domir_res$General_Dominance), standardized = domir_res$Standardized ) ) # merge ranks based on general dominance stat values da_df_res <- datawizard::data_merge( da_df_res, .data_frame( subset = names(domir_res$General_Dominance), ranks = domir_res$Ranks ) ) da_df_res <- datawizard::data_relocate(da_df_res, "subset", after = "ranks") if (conditional) { da_df_cdl <- .data_frame(Subset = names(domir_res$General_Dominance)) da_df_cdl <- datawizard::data_merge( da_df_cdl, .data_frame( Subset = names(domir_res$General_Dominance), domir_res$Conditional_Dominance ) ) cols_to_select <- colnames(da_df_cdl)[2:length(da_df_cdl)] da_df_cdl <- datawizard::data_rename( da_df_cdl, select = cols_to_select, replacement = colnames(domir_res$Conditional_Dominance) ) } else { da_df_cdl <- NULL } if (complete) { da_df_cpt <- .data_frame(Subset = names(domir_res$General_Dominance)) da_df_cpt <- datawizard::data_merge( da_df_cpt, .data_frame( Subset = names(domir_res$General_Dominance), domir_res$Complete_Dominance ) ) cols_to_select <- colnames(da_df_cpt)[2:length(da_df_cpt)] da_df_cpt <- datawizard::data_rename( da_df_cpt, select = cols_to_select, replacement = colnames(domir_res$Complete_Dominance) ) } else { da_df_cpt <- NULL } da_df_res <- datawizard::data_rename( da_df_res, replacement = c( "Parameter", "General_Dominance", "Percent", "Ranks", "Subset" ) ) da_list <- list( General = da_df_res, Conditional = da_df_cdl, Complete = da_df_cpt ) # add attributes and class attr(da_list, "model_R2") <- domir_res$Fit_Statistic_Overall attr(da_list$General, "table_title") <- "General Dominance Statistics" if (conditional) attr(da_list$Conditional, "table_title") <- "Conditional Dominance Statistics" if (complete) attr(da_list$Complete, "table_title") <- "Complete Dominance Designations" class(da_list) <- "parameters_da" da_list } # methods ------------------------------ #' @export print.parameters_da <- function(x, digits = 3, ...) 
{ insight::print_color("# Dominance Analysis Results", "blue") cat("\n\n") cat("Model R2 Value: ", sprintf("%.*f", digits, attr(x, "model_R2")), "\n\n") printed_x <- x printed_x$General <- datawizard::data_rename(x$General, select = "General_Dominance", replacement = "General Dominance" ) if (!is.null(x$Conditional)) { cdl_col <- ncol(x$Conditional) cdl_names <- paste0("IVs_", 1:(cdl_col - 1)) cdl_names_rep <- paste("IVs:", 1:(cdl_col - 1)) printed_x$Conditional <- datawizard::data_rename(x$Conditional, select = cdl_names, replacement = cdl_names_rep ) } if (!is.null(x$Complete)) { cpt_names <- names(x$Complete)[-1] cpt_names_rep <- gsub( "dmn_", "< ", cpt_names, fixed = TRUE ) printed_x$Complete <- datawizard::data_rename(x$Complete, select = cpt_names, replacement = cpt_names_rep ) } cat(insight::export_table(printed_x, digits = digits, ...)) invisible(x) } parameters/R/methods_metaplus.R0000644000176200001440000002713414736731407016325 0ustar liggesusers# metaplus ###### .metaplus ------------------- #' @export model_parameters.metaplus <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, include_studies = TRUE, keep = NULL, drop = NULL, verbose = TRUE, ...) { if (!missing(ci)) { if (isTRUE(verbose)) { insight::format_alert( "'metaplus' models do not support confidence levels other than 0.95. Argument 'ci' is ignored." ) } ci <- 0.95 } meta_analysis_overall <- suppressWarnings(.model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, keep_parameters = keep, drop_parameters = drop, ... )) rma_parameters <- if (!is.null(model$slab) && !is.numeric(model$slab)) { sprintf("%s", model$slab) } else if (is.null(model$k) && !is.null(model$slab) && is.numeric(model$slab)) { sprintf("Study %i", model$slab) } else if (!is.null(model$k)) { sprintf("Study %i", 1:model[["k"]]) } else { sprintf("Study %i", seq_along(model$yi)) } alpha <- (1 + ci) / 2 rma_coefficients <- as.vector(model$yi) rma_se <- as.vector(model$sei) rma_ci_low <- rma_coefficients - rma_se * stats::qt(alpha, df = Inf) rma_ci_high <- rma_coefficients + rma_se * stats::qt(alpha, df = Inf) rma_statistic <- rma_coefficients / rma_se rma_ci_p <- 2 * stats::pt(abs(rma_statistic), df = Inf, lower.tail = FALSE) meta_analysis_studies <- data.frame( Parameter = rma_parameters, Coefficient = rma_coefficients, SE = rma_se, CI_low = rma_ci_low, CI_high = rma_ci_high, z = rma_statistic, df_error = NA, p = rma_ci_p, Weight = 1 / as.vector(model$sei), stringsAsFactors = FALSE ) original_attributes <- attributes(meta_analysis_overall) out <- merge(meta_analysis_studies, meta_analysis_overall, all = TRUE, sort = FALSE) # fix intercept name out$Parameter[out$Parameter == "(Intercept)"] <- "Overall" out <- out[!(out$Parameter %in% c("tau2", "vinv")), ] # filter studies? 
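  # (when include_studies = FALSE, only the pooled "Overall" estimate is kept)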
if (isFALSE(include_studies)) { out <- out[out$Parameter == "Overall", ] } original_attributes$names <- names(out) original_attributes$row.names <- seq_len(nrow(out)) original_attributes$pretty_names <- stats::setNames(out$Parameter, out$Parameter) attributes(out) <- original_attributes # no df out$df_error <- NULL attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(out, "measure") <- "Estimate" if (!"Method" %in% names(out)) { out$Method <- "Robust meta-analysis using 'metaplus'" } attr(out, "title") <- unique(out$Method) out } #' @export standard_error.metaplus <- function(model, ...) { ci_low <- as.vector(model$results[, "95% ci.lb"]) ci_high <- as.vector(model$results[, "95% ci.ub"]) cis <- apply(cbind(ci_low, ci_high), MARGIN = 1, diff) out <- .data_frame( Parameter = .remove_backticks_from_string(rownames(model$results)), SE = cis / (2 * stats::qnorm(0.975)) ) out$Parameter[grepl("muhat", out$Parameter, fixed = TRUE)] <- "(Intercept)" out } #' @export p_value.metaplus <- function(model, ...) { out <- .data_frame( Parameter = .remove_backticks_from_string(rownames(model$results)), p = as.vector(model$results[, "pvalue"]) ) out$Parameter[grepl("muhat", out$Parameter, fixed = TRUE)] <- "(Intercept)" out } #' @export ci.metaplus <- function(x, ...) { out <- .data_frame( Parameter = .remove_backticks_from_string(rownames(x$results)), CI_low = as.vector(x$results[, "95% ci.lb"]), CI_high = as.vector(x$results[, "95% ci.ub"]) ) out$Parameter[grepl("muhat", out$Parameter, fixed = TRUE)] <- "(Intercept)" out } ###### .meta_random ------------------- #' @export model_parameters.meta_random <- function(model, ci = 0.95, ci_method = "eti", exponentiate = FALSE, include_studies = TRUE, verbose = TRUE, ...) { # process arguments params <- as.data.frame(model$estimates) ci_method <- match.arg(ci_method, choices = c("hdi", "eti", "quantile")) # parameters of studies included study_params <- model$data fac <- stats::qnorm((1 + ci) / 2, lower.tail = TRUE) out_study <- data.frame( Parameter = study_params$labels, Coefficient = study_params$y, SE = study_params$SE, CI_low = study_params$y - fac * study_params$SE, CI_high = study_params$y + fac * study_params$SE, Weight = 1 / study_params$SE^2, BF = NA, Rhat = NA, ESS = NA, Component = "studies", Prior_Distribution = NA, Prior_Location = NA, Prior_Scale = NA, stringsAsFactors = FALSE ) # extract ci-level and find ci-columns ci <- .meta_bma_extract_ci(params) ci_cols <- .metabma_ci_columns(ci_method, ci) # parameters of overall / tau out <- data.frame( Parameter = rownames(params), Coefficient = params$mean, SE = params$sd, CI_low = params[[ci_cols[1]]], CI_high = params[[ci_cols[2]]], Weight = NA, BF = NA, Rhat = params$Rhat, ESS = params$n_eff, Component = "meta", stringsAsFactors = FALSE ) # add prior information priors <- insight::get_priors(model) out$Prior_Distribution <- priors$Distribution out$Prior_Location <- priors$Location out$Prior_Scale <- priors$Scale # fix intercept name out$Parameter[out$Parameter == "d"] <- "Overall" # add BF out$BF[1] <- model$BF[2, 1] # merge out <- rbind(out_study, out) # filter studies? if (isFALSE(include_studies)) { out <- out[out$Parameter %in% c("Overall", "tau"), ] } # exponentiate coefficients and SE/CI, if requested out <- .exponentiate_parameters(out, model, exponentiate) out <- .add_model_parameters_attributes( params = out, model = model, ci = ci, exponentiate = exponentiate, ci_method = ci_method, verbose = verbose, ... 
) # final attributes attr(out, "measure") <- "Estimate" attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(out) <- c("parameters_model", "see_parameters_model", class(params)) if (!"Method" %in% names(out)) { out$Method <- "Bayesian meta-analysis using 'metaBMA'" } attr(out, "title") <- unique(out$Method) out } #' @export standard_error.meta_random <- function(model, ...) { params <- as.data.frame(model$estimates) out <- data.frame( Parameter = .remove_backticks_from_string(rownames(params)), SE = params$sd, stringsAsFactors = FALSE ) out$Parameter[grepl("d", out$Parameter, fixed = TRUE)] <- "(Intercept)" out } #' @export ci.meta_random <- function(x, method = "eti", ...) { # process arguments params <- as.data.frame(x$estimates) ci_method <- match.arg(method, choices = c("hdi", "eti", "quantile")) # extract ci-level and find ci-columns ci <- .meta_bma_extract_ci(params) ci_cols <- .metabma_ci_columns(ci_method, ci) out <- data.frame( Parameter = rownames(params), ci = 0.95, CI_low = params[[ci_cols[1]]], CI_high = params[[ci_cols[2]]], stringsAsFactors = FALSE ) out$Parameter[grepl("d", out$Parameter, fixed = TRUE)] <- "(Intercept)" out } ###### .meta_fixed ------------------- #' @export model_parameters.meta_fixed <- model_parameters.meta_random #' @export standard_error.meta_fixed <- standard_error.meta_random #' @export ci.meta_fixed <- ci.meta_random ###### .meta_bma ------------------- #' @export model_parameters.meta_bma <- function(model, ci = 0.95, ci_method = "eti", exponentiate = FALSE, include_studies = TRUE, verbose = TRUE, ...) { # process arguments params <- as.data.frame(model$estimates) ci_method <- insight::validate_argument(ci_method, c("hdi", "eti", "quantile")) # parameters of studies included study_params <- model$meta$fixed$data fac <- stats::qnorm((1 + ci) / 2, lower.tail = TRUE) out_study <- data.frame( Parameter = study_params$labels, Coefficient = study_params$y, SE = study_params$SE, CI_low = study_params$y - fac * study_params$SE, CI_high = study_params$y + fac * study_params$SE, Weight = 1 / study_params$SE^2, BF = NA, Rhat = NA, ESS = NA, Component = "studies", stringsAsFactors = FALSE ) # extract ci-level and find ci-columns ci <- .meta_bma_extract_ci(params) ci_cols <- .metabma_ci_columns(ci_method, ci) out <- data.frame( Parameter = rownames(params), Coefficient = params$mean, SE = params$sd, CI_low = params[[ci_cols[1]]], CI_high = params[[ci_cols[2]]], Weight = NA, BF = NA, Rhat = params$Rhat, ESS = params$n_eff, Component = "meta", stringsAsFactors = FALSE ) # add BF out$BF <- c(NA, model$BF[2, 1], model$BF[4, 1]) # merge out <- rbind(out_study, out) # filter studies? if (isFALSE(include_studies)) { out <- out[out$Parameter %in% c("averaged", "fixed", "random"), ] } # exponentiate coefficients and SE/CI, if requested out <- .exponentiate_parameters(out, model, exponentiate) out <- .add_model_parameters_attributes( params = out, model = model, ci = ci, exponentiate = exponentiate, ci_method = ci_method, verbose = verbose, ... 
) # final attributes attr(out, "measure") <- "Estimate" attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(out) <- c("parameters_model", "see_parameters_model", class(params)) if (!"Method" %in% names(out)) { out$Method <- "Bayesian meta-analysis using 'metaBMA'" } attr(out, "title") <- unique(out$Method) out } #' @export standard_error.meta_bma <- standard_error.meta_random #' @export ci.meta_bma <- ci.meta_random # helper ------ .meta_bma_extract_ci <- function(params) { hpd_col <- colnames(params)[grepl("hpd(\\d+)_lower", colnames(params))] as.numeric(gsub("hpd(\\d+)_lower", "\\1", hpd_col)) / 100 } .metabma_ci_columns <- function(ci_method, ci) { switch(toupper(ci_method), HDI = sprintf(c("hpd%i_lower", "hpd%i_upper"), 100 * ci), c(sprintf("%g%%", (100 * (1 - ci)) / 2), sprintf("%g%%", 100 - (100 * (1 - ci)) / 2)) ) } # format_parameters ----------------------------------- #' @export format_parameters.meta_random <- function(model, ...) { params <- insight::find_parameters(model, flatten = TRUE) names(params) <- params params } #' @export format_parameters.meta_fixed <- format_parameters.meta_random #' @export format_parameters.meta_bma <- format_parameters.meta_random parameters/R/methods_emmeans.R0000644000176200001440000003163714736731407016123 0ustar liggesusers# emmeans # model_parameters ---------------- #' @export model_parameters.emmGrid <- function(model, ci = 0.95, centrality = "median", dispersion = FALSE, ci_method = "eti", test = "pd", rope_range = "default", rope_ci = 0.95, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { # set default for p-adjust emm_padjust <- .safe(model@misc$adjust) if (!is.null(emm_padjust) && is.null(p_adjust)) { p_adjust <- emm_padjust } s <- summary(model, level = ci, adjust = "none") params <- as.data.frame(s) if (.is_bayesian_emmeans(model)) { # Bayesian models go here... params <- bayestestR::describe_posterior( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = NULL, diagnostic = NULL, priors = NULL, verbose = verbose, ... ) statistic <- NULL } else { # we assume frequentist here... statistic <- insight::get_statistic(model, ci = ci, adjust = "none") SE <- standard_error(model) p <- p_value(model, ci = ci, adjust = "none") params$Statistic <- statistic$Statistic params$SE <- SE$SE params$p <- p$p # ==== adjust p-values? if (!is.null(p_adjust)) { params <- .p_adjust(params, p_adjust, model, verbose) } } # Renaming estName <- attr(s, "estName") if (!is.null(statistic)) { names(params) <- gsub( "Statistic", gsub("-statistic", "", attr(statistic, "statistic", exact = TRUE), fixed = TRUE), names(params), fixed = TRUE ) } names(params) <- gsub("Std. Error", "SE", names(params), fixed = TRUE)
names(params) <- gsub(estName, "Estimate", names(params), fixed = TRUE) names(params) <- gsub("lower.CL", "CI_low", names(params), fixed = TRUE) names(params) <- gsub("upper.CL", "CI_high", names(params), fixed = TRUE) names(params) <- gsub("asymp.LCL", "CI_low", names(params), fixed = TRUE) names(params) <- gsub("asymp.UCL", "CI_high", names(params), fixed = TRUE) names(params) <- gsub("lower.HPD", "CI_low", names(params), fixed = TRUE) names(params) <- gsub("upper.HPD", "CI_high", names(params), fixed = TRUE) # check if we have CIs if (!any(startsWith(colnames(params), "CI_"))) { df_column <- grep("(df|df_error)", colnames(params)) if (length(df_column) > 0) { dof <- params[[df_column[1]]] } else { dof <- Inf } fac <- stats::qt((1 + ci) / 2, df = dof) params$CI_low <- params$Estimate - fac * params$SE params$CI_high <- params$Estimate + fac * params$SE } # rename if necessary if ("df" %in% colnames(params)) { colnames(params)[colnames(params) == "df"] <- "df_error" } # Reorder estimate_pos <- which(colnames(s) == estName) parameter_names <- colnames(params)[seq_len(estimate_pos - 1)] col_order <- c( parameter_names, "Estimate", "Median", "Mean", "SE", "SD", "MAD", "CI_low", "CI_high", "F", "t", "z", "df", "df_error", "p", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage" ) params <- params[col_order[col_order %in% names(params)]] # rename names(params) <- gsub("Estimate", "Coefficient", names(params), fixed = TRUE) # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) # filter parameters if (!is.null(keep) || !is.null(drop)) { params <- .filter_parameters(params, keep = keep, drop = drop, verbose = verbose ) } params <- suppressWarnings(.add_model_parameters_attributes( params, model, ci, exponentiate = FALSE, p_adjust = p_adjust, verbose = verbose, ... )) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(params, "parameter_names") <- parameter_names class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export model_parameters.emm_list <- function(model, ci = 0.95, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { s <- summary(model) params <- lapply(seq_along(s), function(i) { pars <- model_parameters( model[[i]], ci = ci, exponentiate = exponentiate, p_adjust = p_adjust, verbose = verbose ) estimate_pos <- which(colnames(pars) %in% c("Coefficient", "Median", "Mean"))[1] pars[seq_len(estimate_pos - 1)] <- NULL cbind( Parameter = .pretty_emmeans_Parameter_names(model[[i]]), pars ) }) params <- do.call(rbind, params) params$Component <- .pretty_emmeans_Component_names(s) # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) params <- .add_model_parameters_attributes( params, model, ci, exponentiate, p_adjust = p_adjust, verbose = verbose, ... ) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export model_parameters.summary_emm <- function(model, keep = NULL, drop = NULL, verbose = TRUE, ...) { params <- model # Renaming estName <- attr(model, "estName") names(params) <- gsub("Std. Error", "SE", names(params), fixed = TRUE)
names(params) <- gsub(estName, "Estimate", names(params), fixed = TRUE) names(params) <- gsub("response", "Response", names(params), fixed = TRUE) names(params) <- gsub("lower.CL", "CI_low", names(params), fixed = TRUE) names(params) <- gsub("upper.CL", "CI_high", names(params), fixed = TRUE) names(params) <- gsub("asymp.LCL", "CI_low", names(params), fixed = TRUE) names(params) <- gsub("asymp.UCL", "CI_high", names(params), fixed = TRUE) names(params) <- gsub("lower.HPD", "CI_low", names(params), fixed = TRUE) names(params) <- gsub("upper.HPD", "CI_high", names(params), fixed = TRUE) # rename if necessary if ("df" %in% colnames(params)) { colnames(params)[colnames(params) == "df"] <- "df_error" } # Reorder estimate_pos <- which(colnames(model) == estName) parameter_names <- colnames(params)[seq_len(estimate_pos - 1)] col_order <- c( parameter_names, "Estimate", "Median", "Mean", "SE", "SD", "MAD", "CI_low", "CI_high", "F", "t", "z", "df", "df_error", "p", "pd", "ROPE_CI", "ROPE_low", "ROPE_high", "ROPE_Percentage" ) params <- params[col_order[col_order %in% names(params)]] # rename names(params) <- gsub("Estimate", "Coefficient", names(params), fixed = TRUE) # filter parameters if (!is.null(keep) || !is.null(drop)) { params <- .filter_parameters(params, keep = keep, drop = drop, verbose = verbose ) } params <- suppressWarnings(.add_model_parameters_attributes( params, model, ci = 0.95, exponentiate = FALSE, p_adjust = NULL, verbose = verbose, ... )) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(params, "parameter_names") <- parameter_names class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } # standard errors ----------------- #' @export standard_error.emmGrid <- function(model, ...) { if (!is.null(model@misc$is_boot) && model@misc$is_boot) { return(boot_em_standard_error(model)) } s <- summary(model) estimate_pos <- which(colnames(s) == attr(s, "estName")) if (length(estimate_pos) && !is.null(s$SE)) { out <- .data_frame( Parameter = .pretty_emmeans_Parameter_names(model), SE = unname(s$SE) ) } else { out <- NULL } out } #' @export standard_error.emm_list <- function(model, ...) { if (!is.null(model[[1]]@misc$is_boot) && model[[1]]@misc$is_boot) { return(boot_em_standard_error(model)) } params <- insight::get_parameters(model) s <- summary(model) se <- unlist(lapply(s, function(i) { if (is.null(i$SE)) { rep(NA, nrow(i)) } else { i$SE } })) .data_frame( Parameter = .pretty_emmeans_Parameter_names(model), SE = unname(se), Component = .pretty_emmeans_Component_names(s) ) } boot_em_standard_error <- function(model) { est <- insight::get_parameters(model, summary = FALSE) Component <- NULL s <- summary(model) if (inherits(s, "list")) { Component <- .pretty_emmeans_Component_names(s) } out <- .data_frame( Parameter = .pretty_emmeans_Parameter_names(model), SE = vapply(est, stats::sd, numeric(1)) ) if (!is.null(Component)) out$Component <- Component out } # p values ---------------------- #' @rdname p_value #' @export p_value.emmGrid <- function(model, ci = 0.95, adjust = "none", ...) 
{ if (!is.null(model@misc$is_boot) && model@misc$is_boot) { return(boot_em_pval(model, adjust)) } s <- summary(model, level = ci, adjust = adjust) estimate_pos <- which(colnames(s) == attr(s, "estName")) if (!length(estimate_pos)) { return(NULL) } stat <- insight::get_statistic(model, ci = ci, adjust = adjust) p <- 2 * stats::pt(abs(stat$Statistic), df = s$df, lower.tail = FALSE) .data_frame( Parameter = .pretty_emmeans_Parameter_names(model), p = as.vector(p) ) } #' @export p_value.emm_list <- function(model, adjust = "none", ...) { if (!is.null(model[[1]]@misc$is_boot) && model[[1]]@misc$is_boot) { return(boot_em_pval(model, adjust)) } params <- insight::get_parameters(model) s <- summary(model, adjust = adjust) # p-values p <- unlist(lapply(s, function(i) { if (is.null(i$p)) { rep(NA, nrow(i)) } else { i$p } })) # result out <- .data_frame( Parameter = .pretty_emmeans_Parameter_names(model), p = as.vector(p), Component = .pretty_emmeans_Component_names(s) ) # any missing values? if (anyNA(out$p)) { # standard errors se <- unlist(lapply(s, function(i) { if (is.null(i$SE)) { rep(NA, nrow(i)) } else { i$SE } })) # test statistic and p-values stat <- params$Estimate / se dof <- insight::get_df(model) p_val <- 2 * stats::pt(abs(stat), df = dof, lower.tail = FALSE) out$p[is.na(out$p)] <- p_val[is.na(out$p)] } out } boot_em_pval <- function(model, adjust) { est <- insight::get_parameters(model, summary = FALSE) p <- sapply(est, p_value) p <- stats::p.adjust(p, method = adjust) Component <- NULL s <- summary(model) if (inherits(s, "list")) { Component <- .pretty_emmeans_Component_names(s) } out <- .data_frame( Parameter = .pretty_emmeans_Parameter_names(model), p = unname(p) ) if (!is.null(Component)) out$Component <- Component out } # format parameters ----------------- #' @export format_parameters.emm_list <- function(model, ...) { NULL } # Utils ------------------------------------------------------------------- .pretty_emmeans_Parameter_names <- function(model) { s <- summary(model) if (inherits(s, "list")) { parnames <- lapply(seq_along(s), function(i) .pretty_emmeans_Parameter_names(model[[i]])) parnames <- unlist(parnames) } else { estimate_pos <- which(colnames(s) == attr(s, "estName")) params <- s[, 1:(estimate_pos - 1), drop = FALSE] if (ncol(params) >= 2) { r <- apply(params, 1, function(i) paste0(colnames(params), " [", i, "]")) parnames <- unname(sapply(as.data.frame(r), toString)) } else { parnames <- as.vector(params[[1]]) } } parnames } .pretty_emmeans_Component_names <- function(s) { Component <- lapply(seq_along(s), function(i) { rep(names(s)[[i]], nrow(s[[i]])) }) Component <- unlist(Component) } .is_bayesian_emmeans <- function(model) { is_frq <- isTRUE(all.equal(dim(model@post.beta), c(1, 1))) && isTRUE(is.na(model@post.beta)) && is.null(model@misc$is_boot) isFALSE(is_frq) } parameters/R/print_html.R0000644000176200001440000003454415053035103015126 0ustar liggesusers# normal print ---------------------------- #' @rdname print.parameters_model #' @export print_html.parameters_model <- function(x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, digits = 2, ci_digits = digits, p_digits = 3, footer_digits = 3, ci_brackets = c("(", ")"), show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, groups = NULL, font_size = "100%", line_padding = 4, column_labels = NULL, include_reference = FALSE, verbose = TRUE, ...) { # which engine? engine <- .check_format_backend(...) 
  # line separator - for tinytable, we have no specific line separator,
  # because the output format is context-dependent
  line_sep <- ifelse(identical(engine, "tt"), " ", "<br>
") # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", digits) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", p_digits) } if (missing(footer_digits)) { footer_digits <- .additional_arguments(x, "footer_digits", footer_digits) } # get attributes if (missing(select) || is.null(select)) { select <- attributes(x)$output_style } if (missing(groups)) { groups <- attributes(x)$parameter_groups } # we need glue-like syntax right now... if (!is.null(select)) { select <- .convert_to_glue_syntax(style = select, line_sep) } # check options --------------- # check if pretty names should be replaced by value labels # (if we have labelled data) if (isTRUE(getOption("parameters_labels", FALSE)) || identical(pretty_names, "labels")) { attr(x, "pretty_names") <- attr(x, "pretty_labels", exact = TRUE) pretty_names <- TRUE } # select which columns to print if (is.null(select)) { select <- getOption("parameters_select") } # table caption table_caption <- .print_caption(x, caption, format = "html") # main table formatted_table <- format( x = x, pretty_names = pretty_names, split_components = split_components, select = select, digits = digits, ci_digits = ci_digits, p_digits = p_digits, zap_small = zap_small, ci_width = NULL, ci_brackets = ci_brackets, format = "html", groups = groups, include_reference = include_reference, ... ) # replace brackets by parenthesis if (!is.null(ci_brackets) && "Parameter" %in% colnames(formatted_table)) { formatted_table$Parameter <- gsub("[", ci_brackets[1], formatted_table$Parameter, fixed = TRUE) formatted_table$Parameter <- gsub("]", ci_brackets[2], formatted_table$Parameter, fixed = TRUE) } # footer footer_stats <- .print_footer( x, digits = footer_digits, show_sigma = show_sigma, show_formula = show_formula, format = "html" ) if (!is.null(footer)) { footer <- paste0(footer, line_sep, paste(footer_stats, collapse = line_sep)) } else if (!is.null(footer_stats)) { footer <- paste(footer_stats, collapse = line_sep) } out <- insight::export_table( formatted_table, format = engine, caption = table_caption, subtitle = subtitle, footer = footer, align = align, ... ) if (identical(engine, "tt")) { out } else { .add_gt_options( out, style = select, font_size = font_size, line_padding = line_padding, user_labels = column_labels ) } } #' @export print_html.parameters_brms_meta <- print_html.parameters_model #' @export print_html.parameters_simulate <- print_html.parameters_model #' @export print_html.parameters_sem <- print_html.parameters_model #' @rdname print.compare_parameters #' @export print_html.compare_parameters <- function(x, caption = NULL, subtitle = NULL, footer = NULL, digits = 2, ci_digits = digits, p_digits = 3, zap_small = FALSE, groups = NULL, select = NULL, ci_brackets = c("(", ")"), font_size = "100%", line_padding = 4, column_labels = NULL, ...) { # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", digits) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", p_digits) } # get attributes if (missing(select) || is.null(select)) { select <- attributes(x)$output_style } # which engine? engine <- .check_format_backend(...) 
  # line separator - for tinytable, we have no specific line separator,
  # because the output format is context-dependent
  line_sep <- ifelse(identical(engine, "tt"), " ", "<br>
") # we need glue-like syntax right now... select <- .convert_to_glue_syntax(style = select, line_sep) formatted_table <- format( x, select = select, split_components = TRUE, digits = digits, ci_digits = ci_digits, p_digits = p_digits, ci_width = NULL, ci_brackets = ci_brackets, format = "html", zap_small = zap_small, groups = groups ) # replace brackets by parenthesis if (!is.null(ci_brackets) && "Parameter" %in% colnames(formatted_table)) { formatted_table$Parameter <- gsub("[", ci_brackets[1], formatted_table$Parameter, fixed = TRUE) formatted_table$Parameter <- gsub("]", ci_brackets[2], formatted_table$Parameter, fixed = TRUE) } # setup grouping for tt-backend -------------------------------------------- # -------------------------------------------------------------------------- model_groups <- NULL by <- NULL # find columns that contain model names, which we want to group models <- setdiff( unique(gsub("(.*) \\((.*)\\)$", "\\2", colnames(formatted_table))[-1]), c("Component", "Effects", "Response", "Group") ) # grouping only applies when we have custom column layout (with "select") # else, we don't need grouping if (any(grepl(paste0("(", models[1], ")"), colnames(formatted_table), fixed = TRUE))) { model_groups <- lapply(models, function(model) { which(endsWith(colnames(formatted_table), paste0("(", model, ")"))) }) names(model_groups) <- models if (identical(engine, "tt")) { # for the tt backend, we need to add the model name to the column names colnames(formatted_table)[-1] <- gsub( "(.*) \\((.*)\\)$", "\\1", colnames(formatted_table)[-1] ) } } if ("Component" %in% colnames(formatted_table)) { by <- c(by, "Component") } if ("Effects" %in% colnames(formatted_table)) { by <- c(by, "Effects") } # export table ------------------------------------------------------------ out <- insight::export_table( formatted_table, format = engine, caption = caption, # TODO: get rid of NOTE subtitle = subtitle, footer = footer, column_groups = model_groups, by = by, ... ) # setup gt-backend --------------------------------------------------------- # -------------------------------------------------------------------------- if (identical(engine, "tt")) { out } else { .add_gt_options( out, style = select, font_size = font_size, line_padding = line_padding, column_names = colnames(formatted_table), user_labels = column_labels ) } } # PCA / EFA / CFA ---------------------------- #' @rdname principal_components #' @export print_html.parameters_efa <- function(x, digits = 2, sort = FALSE, threshold = NULL, labels = NULL, ...) { # extract attributes if (is.null(threshold)) { threshold <- attributes(x)$threshold } .print_parameters_cfa_efa( x, threshold = threshold, sort = sort, format = "html", digits = digits, labels = labels, ... ) } #' @export print_html.parameters_pca <- print_html.parameters_efa #' @export print_html.parameters_efa_summary <- function(x, digits = 3, ...) { # html engine? engine <- .check_format_backend(...) 
table_caption <- "(Explained) Variance of Components" if ("Parameter" %in% names(x)) { x$Parameter <- c("Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") # nolint } else if ("Component" %in% names(x)) { names(x) <- c("Component", "Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") # nolint } # we may have factor correlations fc <- attributes(x)$factor_correlations # if we have factor correlations, we need to add them to the table if (!is.null(fc)) { fc$Component <- "Factor Correlations" x$Component <- "Explained Variance" colnames(fc)[1] <- colnames(x)[1] x <- .safe(rbind(x, fc), x) } insight::export_table( x, digits = digits, format = engine, caption = table_caption, align = "firstleft" ) } #' @export print_html.parameters_pca_summary <- print_html.parameters_efa_summary # Equivalence test ---------------------------- #' @export print_html.equivalence_test_lm <- function( x, digits = 2, ci_brackets = c("(", ")"), zap_small = FALSE, ... ) { .print_equivalence_test_lm( x, digits = digits, ci_brackets = ci_brackets, zap_small = zap_small, format = .check_format_backend(...), ... ) } # p_function ---------------------------- #' @rdname p_function #' @export print_html.parameters_p_function <- function(x, digits = 2, ci_width = "auto", ci_brackets = c("(", ")"), pretty_names = TRUE, ...) { .print_p_function( x, digits, ci_width, ci_brackets, pretty_names, format = "html", ... ) } # helper ------------------ .add_gt_options <- function(out, style, font_size = "100%", line_padding = 4, column_names = NULL, user_labels = NULL) { insight::check_if_installed("gt") out <- gt::tab_options(out, table.font.size = font_size, data_row.padding = gt::px(line_padding) ) # insert newlines if (!is.null(style) && grepl("
", style, fixed = TRUE)) { insight::check_if_installed("tidyselect") out <- gt::fmt_markdown(out, columns = tidyselect::everything()) } # user defined column labels new_labels <- NULL if (!is.null(user_labels)) { new_labels <- c( colnames(out[["_data"]])[1], rep_len(user_labels, ncol(out[["_data"]]) - 1) ) new_labels <- as.list(new_labels) } # relabel columns. The single columns still have their old labels # (like "Estimate (model1)", "p (model1)"), and we extracted the "model names" # and used them for the column spanner. Now we no longer need this suffix, # and remove it. In case user-defined column labels are provided, "new_labels" # is not NULL, so we use user labels, else we extract labels from columns. if (!is.null(column_names)) { if (is.null(new_labels)) { new_labels <- as.list(gsub("(.*) \\((.*)\\)$", "\\1", column_names)) } names(new_labels) <- column_names out <- gt::cols_label(out, .list = new_labels) # default column label, if we have user labels } else if (!is.null(new_labels)) { names(new_labels) <- colnames(out[["_data"]]) out <- gt::cols_label(out, .list = new_labels) } # find name of parameter column pcol_name <- colnames(out[["_data"]])[1] # check where last parameter row ends. For "compare_models()", the # first Parameter value after data rows is "". If this is not found, # simply use number of rows as last row last_row <- which(!nzchar(as.character(out[["_data"]][[pcol_name]]), keepNA = TRUE))[1] if (is.na(last_row)) { last_row <- nrow(out[["_data"]]) } else { last_row <- last_row - 1 } # add a border to the first column. out <- gt::tab_style( out, style = gt::cell_borders( sides = "right", style = "solid", color = "#d3d3d3" ), locations = gt::cells_body( columns = pcol_name, rows = 1:last_row ) ) out } # we allow exporting HTML format based on "gt" or "tinytable" .check_format_backend <- function(...) { dots <- list(...) if (identical(dots$backend, "tt")) { "tt" } else { "html" } } parameters/R/methods_DirichletReg.R0000644000176200001440000000645214717111737017051 0ustar liggesusers#' @export model_parameters.DirichletRegModel <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = "all", standardize = NULL, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { component <- insight::validate_argument( component, c("all", "conditional", "precision") ) if (component == "all") { merge_by <- c("Parameter", "Component", "Response") } else { merge_by <- c("Parameter", "Response") } ## TODO check merge by junk <- utils::capture.output({ out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = merge_by, standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, ... ) }) out$Response[is.na(out$Response)] <- "" attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export ci.DirichletRegModel <- function(x, ci = 0.95, component = "all", ...) { component <- insight::validate_argument( component, c("all", "conditional", "precision") ) params <- insight::get_parameters(x, component = component) out <- .ci_generic(model = x, ci = ci, dof = Inf, ...) if (is.null(out$Component)) { component <- "all" } if ("Response" %in% colnames(params)) { out$Response <- params$Response } if (component != "all") { out <- out[out$Component == component, ] } out } #' @export standard_error.DirichletRegModel <- function(model, component = "all", ...) 
{ component <- insight::validate_argument( component, c("all", "conditional", "precision") ) params <- insight::get_parameters(model) out <- .data_frame( Parameter = params$Parameter, Response = params$Response, SE = as.vector(model$se) ) if (is.null(params$Component)) { component <- "all" } else { out$Component <- params$Component } if (component != "all") { out <- out[out$Component == component, ] } out } #' @export p_value.DirichletRegModel <- function(model, component = "all", ...) { component <- insight::validate_argument( component, c("all", "conditional", "precision") ) params <- insight::get_parameters(model) out <- .data_frame( Parameter = params$Parameter, Response = params$Response, p = as.vector(2 * stats::pnorm(-abs(params$Estimate / model$se))) ) if (is.null(params$Component)) { component <- "all" } else { out$Component <- params$Component } if (component != "all") { out <- out[out$Component == component, ] } out } parameters/R/methods_brglm2.R0000644000176200001440000001701314761570351015664 0ustar liggesusers# classes: .bracl, .multinom, .brmultinom ## TODO add ci_method later? ############# .bracl -------------- #' @export model_parameters.bracl <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { # validation check, warn if unsupported argument is used. dot_args <- .check_dots( dots = list(...), not_allowed = c("vcov", "vcov_args"), class(model)[1], verbose = verbose ) # detect number of levels of response resp <- insight::get_response(model) # for cbind(), response is a data frame, not a factor. We then need to use # number of columns as "nl" if (is.data.frame(resp)) { nl <- ncol(resp) } else { nl <- .safe(nlevels(factor(resp)), 0) } # merge by response as well if more than 2 levels if (nl > 2) { merge_by <- c("Parameter", "Response") } else { merge_by <- "Parameter" } fun_args <- list( model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = merge_by, standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, include_info = include_info, vcov = NULL, vcov_args = NULL ) fun_args <- c(fun_args, dot_args) out <- do.call(".model_parameters_generic", fun_args) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export ci.bracl <- function(x, ci = 0.95, method = NULL, verbose = TRUE, ...) { # validation check, warn if unsupported argument is used. dot_args <- .check_dots( dots = list(...), not_allowed = c("vcov", "vcov_args"), class(x)[1], function_name = "ci", verbose = verbose ) params <- insight::get_parameters(x) out <- .ci_generic(model = x, ci = ci, method = method, ...) if ("Response" %in% colnames(params)) { out$Response <- params$Response } out } #' @export standard_error.bracl <- function(model, verbose = TRUE, ...) { # validation check, warn if unsupported argument is used. dot_args <- .check_dots( dots = list(...), not_allowed = c("vcov", "vcov_args"), class(model)[1], function_name = "standard_error", verbose = verbose ) smry <- suppressMessages(as.data.frame(stats::coef(summary(model)))) se <- smry[[2]] names(se) <- rownames(smry) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(se), Response = params$Response ) } #' @export p_value.bracl <- function(model, verbose = TRUE, ...) 
{ # validation check, warn if unsupported argument is used. dot_args <- .check_dots( dots = list(...), not_allowed = c("vcov", "vcov_args"), class(model)[1], function_name = "p_value", verbose = verbose ) smry <- suppressMessages(as.data.frame(stats::coef(summary(model)))) p <- smry[[4]] names(p) <- rownames(smry) params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, p = as.vector(p), Response = params$Response ) } ############# .multinom -------------- #' @export model_parameters.multinom <- function(model, ci = 0.95, ci_method = "normal", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { model_parameters.bracl( model, ci = ci, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, include_info = include_info, keep = keep, drop = drop, verbose = verbose, ... ) } #' @export ci.multinom <- function(x, ci = 0.95, method = "normal", verbose = TRUE, ...) { ci.bracl(x, ci = ci, method = method, verbose = verbose, ...) } #' @export standard_error.multinom <- function(model, ...) { se <- tryCatch( { std_err <- summary(model)$standard.errors if (is.null(std_err)) { vc <- insight::get_varcov(model) std_err <- as.vector(sqrt(diag(vc))) } else { if (is.matrix(std_err)) { tmp <- NULL for (i in seq_len(nrow(std_err))) { tmp <- c(tmp, as.vector(std_err[i, ])) } } else { tmp <- as.vector(std_err) } std_err <- tmp } std_err }, error = function(e) { vc <- insight::get_varcov(model) as.vector(sqrt(diag(vc))) } ) params <- insight::get_parameters(model) if ("Response" %in% colnames(params)) { .data_frame( Parameter = params$Parameter, SE = se, Response = params$Response ) } else { .data_frame( Parameter = params$Parameter, SE = se ) } } #' @export p_value.multinom <- function(model, method = "normal", ...) { stat <- insight::get_statistic(model) out <- p_value.default(model, method = method, ...) if (!is.null(stat$Response)) { out$Response <- stat$Response } out } #' @export simulate_parameters.multinom <- function(model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ...) { sim_data <- simulate_model(model, iterations = iterations, ...) out <- .summary_bootstrap( data = sim_data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) params <- insight::get_parameters(model) out$Parameter <- params$Parameter if ("Response" %in% colnames(params)) { out$Response <- params$Response } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(out, "iterations") <- iterations attr(out, "ci") <- ci attr(out, "ci_method") <- ci_method attr(out, "centrality") <- centrality out } ############# .brmultinom -------------- #' @export model_parameters.brmultinom <- model_parameters.bracl #' @export ci.brmultinom <- ci.bracl #' @export standard_error.brmultinom <- standard_error.multinom #' @export p_value.brmultinom <- p_value.multinom parameters/R/methods_mclogit.R0000644000176200001440000000352114761570351016134 0ustar liggesusers#' @export model_parameters.mblogit <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) 
{ out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Response"), standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, include_info = include_info, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export standard_error.mblogit <- function(model, ...) { s <- stats::coef(summary(model)) out <- data.frame( Parameter = gsub("(.*)~(.*)", "\\2", rownames(s)), SE = unname(s[, "Std. Error"]), Response = gsub("(.*)~(.*)", "\\1", rownames(s)), stringsAsFactors = FALSE, row.names = NULL ) } #' @export p_value.mblogit <- function(model, ...) { s <- stats::coef(summary(model)) out <- data.frame( Parameter = gsub("(.*)~(.*)", "\\2", rownames(s)), p = unname(s[, "Pr(>|z|)"]), Response = gsub("(.*)~(.*)", "\\1", rownames(s)), stringsAsFactors = FALSE, row.names = NULL ) } #' @export simulate_parameters.mblogit <- simulate_parameters.multinom parameters/R/ci_betwithin.R0000644000176200001440000000061714317274256015430 0ustar liggesusers#' @rdname p_value_betwithin #' @export ci_betwithin <- function(model, ci = 0.95, ...) { df_bet <- dof_ml1(model) out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, effects = "fixed", component = "all", dof = df_bet, method = "betwithin", ... ) }) out <- do.call(rbind, out) row.names(out) <- NULL out } parameters/R/methods_FactoMineR.R0000644000176200001440000000466414717115074016474 0ustar liggesusers#' @export model_parameters.PCA <- function(model, sort = FALSE, threshold = NULL, labels = NULL, verbose = TRUE, ...) { loadings <- as.data.frame(model$var$coord) n <- model$call$ncp # Get summary eig <- as.data.frame(model$eig[1:n, ]) data_summary <- .data_frame( Component = names(loadings), Eigenvalues = eig$eigenvalue, Variance = eig$`percentage of variance` / 100, Variance_Cumulative = eig$`cumulative percentage of variance` / 100 ) data_summary$Variance_Proportion <- data_summary$Variance / sum(data_summary$Variance) # Format loadings <- cbind(data.frame(Variable = row.names(loadings)), loadings) row.names(loadings) <- NULL # Labels if (!is.null(labels)) { loadings$Label <- labels loadings <- loadings[c("Variable", "Label", names(loadings)[!names(loadings) %in% c("Variable", "Label")])] loading_cols <- 3:(n + 2) } else { loading_cols <- 2:(n + 1) } loadings$Complexity <- (apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^2)))^2 / apply(loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^4)) # Add attributes attr(loadings, "summary") <- data_summary attr(loadings, "model") <- model attr(loadings, "rotation") <- "none" attr(loadings, "scores") <- as.data.frame(model$ind$coord) attr(loadings, "additional_arguments") <- list(...) 
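  # store the number of components and the positions of the loading columns
  # as attributes; the sorting, filtering and printing helpers below rely on them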
attr(loadings, "n") <- n attr(loadings, "loadings_columns") <- loading_cols # Sorting if (isTRUE(sort)) { loadings <- .sort_loadings(loadings) } # Replace by NA all cells below threshold if (!is.null(threshold)) { loadings <- .filter_loadings(loadings, threshold = threshold) } # Add some more attributes attr(loadings, "loadings_long") <- .long_loadings(loadings, threshold = threshold, loadings_columns = loading_cols) # add class-attribute for printing if (inherits(model, "PCA")) { attr(loadings, "type") <- "pca" class(loadings) <- unique(c("parameters_pca", "see_parameters_pca", class(loadings))) } else if (inherits(model, "FAMD")) { attr(loadings, "type") <- "fa" class(loadings) <- unique(c("parameters_efa", "see_parameters_efa", class(loadings))) } loadings } #' @export model_parameters.FAMD <- model_parameters.PCA parameters/R/standardize_info.R0000644000176200001440000004346015111054715016273 0ustar liggesusers#' Get Standardization Information #' #' This function extracts information, such as the deviations (SD or MAD) from #' parent variables, that are necessary for post-hoc standardization of #' parameters. This function gives a window on how standardized are obtained, #' i.e., by what they are divided. The "basic" method of standardization uses. #' #' @inheritParams standardize_parameters #' @param include_pseudo (For (G)LMMs) Should Pseudo-standardized information be #' included? #' @param ... Arguments passed to or from other methods. #' #' @return A data frame with information on each parameter (see #' [`parameters_type()`]), and various standardization coefficients #' for the post-hoc methods (see [`standardize_parameters()`]) for the predictor #' and the response. #' #' @family standardize #' #' @examplesIf insight::check_if_installed("datawizard", quietly = TRUE) #' model <- lm(mpg ~ ., data = mtcars) #' standardize_info(model) #' standardize_info(model, robust = TRUE) #' standardize_info(model, two_sd = TRUE) #' @aliases standardise_info #' @export standardize_info <- function(model, ...) { UseMethod("standardize_info") } #' @export standardise_info <- standardize_info #' @rdname standardize_info #' @export standardize_info.default <- function(model, robust = FALSE, two_sd = FALSE, include_pseudo = FALSE, verbose = TRUE, ...) { # check for valid input .is_model_valid(model) mi <- .get_model_info(model, ...) params <- if (inherits(model, c("glmmTMB", "MixMod"))) { insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE, ...) } else { insight::find_parameters(model, effects = "fixed", flatten = TRUE, ...) } types <- parameters_type(model) model_matrix <- as.data.frame(insight::get_modelmatrix(model)) model_data <- insight::get_data(model, source = "mf", verbose = FALSE) wgts <- insight::get_weights(model, remove_na = TRUE) # validation check for ZI if (mi$is_zero_inflated && verbose) { insight::format_alert( "Non-refit parameter standardization is ignoring the zero-inflation component." ) # would need to also get the binomial model matrix... 
} # validation check for glmmTMB with dispersion if (length(params) != nrow(types)) { types <- types[types$Parameter %in% params, ] } out <- data.frame( Parameter = params, Type = types$Type, Link = types$Link, Secondary_Parameter = types$Secondary_Parameter, stringsAsFactors = FALSE ) # Type of effect size out$EffectSize_Type <- ifelse(types$Type == "interaction", "interaction", ifelse(types$Link == "Association", "r", # nolint ifelse(types$Link == "Difference", "d", NA) # nolint ) ) # Response - Basic out <- merge( out, .std_info_response_basic(model, mi, params, robust = robust, w = wgts), by = "Parameter", all = TRUE ) # Response - Smart out <- merge( out, .std_info_response_smart(model, mi, data = model_data, model_matrix, types, robust = robust, w = wgts), by = "Parameter", all = TRUE ) # Basic out <- merge( out, .std_info_predictors_basic(model, model_matrix, types, robust = robust, two_sd = two_sd, w = wgts), by = "Parameter", all = TRUE ) # Smart out <- merge( out, .std_info_predictors_smart(model, data = model_data, params, types, robust = robust, two_sd = two_sd, w = wgts ), by = "Parameter", all = TRUE ) # sdy (see Mood 2009, 10.1093/esr/jcp006) out <- merge( out, .std_info_predictors_sdy(model, model_matrix, types, robust = robust, two_sd = two_sd, w = wgts), by = "Parameter", all = TRUE ) # Pseudo (for LMM) if (include_pseudo && mi$is_mixed && length(insight::find_random(model)$random) == 1L) { out <- merge( out, .std_info_pseudo( model, mi, params, model_matrix, data = model_data, types = types$Type, robust = robust, two_sd = two_sd, verbose = verbose ) ) } # Reorder out <- out[match(params, out$Parameter), ] out$Parameter <- params row.names(out) <- NULL # Remove all means for now (because it's not used) out <- out[!grepl("Mean_", names(out), fixed = TRUE)] # Select only desired columns # if(method == "all") method <- c("smart", "basic") # if(!any(method == "smart")){ # out <- out[!grepl("_Smart", names(out))] # } # if(!any(method == "basic")){ # out <- out[!grepl("_Basic", names(out))] # } out } # Predictors - Smart ------------------------------------------------------------ #' @keywords internal .std_info_predictors_smart <- function(model, data, params, types, robust = FALSE, two_sd = FALSE, w = NULL, ...) { # Get deviations for all parameters means <- deviations <- rep(NA_real_, times = length(params)) for (i in seq_along(params)) { variable <- params[i] info <- .std_info_predictor_smart( data = data, variable = types[types$Parameter == variable, "Variable"], type = types[types$Parameter == variable, "Type"], robust = robust, two_sd = two_sd, weights = w ) deviations[i] <- info$sd means[i] <- info$mean } # Out data.frame( Parameter = params, Deviation_Smart = deviations, Mean_Smart = means, stringsAsFactors = FALSE ) } #' @keywords internal .std_info_predictor_smart <- function(data, variable, type, robust = FALSE, two_sd = FALSE, weights = NULL, ...) 
{ if (type == "intercept") { # nolint info <- list(sd = 0, mean = 0) } else if (type == "numeric") { info <- .compute_std_info( data = data, variable = variable, robust = robust, two_sd = two_sd, weights = weights ) } else if (type == "factor") { info <- list(sd = 1, mean = 0) # TO BE IMPROVED: Adjust if involved in interactions # interactions <- types[types$Type %in% c("interaction"), ] # if(variable %in% interactions$Secondary_Variable){ # interac_var <- unique(interactions[interactions$Secondary_Variable == variable, "Variable"]) # for(i in interac_var){ # if(types[types$Parameter == i, "Type"] == "numeric"){ # sd_x <- sd_x * .get_deviation(data, i, robust) # } # } # } } else if (type %in% c("interaction", "nested")) { if (is.numeric(data[, variable])) { info <- .compute_std_info( data = data, variable = variable, robust = robust, two_sd = two_sd, weights = weights ) } else if (is.factor(data[, variable])) { info <- list(sd = 1, mean = 0) } else { info <- list(sd = 1, mean = 0) } } else { info <- list(sd = 1, mean = 0) } list(sd = info$sd, mean = info$mean) } # Predictors - Basic ------------------------------------------------------------ #' @keywords internal .std_info_predictors_basic <- function(model, model_matrix, types, robust = FALSE, two_sd = FALSE, w = NULL, ...) { # Get deviations for all parameters means <- deviations <- rep(NA_real_, length = length(names(model_matrix))) for (i in seq_along(names(model_matrix))) { variable <- names(model_matrix)[i] if (types[i, "Type"] == "intercept") { means[i] <- deviations[i] <- 0 } else { std_info <- .compute_std_info( data = model_matrix, variable = variable, robust = robust, two_sd = two_sd, weights = w ) deviations[i] <- std_info$sd means[i] <- std_info$mean } } # Out data.frame( Parameter = types$Parameter[seq_along(names(model_matrix))], Deviation_Basic = deviations, Mean_Basic = means, stringsAsFactors = FALSE ) } # Predictors - sdy ------------------------------------------------------------ #' @keywords internal .std_info_predictors_sdy <- function(model, model_matrix, types, ...) { deviations <- NA_real_ # fitted values fitted_values <- .safe(stats::fitted(model)) if (!is.null(fitted_values)) { deviations <- 1 / sum(c(stats::sd(fitted_values), sqrt(pi^2 / 3))) } # Out data.frame( Parameter = types$Parameter[seq_along(names(model_matrix))], Deviation_SDy = deviations, stringsAsFactors = FALSE ) } # Response ------------------------------------------------------------ #' @keywords internal .std_info_response_smart <- function(model, info, data, model_matrix, types, robust = FALSE, w = NULL, ...) 
{ if (info$is_linear) { if (inherits(model, c("gls", "lme"))) { response <- insight::get_response(model) } else if (inherits(model, "fixest")) { response <- stats::model.matrix(model, type = "lhs") } else { response <- stats::model.frame(model)[[1]] } means <- deviations <- rep(NA_real_, length = length(names(model_matrix))) for (i in seq_along(names(model_matrix))) { variable <- names(model_matrix)[i] if (any(types$Parameter == variable) && types$Link[types$Parameter == variable] == "Difference") { parent_var <- types$Variable[types$Parameter == variable] intercept <- unique(data[[parent_var]])[1] response_at_intercept <- response[data[[parent_var]] == intercept] weights_at_intercept <- if (length(w)) w[data[[parent_var]] == intercept] else NULL std_info <- .compute_std_info( response = response_at_intercept, robust = robust, weights = weights_at_intercept ) } else { std_info <- .compute_std_info( response = response, robust = robust, weights = w ) } deviations[i] <- std_info$sd means[i] <- std_info$mean } } else { deviations <- 1 means <- 0 } # Out data.frame( Parameter = types$Parameter[seq_along(names(model_matrix))], Deviation_Response_Smart = deviations, Mean_Response_Smart = means, stringsAsFactors = FALSE ) } #' @keywords internal .std_info_response_basic <- function(model, info, params, robust = FALSE, w = NULL, ...) { if (inherits(model, c("gls", "lme"))) { response <- insight::get_response(model) } else if (inherits(model, "fixest")) { response <- stats::model.matrix(model, type = "lhs") } else { response <- stats::model.frame(model)[[1]] } if (info$is_linear) { if (robust) { sd_y <- datawizard::weighted_mad(response, w) mean_y <- datawizard::weighted_median(response, w) } else { sd_y <- datawizard::weighted_sd(response, w) mean_y <- datawizard::weighted_mean(response, w) } } else { sd_y <- 1 mean_y <- 0 } # Out data.frame( Parameter = params, Deviation_Response_Basic = sd_y, Mean_Response_Basic = mean_y, stringsAsFactors = FALSE ) } # Pseudo (GLMM) ----------------------------------------------------------- .std_info_pseudo <- function(model, mi, params, model_matrix, data, types, robust = FALSE, two_sd = FALSE, verbose = verbose, ...) 
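# pseudo-standardization for (G)LMMs: each coefficient is rescaled by the SD of
# its predictor and by the within- or between-group SD of the response,
# depending on whether the term varies within or between groups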
{ if (robust && verbose) { insight::format_alert("`robust` standardization not available for `pseudo` method.") } insight::check_if_installed("performance") insight::check_if_installed("datawizard") f <- if (two_sd) 2 else 1 gv <- performance::check_group_variation(model) within_vars <- gv[gv$Variation %in% c("both", "within"), "Variable"] id <- insight::get_random(model)[[1]] w <- insight::get_weights(model, remove_na = TRUE) ## Find which parameters vary on level 1 ("within") is_within <- logical(length = length(params)) is_within[] <- NA for (i in seq_along(params)) { if (types[i] == "intercept") { # nolint is_within[i] <- FALSE } else if (types[i] == "numeric") { is_within[i] <- insight::clean_names(params[i]) %in% within_vars } else if (types[i] == "factor") { is_within[i] <- any(sapply(paste0("^", within_vars), grepl, insight::clean_names(params[i]))) } else if (types[i] == "interaction") { ints <- unlist(strsplit(params[i], ":", fixed = TRUE)) is_within[i] <- any(sapply(ints, function(int) { int <- insight::clean_names(int) int %in% within_vars | # numeric any(sapply(paste0("^", within_vars), grepl, int)) # factor })) } } ## test "within"s are fully "within" # only relevant to numeric predictors that can have variance check_within <- is_within & types == "numeric" if (any(check_within)) { p_check_within <- params[check_within] temp_d <- data.frame(model_matrix[, p_check_within, drop = FALSE]) colnames(temp_d) <- paste0("W", seq_len(ncol(temp_d))) # overwrite because can't deal with ":" dm <- datawizard::demean(cbind(id, temp_d), select = colnames(temp_d), by = "id" ) dm <- dm[, paste0(colnames(temp_d), "_between"), drop = FALSE] has_lvl2_var <- sapply(seq_along(colnames(temp_d)), function(i) { # If more than 1% of the variance in the within-var is between: stats::var(dm[, i]) / stats::var(temp_d[, i]) }) > 0.01 also_between <- p_check_within[has_lvl2_var] if (length(also_between) && verbose) { insight::format_alert( "The following within-group terms have between-group variance:", toString(also_between), "This can inflate standardized within-group parameters associated with these terms.", "See `help(\"demean\", package = \"datawizard\")` for modeling between- and within-subject effects." ) } } ## Get 2 types of Deviation_Response_Pseudo sd_y_within <- sd_y_between <- 1 if (mi$is_linear) { insight::check_if_installed("lme4") rand_name <- insight::find_random(model)$random # maintain any y-transformations frm <- insight::find_formula(model) frm <- paste0(frm$conditional[2], " ~ (1|", rand_name, ")") m0 <- suppressWarnings(suppressMessages( lme4::lmer(stats::as.formula(frm), weights = w, data = data ) )) m0v <- insight::get_variance(m0) sd_y_between <- unname(sqrt(m0v$var.intercept)) sd_y_within <- unname(sqrt(m0v$var.residual)) } ## Get scaling factors for each parameter Deviation_Response_Pseudo <- Deviation_Pseudo <- numeric(ncol(model_matrix)) for (i in seq_along(params)) { if (types[i] == "intercept") { Deviation_Response_Pseudo[i] <- sd_y_between # doesn't matter Deviation_Pseudo[i] <- 0 } else { ## dumb way if (is_within[i]) { ## is within X <- model_matrix[[i]] Deviation_Response_Pseudo[i] <- sd_y_within } else { ## is between X <- tapply(model_matrix[[i]], id, mean) Deviation_Response_Pseudo[i] <- sd_y_between } Deviation_Pseudo[i] <- f * datawizard::weighted_sd(X, w) ## smart way? 
## DONT USE: see correspondence with between Mattan and Eran BC # m <- suppressWarnings(suppressMessages(lme4::lmer(model_matrix[[i]] ~ (1|id)))) # if (is_within[i]) { # ## is within # Deviation_Pseudo[i] <- sqrt(unname(unlist(suppressWarnings( # insight::get_variance(m, component = "residual") # )))) # Deviation_Response_Pseudo[i] <- sd_y_within # } else { # ## is between # Deviation_Pseudo[i] <- sqrt(unname(unlist(suppressWarnings( # insight::get_variance(m, component = "intercept") # )))) # Deviation_Response_Pseudo[i] <- sd_y_between # } } } data.frame( Parameter = params, Deviation_Response_Pseudo, Deviation_Pseudo, stringsAsFactors = FALSE ) } # Utils ------------------------------------------------------------------- #' @keywords internal .compute_std_info <- function(data = NULL, variable = NULL, response = NULL, robust = FALSE, two_sd = FALSE, weights = NULL) { f <- if (two_sd) 2 else 1 if (is.null(response)) { response <- as.numeric(data[, variable]) } if (robust) { sd_x <- datawizard::weighted_mad(response, weights) mean_x <- datawizard::weighted_median(response, weights) } else { sd_x <- datawizard::weighted_sd(response, weights) mean_x <- datawizard::weighted_mean(response, weights) } list(sd = f * sd_x, mean = mean_x) } parameters/R/methods_mass.R0000644000176200001440000000612614736731407015450 0ustar liggesusers# ci ----------------- #' @export ci.negbin <- ci.glm #' @export ci.polr <- function(x, ci = 0.95, dof = NULL, method = "profile", ...) { method <- match.arg(method, choices = c("profile", "wald", "robust")) robust <- !is.null(method) && method == "robust" if (.check_vcov_args(robust, ...)) { return(ci.default(x, ...)) } if (method == "profile") { out <- lapply(ci, function(i) .ci_profiled2(model = x, ci = i)) out <- do.call(rbind, out) } else { out <- .ci_generic(model = x, ci = ci, dof = dof, method = method, ...) } # for polr, profiled CI do not return CI for response levels # thus, we also calculate Wald CI and add missing rows to result out_missing <- .ci_generic(model = x, ci = ci) missing_rows <- out_missing$Parameter %in% setdiff(out_missing$Parameter, out$Parameter) out <- rbind(out, out_missing[missing_rows, ]) # fix names, to match standard error and p_value out$Parameter <- gsub("Intercept: ", "", out$Parameter, fixed = TRUE) row.names(out) <- NULL out } # SE ----------------- #' @export standard_error.polr <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (.check_vcov_args(robust, ...)) { return(standard_error.default(model, ...)) } smry <- suppressMessages(as.data.frame(stats::coef(summary(model)))) se <- smry[[2]] names(se) <- rownames(smry) .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } # p ----------------- #' @export p_value.negbin <- p_value.default #' @export p_value.rlm <- function(model, ...) { cs <- stats::coef(summary(model)) p <- 2 * stats::pt(abs(cs[, 3]), df = insight::get_df(model, type = "wald"), lower.tail = FALSE) .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export p_value.polr <- function(model, method = NULL, ...) 
{
  robust <- !is.null(method) && method == "robust"
  if (.check_vcov_args(robust, ...)) {
    return(p_value.default(model, ...))
  }

  smry <- suppressMessages(as.data.frame(stats::coef(summary(model))))
  tstat <- smry[[3]]
  p <- 2 * stats::pt(abs(tstat), df = insight::get_df(x = model, type = "wald"), lower.tail = FALSE)
  names(p) <- rownames(smry)

  .data_frame(
    Parameter = .remove_backticks_from_string(names(p)),
    p = as.vector(p)
  )
}


# parameters -----------------

#' @export
model_parameters.ridgelm <- function(model, verbose = TRUE, ...) {
  parameters <- insight::get_parameters(model)
  parameters$Scale <- as.vector(model$scales)

  # remove all complete-missing cases
  parameters <- parameters[apply(parameters, 1, function(i) !all(is.na(i))), ]
  rownames(parameters) <- NULL

  class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters))
  attr(parameters, "object_name") <- insight::safe_deparse_symbol(substitute(model))
  parameters
}

#' @export
model_parameters.polr <- model_parameters.glm

#' @export
model_parameters.negbin <- model_parameters.glm
parameters/R/options.R0000644000176200001440000000556715057525051014446 0ustar liggesusers#' @title Global options from the parameters package
#' @name parameters-options
#'
#' @section Global options to set defaults for function arguments:
#'
#' The `verbose` argument can be used to display or silence messages and
#' warnings for the different functions in the **parameters** package. However,
#' some messages providing additional information can be displayed or suppressed
#' using `options()`:
#'
#' - `options(parameters_info = TRUE)` will override the `include_info` argument
#'   in `model_parameters()` and always show the model summary for non-mixed
#'   models.
#'
#' - `options(parameters_mixed_info = TRUE)` will override the `include_info`
#'   argument in `model_parameters()` for mixed models, and will then always
#'   show the model summary.
#'
#' - `options(parameters_cimethod = TRUE)` will show the additional information
#'   about the approximation method used to calculate confidence intervals and
#'   p-values. Set to `FALSE` to hide this message when printing
#'   `model_parameters()` objects.
#'
#' - `options(parameters_exponentiate = TRUE)` will show the additional
#'   information on how to interpret coefficients of models with log-transformed
#'   response variables or with log-/logit-links when the `exponentiate`
#'   argument in `model_parameters()` is not `TRUE`. Set this option to `FALSE`
#'   to hide this message when printing `model_parameters()` objects.
#'
#' There are further options that can be used to modify the default behaviour
#' for printed outputs:
#'
#' - `options(parameters_labels = TRUE)` will use variable and value labels for
#'   pretty names, if data is labelled. If no labels are available, default pretty
#'   names are used.
#'
#' - `options(parameters_interaction = <character>)` will replace the
#'   interaction mark (by default, `*`) with the related character.
#'
#' - `options(parameters_select = <value>)` will set the default for the
#'   `select` argument. See argument's documentation for available options.
#'
#' - `options(easystats_table_width = <number>)` will set the default width for
#'   tables in text-format, i.e. for most of the outputs printed to console. If
#'   not specified, tables will be adjusted to the current available width, e.g.
#'   of the console (or any other source for textual output, like markdown
#'   files). The argument `table_width` can also be used in most `print()`
#'   methods to specify the table width as desired.
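#'   For example, `options(easystats_table_width = 80)` would fix the table
#'   width at 80 characters (the value 80 is just an illustration).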
#' #' - `options(insight_use_symbols = TRUE)` will try to print unicode-chars for #' symbols as column names, wherever possible (e.g., #' \ifelse{html}{\out{ω}}{\eqn{\omega}} instead of `Omega`). #' #' - `options(easystats_display_format = )` will set the default format for #' the `display()` methods. Can be one of `"markdown"`, `"html"`, or `"tt"`. #' See [`display.parameters_model()`] for details. #' NULL parameters/R/4_standard_error.R0000644000176200001440000001752114736731407016217 0ustar liggesusers#' @title Standard Errors #' @name standard_error #' #' @description `standard_error()` attempts to return standard errors of model #' parameters. #' #' @param model A model. #' @param force Logical, if `TRUE`, factors are converted to numerical #' values to calculate the standard error, with the lowest level being the #' value `1` (unless the factor has numeric levels, which are converted #' to the corresponding numeric value). By default, `NA` is returned for #' factors or character vectors. #' @param vcov Variance-covariance matrix used to compute uncertainty estimates #' (e.g., for robust standard errors). This argument accepts a covariance #' matrix, a function which returns a covariance matrix, or a string which #' identifies the function to be used to compute the covariance matrix. #' * A covariance matrix #' * A function which returns a covariance matrix (e.g., `stats::vcov()`) #' * A string which indicates the kind of uncertainty estimates to return. #' - Heteroskedasticity-consistent: `"HC"`, `"HC0"`, `"HC1"`, `"HC2"`, #' `"HC3"`, `"HC4"`, `"HC4m"`, `"HC5"`. See `?sandwich::vcovHC` #' - Cluster-robust: `"CR"`, `"CR0"`, `"CR1"`, `"CR1p"`, `"CR1S"`, #' `"CR2"`, `"CR3"`. See `?clubSandwich::vcovCR` #' - Bootstrap: `"BS"`, `"xy"`, `"residual"`, `"wild"`, `"mammen"`, #' `"fractional"`, `"jackknife"`, `"norm"`, `"webb"`. See #' `?sandwich::vcovBS` #' - Other `sandwich` package functions: `"HAC"`, `"PC"`, `"CL"`, `"OPG"`, #' `"PL"`. #' @param vcov_args List of arguments to be passed to the function identified by #' the `vcov` argument. This function is typically supplied by the #' **sandwich** or **clubSandwich** packages. Please refer to their #' documentation (e.g., `?sandwich::vcovHAC`) to see the list of available #' arguments. If no estimation type (argument `type`) is given, the default #' type for `"HC"` equals the default from the **sandwich** package; for type #' `"CR"`, the default is set to `"CR3"`. #' @param effects Should standard errors for fixed effects (`"fixed"`), random #' effects (`"random"`), or both (`"all"`) be returned? Only applies #' to mixed models. May be abbreviated. When standard errors for random #' effects are requested, for each grouping factor a list of standard errors #' (per group level) for random intercepts and slopes is returned. #' @param component Model component for which standard errors should be shown. #' See the documentation for your object's class in [`model_parameters()`] or #' [`p_value()`] for further details. #' @inheritParams simulate_model #' @inheritParams p_value #' @param ... Arguments passed to or from other methods. #' #' @note For Bayesian models (from **rstanarm** or **brms**), the standard #' error is the SD of the posterior samples. #' #' @return A data frame with at least two columns: the parameter names and the #' standard errors. Depending on the model, may also include columns for model #' components etc. 
#' #' @examplesIf require("sandwich") && require("clubSandwich") #' model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris) #' standard_error(model) #' #' # robust standard errors #' standard_error(model, vcov = "HC3") #' #' # cluster-robust standard errors #' standard_error(model, #' vcov = "vcovCL", #' vcov_args = list(cluster = iris$Species) #' ) #' @export standard_error <- function(model, ...) { UseMethod("standard_error") } # Default methods --------------------------------------------------------- #' @rdname standard_error #' @export standard_error.default <- function(model, effects = "fixed", component = "all", vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { # check for valid input .is_model_valid(model) dots <- list(...) se <- NULL # if a vcov is provided, we calculate standard errors based on that matrix # this is usually the case for HC (robust) standard errors # ------------------------------------------------------------------------ # vcov: matrix if (is.matrix(vcov)) { se <- sqrt(diag(vcov)) } # vcov: function which returns a matrix if (is.function(vcov)) { fun_args <- c(list(model), vcov_args, dots) se <- .safe(sqrt(diag(do.call("vcov", fun_args)))) } # vcov: character if (is.character(vcov)) { .vcov <- insight::get_varcov( model, component = component, vcov = vcov, vcov_args = vcov_args, verbose = verbose, ... ) se <- sqrt(diag(.vcov)) } # classical SE from summary() # ------------------------------------------------------------------------ if (is.null(se)) { se <- .safe({ if (grepl("Zelig-", class(model)[1], fixed = TRUE)) { unlist(model$get_se()) } else { .get_se_from_summary(model) } }) } # if retrieving SE from summary() failed, we try to calculate SE based # on classical se from get_varcov() # ------------------------------------------------------------------------ if (is.null(se)) { se <- .safe({ varcov <- insight::get_varcov(model, component = component) se_from_varcov <- sqrt(diag(varcov)) names(se_from_varcov) <- colnames(varcov) se_from_varcov }) } # output if (is.null(se)) { if (isTRUE(verbose)) { insight::format_warning("Could not extract standard errors from model object.") } } else { params <- insight::get_parameters(model, component = component) if (length(se) == nrow(params) && "Component" %in% colnames(params)) { se <- .data_frame(Parameter = params$Parameter, SE = as.vector(se), Component = params$Component) } else { se <- .data_frame(Parameter = names(se), SE = as.vector(se)) } } se } # helper ----------------------------------------------------------------- .get_se_from_summary <- function(model, component = NULL) { cs <- .safe(suppressWarnings(stats::coef(summary(model)))) se <- NULL if (is.list(cs) && !is.null(component)) { cs <- cs[[component]] } if (!is.null(cs)) { # do we have a se column? se_col <- which(colnames(cs) == "Std. Error") # if not, default to 2 if (length(se_col) == 0) { se_col <- 2 } se <- as.vector(cs[, se_col]) if (is.null(names(se))) { coef_names <- rownames(cs) if (length(coef_names) == length(se)) { names(se) <- coef_names } } } names(se) <- .remove_backticks_from_string(names(se)) se } .check_vcov_args <- function(robust, ...) { dots <- list(...) 
isTRUE(isTRUE(robust) || isTRUE(dots$robust) || ("vcov" %in% names(dots) && !is.null(dots[["vcov"]])))
}


# .ranef_se <- function(x) {
#   insight::check_if_installed("lme4")
#
#   cc <- stats::coef(model)
#
#   # get names of intercepts
#   inames <- names(cc)
#
#   # variances of fixed effects
#   fixed.vars <- diag(as.matrix(stats::vcov(model)))
#
#   # extract variances of conditional modes
#   r1 <- lme4::ranef(model, condVar = TRUE)
#
#   # we may have multiple random intercepts, iterate all
#   se.merMod <- lapply(1:length(cc), function(i) {
#     cmode.vars <- t(apply(attr(r1[[i]], "postVar"), 3, diag))
#     seVals <- sqrt(sweep(cmode.vars, 2, fixed.vars[names(r1[[i]])], "+", check.margin = FALSE))
#
#     if (length(r1[[i]]) == 1) {
#       seVals <- as.data.frame(t(seVals))
#       stats::setNames(seVals, names(r1[[i]]))
#     } else {
#       seVals <- seVals[, 1:2]
#       stats::setNames(as.data.frame(seVals), names(r1[[i]]))
#     }
#   })
#
#   # set names of list
#   names(se.merMod) <- inames
#
#   se.merMod
# }
parameters/R/parameters-package.R0000644000176200001440000000244714413011732016500 0ustar liggesusers#' @title parameters: Extracting, Computing and Exploring the Parameters of Statistical Models using R
#'
#' @description
#'
#' **parameters**' primary goal is to provide utilities for processing the
#' parameters of various statistical models (see [here](https://easystats.github.io/insight/)
#' for a list of supported models). Beyond computing *p-values*, *CIs*,
#' *Bayesian indices* and other measures for a wide variety of models, this
#' package implements features like *bootstrapping* of parameters and models,
#' *feature reduction* (feature extraction and variable selection), or tools for
#' data reduction like functions to perform cluster, factor or principal
#' component analysis.
#'
#' Another important goal of the **parameters** package is to facilitate and
#' streamline the process of reporting results of statistical models, which
#' includes the easy and intuitive calculation of standardized estimates or
#' robust standard errors and p-values. **parameters** therefore offers a
#' simple and unified syntax to process a large variety of (model) objects
#' from many different packages.
#'
#' References: Lüdecke et al. (2020) \doi{10.21105/joss.02445}
#'
#' @docType package
#' @aliases parameters-package
#' @name parameters-package
#' @keywords internal
"_PACKAGE"
parameters/R/methods_mhurdle.R0000644000176200001440000000572714736731407016145 0ustar liggesusers#' @export
model_parameters.mhurdle <- function(model,
                                     ci = 0.95,
                                     component = "all",
                                     exponentiate = FALSE,
                                     p_adjust = NULL,
                                     keep = NULL,
                                     drop = NULL,
                                     verbose = TRUE,
                                     ...) {
  component <- insight::validate_argument(
    component,
    c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary")
  )

  params <- .model_parameters_generic(
    model,
    ci = ci,
    merge_by = c("Parameter", "Component"),
    exponentiate = exponentiate,
    effects = "fixed",
    component = component,
    p_adjust = p_adjust,
    keep_parameters = keep,
    drop_parameters = drop,
    verbose = verbose,
    ...
  )

  params$Parameter <- gsub("^(h1|h2|h3)\\.(.*)", "\\2", params$Parameter)
  attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model))
  params
}


#' @export
p_value.mhurdle <- function(model, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ...)
{ component <- match.arg(component) s <- summary(model) params <- insight::get_parameters(model, component = "all") pvals <- data.frame( Parameter = rownames(s$coefficients), p = as.vector(s$coefficients[, 4]), stringsAsFactors = FALSE ) params <- merge(params, pvals, sort = FALSE) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } params[c("Parameter", "p", "Component")] } #' @export ci.mhurdle <- function(x, ci = 0.95, ...) { .ci_generic(model = x, ci = ci, ...) } #' @export standard_error.mhurdle <- function(model, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ...) { component <- match.arg(component) s <- summary(model) params <- insight::get_parameters(model, component = "all") se <- data.frame( Parameter = rownames(s$coefficients), SE = as.vector(s$coefficients[, 2]), stringsAsFactors = FALSE ) params <- merge(params, se, sort = FALSE) if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } params[c("Parameter", "SE", "Component")] } #' @export simulate_model.mhurdle <- function(model, iterations = 1000, component = c("all", "conditional", "zi", "zero_inflated", "infrequent_purchase", "ip", "auxiliary"), ...) { component <- match.arg(component) out <- .simulate_model(model, iterations, component = component, effects = "fixed", ...) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } parameters/R/standardize_parameters.R0000644000176200001440000007030715057525051017510 0ustar liggesusers#' Parameters standardization #' #' Compute standardized model parameters (coefficients). #' #' @param model A statistical model. #' @param method The method used for standardizing the parameters. Can be #' `"refit"` (default), `"posthoc"`, `"smart"`, `"basic"`, `"pseudo"` or #' `"sdy"`. See Details'. #' @param include_response If `TRUE` (default), the response value will also be #' standardized. If `FALSE`, only the predictors will be standardized. For #' GLMs the response value will never be standardized (see *Generalized Linear #' Models* section). #' @inheritParams datawizard::standardize.default #' @inheritParams effectsize::chisq_to_phi #' @param ... For `standardize_parameters()`, arguments passed to #' [`model_parameters()`], such as: #' - `ci_method`, `centrality` for Mixed models and Bayesian models... #' - `exponentiate`, ... #' - etc. #' #' @details #' #' ## Standardization Methods #' - **refit**: This method is based on a complete model re-fit with a #' standardized version of the data. Hence, this method is equal to #' standardizing the variables before fitting the model. It is the "purest" and #' the most accurate (Neter et al., 1989), but it is also the most #' computationally costly and long (especially for heavy models such as Bayesian #' models). This method is particularly recommended for complex models that #' include interactions or transformations (e.g., polynomial or spline terms). #' The `robust` (default to `FALSE`) argument enables a robust standardization #' of data, i.e., based on the `median` and `MAD` instead of the `mean` and #' `SD`. 
**See [`datawizard::standardize()`] for more details.**
#'   - **Note** that `standardize_parameters(method = "refit")` may not return
#'     the same results as fitting a model on data that has been standardized with
#'     `standardize()`; `standardize_parameters()` uses the data used by the model
#'     fitting function, which might not be the same data if there are missing
#'     values. See the `remove_na` argument in `standardize()`.
#' - **posthoc**: Post-hoc standardization of the parameters, aiming at
#'   emulating the results obtained by "refit" without refitting the model. The
#'   coefficients are divided by the standard deviation (or MAD if `robust`) of
#'   the outcome (which becomes their expression 'unit'). Then, the coefficients
#'   related to numeric variables are additionally multiplied by the standard
#'   deviation (or MAD if `robust`) of the related terms, so that they correspond
#'   to changes of 1 SD of the predictor (e.g., "A change in 1 SD of `x` is
#'   related to a change of 0.24 of the SD of `y`"). This does not apply to binary
#'   variables or factors, so the coefficients are still related to changes in
#'   levels. This method is not accurate and tends to give aberrant results when
#'   interactions are specified.
#' - **basic**: This method is similar to `method = "posthoc"`, but treats all
#'   variables as continuous: coefficients of factor levels (transformed to
#'   integers) and of binary predictors are also scaled, namely by the standard
#'   deviation of the corresponding column of the model matrix. Although
#'   inappropriate for these cases, this method is the one implemented by default
#'   in other software packages, such as [`lm.beta::lm.beta()`].
#' - **smart** (Standardization of Model's parameters with Adjustment,
#'   Reconnaissance and Transformation - *experimental*): Similar to `method =
#'   "posthoc"` in that it does not involve model refitting. The difference is
#'   that the SD (or MAD if `robust`) of the response is computed on the relevant
#'   section of the data. For instance, if a factor with 3 levels A (the
#'   intercept), B and C is entered as a predictor, the effect corresponding to B
#'   vs. A will be scaled by the variance of the response at the intercept only.
#'   As a result, the coefficients for effects of factors are similar to a Glass'
#'   delta.
#' - **pseudo** (*for 2-level (G)LMMs only*): In this (post-hoc) method, the
#'   response and the predictor are standardized based on the level of prediction
#'   (levels are detected with [`performance::check_group_variation()`]): Predictors
#'   are standardized based on their SD at the level of prediction (see also
#'   [`datawizard::demean()`]); The outcome (in linear LMMs) is standardized based
#'   on a fitted random-intercept-model, where `sqrt(random-intercept-variance)`
#'   is used for level 2 predictors, and `sqrt(residual-variance)` is used for
#'   level 1 predictors (Hoffman 2015, page 342). A warning is given when a
#'   within-group variable is found to have excess between-group variance.
#' - **sdy** (*for logistic regression models only*): This y-standardization
#'   is useful when comparing coefficients of logistic regression models across
#'   models for the same sample. Unobserved heterogeneity varies across models
#'   with different independent variables, and thus, odds ratios from the same
#'   predictor of different models cannot be compared directly. The
#'   y-standardization makes coefficients "comparable across models by dividing
#'   them with the estimated standard deviation of the latent variable for each
#'   model" (Mood 2010).
Thus, whenever one has multiple logistic regression models #' that are fit to the same data and share certain predictors (e.g. nested #' models), it can be useful to use this standardization approach to make #' log-odds or odds ratios comparable. #' #' ## Transformed Variables #' When the model's formula contains transformations (e.g. `y ~ exp(X)`) `method #' = "refit"` will give different results compared to `method = "basic"` #' (`"posthoc"` and `"smart"` do not support such transformations): While #' `"refit"` standardizes the data *prior* to the transformation (e.g. #' equivalent to `exp(scale(X))`), the `"basic"` method standardizes the #' transformed data (e.g. equivalent to `scale(exp(X))`). #' \cr\cr #' See the *Transformed Variables* section in [`datawizard::standardize.default()`] #' for more details on how different transformations are dealt with when #' `method = "refit"`. #' #' ## Confidence Intervals #' The returned confidence intervals are re-scaled versions of the #' unstandardized confidence intervals, and not "true" confidence intervals of #' the standardized coefficients (cf. Jones & Waller, 2015). #' #' ## Generalized Linear Models #' Standardization for generalized linear models (GLM, GLMM, etc) is done only #' with respect to the predictors (while the outcome remains as-is, #' unstandardized) - maintaining the interpretability of the coefficients (e.g., #' in a binomial model: the exponent of the standardized parameter is the OR of #' a change of 1 SD in the predictor, etc.) #' #' ## Dealing with Factors #' `standardize(model)` or `standardize_parameters(model, method = "refit")` do #' *not* standardize categorical predictors (i.e. factors) / their #' dummy-variables, which may be a different behaviour compared to other R #' packages (such as \pkg{lm.beta}) or other software packages (like SPSS). To #' mimic such behaviours, either use `standardize_parameters(model, method = #' "basic")` to obtain post-hoc standardized parameters, or standardize the data #' with `datawizard::standardize(data, force = TRUE)` *before* fitting the #' model. #' #' @return A data frame with the standardized parameters (`Std_*`, depending on #' the model type) and their CIs (`CI_low` and `CI_high`). Where applicable, #' standard errors (SEs) are returned as an attribute (`attr(x, #' "standard_error")`). #' #' @family standardize #' @family effect size indices #' #' @seealso See also [package vignette](https://easystats.github.io/parameters/articles/standardize_parameters_effsize.html). 
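#'
#' @details As a quick sanity check (a sketch for illustration, not part of
#' the package API): for a simple linear model with one continuous predictor,
#' the `"basic"` method reproduces the classic standardized beta,
#' `b * SD(x) / SD(y)`:
#'
#' ```
#' m <- lm(mpg ~ wt, data = mtcars)
#' standardize_parameters(m, method = "basic")
#' # manual equivalent for the slope
#' coef(m)[["wt"]] * sd(mtcars$wt) / sd(mtcars$mpg)
#' ```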
#' #' @examples #' model <- lm(len ~ supp * dose, data = ToothGrowth) #' standardize_parameters(model, method = "refit") #' \donttest{ #' standardize_parameters(model, method = "posthoc") #' standardize_parameters(model, method = "smart") #' standardize_parameters(model, method = "basic") #' #' # Robust and 2 SD #' standardize_parameters(model, robust = TRUE) #' standardize_parameters(model, two_sd = TRUE) #' #' model <- glm(am ~ cyl * mpg, data = mtcars, family = "binomial") #' standardize_parameters(model, method = "refit") #' standardize_parameters(model, method = "posthoc") #' standardize_parameters(model, method = "basic", exponentiate = TRUE) #' } #' #' @examplesIf require("lme4", quietly = TRUE) #' \donttest{ #' m <- lme4::lmer(mpg ~ cyl + am + vs + (1 | cyl), mtcars) #' standardize_parameters(m, method = "pseudo", ci_method = "satterthwaite") #' } #' #' @examplesIf require("rstanarm", quietly = TRUE) #' \donttest{ #' model <- rstanarm::stan_glm(rating ~ critical + privileges, data = attitude, refresh = 0) #' standardize_posteriors(model, method = "refit", verbose = FALSE) #' standardize_posteriors(model, method = "posthoc", verbose = FALSE) #' standardize_posteriors(model, method = "smart", verbose = FALSE) #' head(standardize_posteriors(model, method = "basic", verbose = FALSE)) #' } #' #' @references #' - Hoffman, L. (2015). Longitudinal analysis: Modeling within-person fluctuation #' and change. Routledge. #' #' - Jones, J. A., & Waller, N. G. (2015). The normal-theory and asymptotic #' distribution-free (ADF) covariance matrix of standardized regression #' coefficients: theoretical extensions and finite sample behavior. #' Psychometrika, 80(2), 365-378. #' #' - Neter, J., Wasserman, W., & Kutner, M. H. (1989). Applied linear #' regression models. #' #' - Gelman, A. (2008). Scaling regression inputs by dividing by two standard #' deviations. Statistics in medicine, 27(15), 2865-2873. #' #' - Mood C. Logistic Regression: Why We Cannot Do What We Think We Can Do, and #' What We Can Do About It. European Sociological Review (2010) 26:67–82. #' #' @export #' @aliases standardise_parameters standardize_parameters <- function(model, method = "refit", ci = 0.95, robust = FALSE, two_sd = FALSE, include_response = TRUE, verbose = TRUE, ...) { UseMethod("standardize_parameters") } #' @export standardise_parameters <- standardize_parameters #' @export standardize_parameters.default <- function(model, method = "refit", ci = 0.95, robust = FALSE, two_sd = FALSE, include_response = TRUE, verbose = TRUE, ...) { # check for valid input .is_model_valid(model) object_name <- insight::safe_deparse_symbol(substitute(model)) method <- match.arg(method, c("refit", "posthoc", "smart", "basic", "classic", "pseudo", "sdy")) m_info <- .get_model_info(model, ...) include_response <- include_response && .safe_to_standardize_response(m_info, verbose = verbose) if (method == "refit") { model <- datawizard::standardize(model, robust = robust, two_sd = two_sd, include_response = include_response, verbose = verbose, m_info = m_info ) } # need model_parameters to return the parameters, not the terms if (inherits(model, "aov")) { class(model) <- class(model)[class(model) != "aov"] } pars <- model_parameters(model, ci = ci, standardize = NULL, effects = "fixed", as_draws = TRUE, ...) # save attributes for later, these are lost in between att <- attributes(pars) # should post hoc exponentiate? 
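  # (`exponentiate` is not a formal argument of this method - it is recovered
  # from the user's original call so that already-exponentiated coefficients
  # can be rescaled correctly below, i.e. by powers instead of products)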
exponentiate <- isTRUE(eval(match.call()[["exponentiate"]], envir = parent.frame())) coefficient_name <- attr(pars, "coefficient_name") if (method %in% c("posthoc", "smart", "basic", "classic", "pseudo", "sdy")) { if (m_info$is_multivariate) { insight::format_error( "Cannot post-hoc standardize multivariate models. Try using method \"refit\" instead." ) } if (method == "sdy" && !m_info$is_binomial) { insight::format_error("Method \"sdy\" is only applicable to logistic regression models.") } pars <- .standardize_parameters_posthoc( pars, method, model, m_info, robust, two_sd, exponentiate, include_response, verbose ) method <- attr(pars, "std_method") robust <- attr(pars, "robust") } ## clean cols if (!is.null(ci)) pars$CI <- attr(pars, "ci") colnm <- c("Component", "Response", "Group", "Parameter", utils::head(.col_2_scale, -2), "CI", "CI_low", "CI_high") pars <- pars[, colnm[colnm %in% colnames(pars)]] if (!is.null(coefficient_name) && coefficient_name %in% c("Odds Ratio", "Risk Ratio", "IRR", "Prevalence Ratio")) { colnames(pars)[colnames(pars) == "Coefficient"] <- gsub(" ", "_", coefficient_name, fixed = TRUE) } i <- colnames(pars) %in% c("Coefficient", "Median", "Mean", "MAP", "Odds_Ratio", "Risk_Ratio", "IRR", "Prevalence_Ratio") colnames(pars)[i] <- paste0("Std_", colnames(pars)[i]) ## SE attribute? if ("SE" %in% colnames(pars)) { attr(pars, "standard_error") <- pars$SE pars$SE <- NULL } # add those attributes back here... if (!is.null(att)) { attributes(pars) <- utils::modifyList(att, attributes(pars)) } ## attributes attr(pars, "std_method") <- method attr(pars, "two_sd") <- two_sd attr(pars, "robust") <- robust attr(pars, "object_name") <- object_name attr(pars, "ci") <- ci attr(pars, "include_response") <- include_response class(pars) <- c("parameters_standardized", "effectsize_table", "see_effectsize_table", "data.frame") pars } #' @export standardize_parameters.mediate <- function(model, method = "refit", ci = 0.95, robust = FALSE, two_sd = FALSE, include_response = TRUE, verbose = TRUE, ...) { if (method != "refit") { insight::format_warning("Only `method=\"refit\"` is supported for mediation models.") } NextMethod("standardize_parameters", method = "refit", ci = ci, robust = robust, two_sd = two_sd, include_response = include_response, verbose = verbose ) } #' @export standardize_parameters.parameters_model <- function(model, method = "refit", ci = NULL, robust = FALSE, two_sd = FALSE, include_response = TRUE, verbose = TRUE, ...) { if (method == "refit") { insight::format_error( "Argument `refit` not supported for standardizing results from `model_parameters()`." ) } if (!is.null(ci)) { insight::format_alert( "Argument `ci` not supported for standardizing results from `model_parameters()`. It is ignored." ) } pars <- model ci <- attr(pars, "ci") model <- .get_object(pars) if (is.null(model)) model <- attr(pars, "object") m_info <- .get_model_info(model, ...) 
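  # the response is only standardized when this is safe to do, e.g. not for
  # GLM-type (non-linear) or survival models (see `.safe_to_standardize_response()`)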
include_response <- include_response && .safe_to_standardize_response(m_info, verbose = verbose) exponentiate <- attr(pars, "exponentiate") if (is.null(exponentiate)) { exponentiate <- FALSE } pars <- .standardize_parameters_posthoc( pars, method, model, m_info, robust, two_sd, exponentiate, include_response, verbose ) method <- attr(pars, "std_method") robust <- attr(pars, "robust") ## clean cols if (!is.null(ci)) pars$CI <- attr(pars, "ci") colnm <- c("Component", "Response", "Group", "Parameter", utils::head(.col_2_scale, -2), "CI", "CI_low", "CI_high") pars <- pars[, colnm[colnm %in% colnames(pars)]] i <- colnames(pars) %in% c("Coefficient", "Median", "Mean", "MAP") colnames(pars)[i] <- paste0("Std_", colnames(pars)[i]) ## SE attribute? if ("SE" %in% colnames(pars)) { attr(pars, "standard_error") <- pars$SE pars$SE <- NULL } ## attributes attr(pars, "std_method") <- method attr(pars, "two_sd") <- two_sd attr(pars, "robust") <- robust attr(pars, "ci") <- ci attr(pars, "include_response") <- include_response class(pars) <- c("parameters_standardized", "effectsize_table", "see_effectsize_table", "data.frame") pars } #' @export standardize_parameters.bootstrap_model <- function(model, method = "refit", ci = 0.95, robust = FALSE, two_sd = FALSE, include_response = TRUE, verbose = TRUE, ...) { object_name <- insight::safe_deparse_symbol(substitute(model)) method <- match.arg(method, c("refit", "posthoc", "smart", "basic", "classic", "pseudo", "sdy")) pars <- model model <- attr(pars, "original_model") m_info <- .get_model_info(model, ...) include_response <- include_response && .safe_to_standardize_response(m_info, verbose = verbose) if (method == "refit") { insight::format_error("The `refit` method is not supported for bootstrapped models.") ## But it would look something like this: # model <- standardize(model, robust = robust, two_sd = two_sd, verbose = verbose, m_info = m_info) # model <- parameters::bootstrap_model(model, iterations = 1000, verbose = verbose) # return(model) } # need model_parameters to return the parameters, not the terms if (inherits(model, "aov")) class(model) <- class(model)[class(model) != "aov"] if (method %in% c("posthoc", "smart", "basic", "classic", "pseudo")) { pars <- .standardize_posteriors_posthoc(pars, method, model, m_info, robust, two_sd, include_response, verbose) method <- attr(pars, "std_method") robust <- attr(pars, "robust") } pars <- bayestestR::describe_posterior(pars, centrality = "median", ci = ci, ci_method = "quantile", test = NULL ) names(pars)[names(pars) == "Median"] <- "Std_Coefficient" attr(pars, "std_method") <- method attr(pars, "two_sd") <- two_sd attr(pars, "robust") <- robust attr(pars, "object_name") <- object_name attr(pars, "ci") <- ci attr(pars, "include_response") <- include_response class(pars) <- c("parameters_standardized", "effectsize_table", "see_effectsize_table", "data.frame") pars } #' @export standardize_parameters.bootstrap_parameters <- function(model, method = "refit", ci = 0.95, robust = FALSE, two_sd = FALSE, include_response = TRUE, verbose = TRUE, ...) { standardize_parameters(attr(model, "boot_samples"), method = method, ci = ci, robust = robust, two_sd = two_sd, include_response = include_response, verbose = verbose, ... ) } #' @export standardize_parameters.model_fit <- function(model, method = "refit", ci = 0.95, robust = FALSE, two_sd = FALSE, include_response = TRUE, verbose = TRUE, ...) 
{ standardize_parameters( model$fit, method = method, ci = ci, robust = robust, two_sd = two_sd, include_response = include_response, verbose = verbose, ... ) } # methods -------------------------------- #' @export format.parameters_standardized <- function(x, digits = 2, format = c("text", "markdown", "html"), ...) { format <- match.arg(format) footer <- subtitle <- NULL caption <- sprintf("Standardization method: %s", attr(x, "std_method")) # robust / two_sd if (attr(x, "two_sd") || attr(x, "robust")) { footer <- sprintf( "Scaled by %s %s%s from the %s.", ifelse(attr(x, "two_sd"), "two", "one"), ifelse(attr(x, "robust"), "MAD", "SD"), ifelse(attr(x, "two_sd"), "s", ""), ifelse(attr(x, "robust"), "median", "mean") ) } # include_response if (!attr(x, "include_response")) { footer <- c(footer, "Response is unstandardized.") } if (format %in% c("markdown", "text") && !is.null(footer)) { footer <- lapply(footer, function(ftr) { c(paste0("\n- ", ftr), "blue") }) } attr(x, "table_footer") <- footer if (format %in% c("markdown", "text") && !is.null(caption)) { caption <- c(paste0("# ", caption), "blue") } attr(x, "table_caption") <- caption attr(x, "table_subtitle") <- subtitle attr(x, "ci") <- NULL attr(x, "ci_method") <- NULL insight::format_table(x, digits = digits, ci_digits = digits, preserve_attributes = TRUE, ...) } #' @export display.parameters_standardized <- function(object, format = "markdown", digits = 2, ...) { format <- .display_default_format(format) fun_args <- list(x = object, digits = digits) if (format %in% c("html", "tt")) { fun_args$backend <- format do.call(print_html, c(fun_args, list(...))) } else { do.call(print_md, c(fun_args, list(...))) } } #' @export print.parameters_standardized <- function(x, digits = 2, ...) { x_fmt <- format(x, digits = digits, output = "text", ...) cat(insight::export_table(x_fmt, format = NULL, ...)) invisible(x) } #' @export print_md.parameters_standardized <- function(x, digits = 2, ...) { x_fmt <- format(x, digits = digits, output = "markdown", ...) insight::export_table(x_fmt, format = "markdown", ...) } #' @export print_html.parameters_standardized <- function(x, digits = 2, ...) { # which engine? engine <- .check_format_backend(...) x_fmt <- format(x, digits = digits, output = "html", ...) insight::export_table(x_fmt, format = engine, ...) 
}


# helper -------------------------

#' @keywords internal
.standardize_parameters_posthoc <- function(pars, method, model, mi, robust, two_sd, exponentiate, include_response, verbose) {
  # validation check for "pseudo"
  method <- .should_pseudo(method, model, mi)

  method <- .cant_smart_or_posthoc(method, model, mi, pars$Parameter)

  if (robust && method == "pseudo") {
    insight::format_alert("`robust` standardization not available for `pseudo` method.")
    robust <- FALSE
  }

  ## Get scaling factors
  deviations <- standardize_info(
    model,
    robust = robust,
    include_pseudo = method == "pseudo",
    two_sd = two_sd,
    model_info = mi
  )

  i_missing <- setdiff(seq_len(nrow(pars)), seq_len(nrow(deviations)))
  unstd <- pars
  if (length(i_missing)) {
    deviations[i_missing, ] <- NA
  }

  if (method == "basic") { # nolint
    col_dev_resp <- "Deviation_Response_Basic"
    col_dev_pred <- "Deviation_Basic"
  } else if (method == "posthoc") {
    col_dev_resp <- "Deviation_Response_Basic"
    col_dev_pred <- "Deviation_Smart"
  } else if (method == "smart") {
    col_dev_resp <- "Deviation_Response_Smart"
    col_dev_pred <- "Deviation_Smart"
  } else if (method == "pseudo") {
    col_dev_resp <- "Deviation_Response_Pseudo"
    col_dev_pred <- "Deviation_Pseudo"
  } else if (method == "sdy") {
    col_dev_resp <- "Deviation_Response_Basic"
    col_dev_pred <- "Deviation_SDy"
    include_response <- FALSE
  } else {
    insight::format_error("`method` must be one of \"basic\", \"posthoc\", \"smart\", \"pseudo\" or \"sdy\".")
  }

  .dev_pred <- deviations[[col_dev_pred]]
  .dev_resp <- deviations[[col_dev_resp]]
  if (!include_response) .dev_resp <- 1
  .dev_factor <- .dev_pred / .dev_resp

  # apply standardization
  pars[, colnames(pars) %in% .col_2_scale] <- lapply(
    pars[, colnames(pars) %in% .col_2_scale, drop = FALSE],
    function(x) {
      if (exponentiate) {
        if (method == "sdy") {
          exp(x * .dev_factor)
        } else {
          x^.dev_factor
        }
      } else {
        x * .dev_factor
      }
    }
  )

  to_complete <- apply(pars[, colnames(pars) %in% .col_2_scale], 1, anyNA)
  if (length(i_missing) || any(to_complete)) {
    i_missing <- union(i_missing, which(to_complete))
    pars[i_missing, colnames(pars) %in% .col_2_scale] <- unstd[i_missing, colnames(pars) %in% .col_2_scale]
  }

  attr(pars, "std_method") <- method
  attr(pars, "two_sd") <- two_sd
  attr(pars, "robust") <- robust

  pars
}

#' @keywords internal
.col_2_scale <- c("Coefficient", "Median", "Mean", "MAP", "SE", "CI_low", "CI_high")

#' @keywords internal
.cant_smart_or_posthoc <- function(method, model, mi, params) {
  if (method %in% c("smart", "posthoc")) {
    cant_posthocsmart <- FALSE

    if (mi$is_linear && colnames(stats::model.frame(model))[1] != insight::find_response(model)) {
      cant_posthocsmart <- TRUE
    }

    # factors are allowed
    if (!cant_posthocsmart && !all(params == insight::clean_names(params) | grepl("(as.factor|factor)\\(", params))) {
      cant_posthocsmart <- TRUE
    }

    if (cant_posthocsmart) {
      insight::format_alert(
        "Method `", method, "` does not currently support models with transformed parameters.",
        "Reverting to `basic` method. Consider using the `refit` method directly."
      )
      method <- "basic"
    }
  }
  method
}

#' @keywords internal
.should_pseudo <- function(method, model, mi) {
  if (method == "pseudo" && !(mi$is_mixed && length(insight::find_random(model)$random) == 1)) {
    insight::format_alert(
      "`pseudo` method only available for 2-level (G)LMMs.",
      "Setting method to `basic`."
) method <- "basic" } method } #' @keywords internal .safe_to_standardize_response <- function(info, verbose = TRUE) { if (is.null(info)) { if (verbose) { insight::format_warning( "Unable to verify if response should not be standardized.", "Response will be standardized." ) } return(TRUE) } # check if model has a response variable that should not be standardized. info$is_linear && info$family != "inverse.gaussian" && !info$is_survival && !info$is_censored # # alternative would be to keep something like: # !info$is_count && # !info$is_ordinal && # !info$is_multinomial && # !info$is_beta && # !info$is_censored && # !info$is_binomial && # !info$is_survival # # And then treating response for "Gamma()" or "inverse.gaussian" similar to # # log-terms... } #' @keywords internal .get_model_info <- function(model, model_info = NULL, ...) { if (is.null(model_info)) model_info <- insight::model_info(model, verbose = FALSE) model_info } parameters/R/methods_bayestestR.R0000644000176200001440000000007214037763760016625 0ustar liggesusers#' @importFrom bayestestR ci #' @export bayestestR::ci parameters/R/methods_selection.R0000644000176200001440000000667315036175633016476 0ustar liggesusers#' @export model_parameters.selection <- function(model, ci = 0.95, component = "all", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { component <- insight::validate_argument( component, c("all", "selection", "outcome", "auxiliary") ) out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, component = component, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, keep_parameters = keep, drop_parameters = drop, include_info = include_info, p_adjust = p_adjust, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export p_value.selection <- function(model, component = "all", ...) { component <- insight::validate_argument( component, c("all", "selection", "outcome", "auxiliary") ) s <- summary(model) rn <- row.names(s$estimate) estimates <- as.data.frame(s$estimate, row.names = FALSE) params <- data.frame( Parameter = rn, p = estimates[[4]], Component = "selection", stringsAsFactors = FALSE, row.names = NULL ) params$Component[s$param$index$errTerms] <- "auxiliary" params$Component[s$param$index$outcome] <- "outcome" if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } insight::text_remove_backticks(params, verbose = FALSE) } #' @export standard_error.selection <- function(model, component = "all", ...) { component <- insight::validate_argument( component, c("all", "selection", "outcome", "auxiliary") ) s <- summary(model) rn <- row.names(s$estimate) estimates <- as.data.frame(s$estimate, row.names = FALSE) params <- data.frame( Parameter = rn, SE = estimates[[2]], Component = "selection", stringsAsFactors = FALSE, row.names = NULL ) params$Component[s$param$index$errTerms] <- "auxiliary" params$Component[s$param$index$outcome] <- "outcome" if (component != "all") { params <- params[params$Component == component, , drop = FALSE] } insight::text_remove_backticks(params, verbose = FALSE) } #' @export simulate_model.selection <- function(model, iterations = 1000, component = "all", ...) 
{
  component <- insight::validate_argument(
    component,
    c("all", "selection", "outcome", "auxiliary")
  )
  out <- .simulate_model(model, iterations, component = component, effects = "fixed", ...)
  class(out) <- c("parameters_simulate_model", class(out))
  attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model))
  out
}


#' @export
ci.selection <- ci.default
parameters/R/methods_bife.R0000644000176200001440000000157714717114773015413 0ustar liggesusers#' @export
standard_error.bife <- function(model, ...) {
  cs <- summary(model)
  se <- cs$cm[, 2]

  .data_frame(
    Parameter = .remove_backticks_from_string(rownames(cs$cm)),
    SE = as.vector(se)
  )
}


#' @export
p_value.bife <- function(model, ...) {
  cs <- summary(model)
  p <- cs$cm[, 4]

  .data_frame(
    Parameter = .remove_backticks_from_string(rownames(cs$cm)),
    p = as.vector(p)
  )
}


#' @export
model_parameters.bifeAPEs <- function(model, ...) {
  est <- model[["delta"]]
  se <- sqrt(diag(model[["vcov"]]))
  z <- est / se
  p <- 2 * stats::pnorm(-abs(z))
  nms <- names(est)

  out <- data.frame(nms, est, se, z, p)
  colnames(out) <- c("Parameter", "Coefficient", "Std. error", "z value", "p")
  rownames(out) <- NULL
  out <- as.data.frame(out)
  class(out) <- c("parameters_model", "see_parameters_model", class(out))

  out
}
parameters/R/equivalence_test.R0000644000176200001440000010610415066721001016301 0ustar liggesusers#' @importFrom bayestestR equivalence_test
#' @export
bayestestR::equivalence_test


#' @title Equivalence test
#'
#' @description Compute the (conditional) equivalence test for frequentist models.
#'
#' @param x A statistical model.
#' @param range The range of practical equivalence of an effect. May be
#'   `"default"`, to automatically define this range based on properties of the
#'   model's data.
#' @param ci Confidence Interval (CI) level. Defaults to `0.95` (`95%`).
#' @param rule Character, indicating the rules when testing for practical
#'   equivalence. Can be `"bayes"`, `"classic"` or `"cet"`. See 'Details'.
#' @param verbose Toggle warnings and messages.
#' @param ... Arguments passed to or from other methods.
#' @inheritParams model_parameters.glmmTMB
#' @inheritParams p_value
#'
#' @seealso For more details, see [bayestestR::equivalence_test()]. Further
#'   readings can be found in the references. See also [`p_significance()`] for
#'   a unidirectional equivalence test.
#'
#' @details In classical null hypothesis significance testing (NHST) within a
#'   frequentist framework, it is not possible to accept the null hypothesis, H0 -
#'   unlike in Bayesian statistics, where such probability statements are
#'   possible. "[...] one can only reject the null hypothesis if the test
#'   statistics falls into the critical region(s), or fail to reject this
#'   hypothesis. In the latter case, all we can say is that no significant effect
#'   was observed, but one cannot conclude that the null hypothesis is true."
#'   (_Pernet 2017_). One way to address this issue without Bayesian methods is
#'   *Equivalence Testing*, as implemented in `equivalence_test()`. While in NHST
#'   you can either reject the null hypothesis or claim an inconclusive result,
#'   the equivalence test - according to _Pernet_ - adds a third category,
#'   *"accept"*. Roughly speaking, the idea behind equivalence testing in a
#'   frequentist framework is to check whether an estimate and its uncertainty
#'   (i.e. confidence interval) fall within a region of "practical equivalence".
#'   Depending on the rule for this test (see below), statistical significance
#'   does not necessarily indicate whether the null hypothesis can be rejected or
#'   not, i.e. the classical interpretation of the p-value may differ from the
#'   results returned from the equivalence test.
#'
#' ## Calculation of equivalence testing
#' - "bayes" - Bayesian rule (Kruschke 2018)
#'
#'   This rule follows the "HDI+ROPE decision rule" (_Kruschke, 2014, 2018_) used
#'   for the [`Bayesian counterpart()`][bayestestR::equivalence_test]. This
#'   means, if the confidence intervals are completely outside the ROPE, the
#'   "null hypothesis" for this parameter is "rejected". If the ROPE
#'   completely covers the CI, the null hypothesis is accepted. Else, it's
#'   undecided whether to accept or reject the null hypothesis. Desirable
#'   results are low proportions inside the ROPE (the closer to zero the
#'   better).
#'
#' - "classic" - The TOST rule (Lakens 2017)
#'
#'   This rule follows the "TOST rule", i.e. a two one-sided test procedure
#'   (_Lakens 2017_). Following this rule...
#'   - practical equivalence is assumed (i.e. H0 *"accepted"*) when the narrow
#'     confidence intervals are completely inside the ROPE, no matter if the
#'     effect is statistically significant or not;
#'   - practical equivalence (i.e. H0) is *rejected*, when the coefficient is
#'     statistically significant, both when the narrow confidence intervals
#'     (i.e. `1-2*alpha`) include or exclude the ROPE boundaries, but the
#'     narrow confidence intervals are *not fully covered* by the ROPE;
#'   - else the decision whether to accept or reject practical equivalence is
#'     undecided (i.e. when effects are *not* statistically significant *and*
#'     the narrow confidence intervals overlap the ROPE).
#'
#' - "cet" - Conditional Equivalence Testing (Campbell/Gustafson 2018)
#'
#'   The Conditional Equivalence Testing as described by _Campbell and
#'   Gustafson 2018_. According to this rule, practical equivalence is
#'   rejected when the coefficient is statistically significant. When the
#'   effect is *not* significant and the narrow confidence intervals are
#'   completely inside the ROPE, we accept (i.e. assume) practical equivalence,
#'   else it is undecided.
#'
#' ## Levels of Confidence Intervals used for Equivalence Testing
#' For `rule = "classic"`, "narrow" confidence intervals are used for
#' equivalence testing. "Narrow" means the interval is not 1 - alpha, but
#' 1 - 2 * alpha. Thus, if `ci = .95`, alpha is assumed to be 0.05 and
#' internally a ci-level of 0.90 is used. `rule = "cet"` uses both regular and
#' narrow confidence intervals, while `rule = "bayes"` only uses the regular
#' intervals.
#'
#' ## p-Values
#' The equivalence p-value is the area of the (cumulative) confidence
#' distribution that is outside of the region of equivalence. It can be
#' interpreted as a p-value for *rejecting* the alternative hypothesis and
#' *accepting* the "null hypothesis" (i.e. assuming practical equivalence). That
#' is, a high p-value means we reject the assumption of practical equivalence
#' and accept the alternative hypothesis.
#'
#' ## Second Generation p-Value (SGPV)
#' Second generation p-values (SGPV) were proposed as a statistic that
#' represents _the proportion of data-supported hypotheses that are also null
#' hypotheses_ _(Blume et al. 2018, Lakens and Delacre 2020)_. It represents the
#' proportion of the _full_ confidence interval range (assuming a normally or
#' t-distributed, equal-tailed interval, based on the model) that is inside the
#' ROPE.
The SGPV ranges from zero to one. Higher values indicate that the #' effect is more likely to be practically equivalent ("not of interest"). #' #' Note that the assumed interval, which is used to calculate the SGPV, is an #' estimation of the _full interval_ based on the chosen confidence level. For #' example, if the 95% confidence interval of a coefficient ranges from -1 to 1, #' the underlying _full (normally or t-distributed) interval_ approximately #' ranges from -1.9 to 1.9, see also following code: #' #' ``` #' # simulate full normal distribution #' out <- bayestestR::distribution_normal(10000, 0, 0.5) #' # range of "full" distribution #' range(out) #' # range of 95% CI #' round(quantile(out, probs = c(0.025, 0.975)), 2) #' ``` #' #' This ensures that the SGPV always refers to the general compatible parameter #' space of coefficients, independent from the confidence interval chosen for #' testing practical equivalence. Therefore, the SGPV of the _full interval_ is #' similar to the ROPE coverage of Bayesian equivalence tests, see following #' code: #' #' ``` #' library(bayestestR) #' library(brms) #' m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars) #' m2 <- brm(mpg ~ gear + wt + cyl + hp, data = mtcars) #' # SGPV for frequentist models #' equivalence_test(m) #' # similar to ROPE coverage of Bayesian models #' equivalence_test(m2) #' # similar to ROPE coverage of simulated draws / bootstrap samples #' equivalence_test(simulate_model(m)) #' ``` #' #' ## ROPE range #' Some attention is required for finding suitable values for the ROPE limits #' (argument `range`). See 'Details' in [bayestestR::rope_range()] #' for further information. #' #' @inheritSection model_parameters Statistical inference - how to quantify evidence #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) #' implemented in the [**see**-package](https://easystats.github.io/see/). #' #' @references #' #' - Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is #' flat (p > 0.05): Significance thresholds and the crisis of unreplicable #' research. PeerJ, 5, e3544. \doi{10.7717/peerj.3544} #' #' - Blume, J. D., D'Agostino McGowan, L., Dupont, W. D., & Greevy, R. A. #' (2018). Second-generation p-values: Improved rigor, reproducibility, & #' transparency in statistical analyses. PLOS ONE, 13(3), e0188299. #' https://doi.org/10.1371/journal.pone.0188299 #' #' - Campbell, H., & Gustafson, P. (2018). Conditional equivalence #' testing: An alternative remedy for publication bias. PLOS ONE, 13(4), #' e0195145. doi: 10.1371/journal.pone.0195145 #' #' - Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference, #' Emphasize Unconditional Compatibility Descriptions of Statistics. (2022) #' https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022) #' #' - Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with #' R, JAGS, and Stan. Academic Press #' #' - Kruschke, J. K. (2018). Rejecting or accepting parameter values in #' Bayesian estimation. Advances in Methods and Practices in Psychological #' Science, 1(2), 270-280. doi: 10.1177/2515245918771304 #' #' - Lakens, D. (2017). Equivalence Tests: A Practical Primer for t Tests, #' Correlations, and Meta-Analyses. Social Psychological and Personality #' Science, 8(4), 355–362. doi: 10.1177/1948550617697177 #' #' - Lakens, D. (2024). Improving Your Statistical Inferences (Version v1.5.1). #' Retrieved from https://lakens.github.io/statistical_inferences/. 
#' \doi{10.5281/ZENODO.6409077} #' #' - Lakens, D., and Delacre, M. (2020). Equivalence Testing and the Second #' Generation P-Value. Meta-Psychology, 4. #' https://doi.org/10.15626/MP.2018.933 #' #' - Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence Testing #' for Psychological Research: A Tutorial. Advances in Methods and Practices #' in Psychological Science, 1(2), 259–269. #' #' - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019). #' Indices of Effect Existence and Significance in the Bayesian Framework. #' Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767} #' #' - Pernet, C. (2017). Null hypothesis significance testing: A guide to #' commonly misunderstood concepts and recommendations for good practice. #' F1000Research, 4, 621. doi: 10.12688/f1000research.6963.5 #' #' - Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical #' science: replace confidence and significance by compatibility and surprise. #' BMC Medical Research Methodology (2020) 20:244. #' #' - Schweder T. Confidence is epistemic probability for empirical science. #' Journal of Statistical Planning and Inference (2018) 195:116–125. #' \doi{10.1016/j.jspi.2017.09.016} #' #' - Schweder T, Hjort NL. Frequentist analogues of priors and posteriors. #' In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory #' Data Confrontation in Economics, pp. 285-217. Princeton University Press, #' Princeton, NJ, 2003 #' #' - Vos P, Holbert D. Frequentist statistical inference without repeated sampling. #' Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x} #' #' @return A data frame. #' @examplesIf requireNamespace("sandwich") #' data(qol_cancer) #' model <- lm(QoL ~ time + age + education, data = qol_cancer) #' #' # default rule #' equivalence_test(model) #' #' # using heteroscedasticity-robust standard errors #' equivalence_test(model, vcov = "HC3") #' #' # conditional equivalence test #' equivalence_test(model, rule = "cet") #' #' # plot method #' if (require("see", quietly = TRUE)) { #' result <- equivalence_test(model) #' plot(result) #' } #' @export equivalence_test.lm <- function(x, range = "default", ci = 0.95, rule = "classic", effects = "fixed", vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { rule <- insight::validate_argument(tolower(rule), c("bayes", "classic", "cet")) out <- .equivalence_test_frequentist( x, range = range, ci = ci, rule = rule, vcov = vcov, vcov_args = vcov_args, verbose, ... 
) if (is.null(attr(out, "pretty_names", exact = TRUE))) { attr(out, "pretty_names") <- format_parameters(x) } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) attr(out, "rule") <- rule class(out) <- c("equivalence_test_lm", "see_equivalence_test_lm", class(out)) out } # standard models, only fixed effects ---------------------- #' @export equivalence_test.glm <- equivalence_test.lm #' @export equivalence_test.wbm <- equivalence_test.lm #' @export equivalence_test.lme <- equivalence_test.lm #' @export equivalence_test.gee <- equivalence_test.lm #' @export equivalence_test.gls <- equivalence_test.lm #' @export equivalence_test.feis <- equivalence_test.lm #' @export equivalence_test.felm <- equivalence_test.lm #' @export equivalence_test.mixed <- equivalence_test.lm #' @export equivalence_test.hurdle <- equivalence_test.lm #' @export equivalence_test.zeroinfl <- equivalence_test.lm #' @export equivalence_test.rma <- equivalence_test.lm # mixed models, also random effects ---------------------- #' @export equivalence_test.merMod <- function(x, range = "default", ci = 0.95, rule = "classic", effects = "fixed", vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { # ==== argument matching ==== rule <- insight::validate_argument(tolower(rule), c("bayes", "classic", "cet")) effects <- insight::validate_argument(effects, c("fixed", "random")) # ==== equivalent testing for fixed or random effects ==== if (effects == "fixed") { out <- .equivalence_test_frequentist( x, range = range, ci = ci, rule = rule, vcov = vcov, vcov_args = vcov_args, verbose, ... ) } else { out <- .equivalence_test_frequentist_random(x, range, ci, rule, verbose, ...) } # ==== result ==== if (is.null(attr(out, "pretty_names", exact = TRUE))) { attr(out, "pretty_names") <- format_parameters(x) } attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) attr(out, "rule") <- rule class(out) <- c("equivalence_test_lm", "see_equivalence_test_lm", class(out)) out } #' @export equivalence_test.glmmTMB <- equivalence_test.merMod #' @export equivalence_test.MixMod <- equivalence_test.merMod # modelbased ------------------------------ #' @export equivalence_test.estimate_means <- function( x, range = "default", ci = 0.95, rule = "classic", vcov = NULL, vcov_args = NULL, verbose = TRUE, ... ) { # ==== define rope range ==== range <- .check_rope_range(x, range, verbose) if (length(ci) > 1) { insight::format_alert("`ci` may only be of length 1. Using first ci-value now.") ci <- ci[1] } # ==== check degrees of freedom ==== dof <- unique(insight::get_df(x)) if (length(dof) > 1) { dof <- Inf } # ==== requested confidence intervals ==== conf_int <- as.data.frame(t(x[c("CI_low", "CI_high")])) # ==== the "narrower" intervals (1-2*alpha) for CET-rules. 
==== alpha <- 1 - ci insight::check_if_installed("modelbased") # we need to call the modelbased function again, so get the call # modify CI and evaluate that call cl <- insight::get_call(x) cl$ci <- ci - alpha x2 <- eval(cl) conf_int2 <- as.data.frame(t(x2[c("CI_low", "CI_high")])) # ==== equivalence test for each parameter ==== l <- Map( function(ci_wide, ci_narrow) { .equivalence_test_numeric( ci = ci, ci_wide, ci_narrow, range_rope = range, rule = rule, dof = dof, verbose = verbose ) }, conf_int, conf_int2 ) dat <- do.call(rbind, l) params <- insight::get_parameters(x) out <- data.frame( Parameter = params$Parameter, CI = ifelse(rule == "bayes", ci, ci - alpha), dat, stringsAsFactors = FALSE ) # ==== (adjusted) p-values for tests ==== if (!inherits(x, "estimate_means")) { out$p <- .add_p_to_equitest(x, ci, range, vcov = vcov, vcov_args = vcov_args, ...) } attr(out, "rope") <- range attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(x)) attr(out, "rule") <- rule class(out) <- c("equivalence_test_lm", "see_equivalence_test_lm", class(out)) out } #' @export equivalence_test.estimate_contrasts <- equivalence_test.estimate_means #' @export equivalence_test.estimate_slopes <- equivalence_test.estimate_means # Special classes ------------------------- #' @export equivalence_test.parameters_simulate_model <- function(x, range = "default", ci = 0.95, verbose = TRUE, ...) { # ==== retrieve model, to define rope range for simulated model parameters ==== model <- .get_object(x) if (all(range == "default") && !is.null(model)) { range <- bayestestR::rope_range(model, verbose = verbose) } else if (!all(is.numeric(range)) || length(range) != 2) { insight::format_error( "`range` should be \"default\" or a vector of 2 numeric values (e.g., `c(-0.1, 0.1)`)." ) } # ==== classical equivalent testing for data frames ==== out <- equivalence_test(as.data.frame(x), range = range, ci = ci, verbose = verbose, ...) if (is.null(attr(out, "pretty_names", exact = TRUE))) { attr(out, "pretty_names") <- format_parameters(x) } attr(out, "object_name") <- attr(x, "object_name") attr(out, "data") <- x class(out) <- unique(c("equivalence_test", "see_equivalence_test", "equivalence_test_simulate_model", class(out))) out } #' @export equivalence_test.parameters_model <- function(x, range = "default", ci = 0.95, rule = "classic", verbose = TRUE, ...) { model <- .get_object(x) equivalence_test(x = model, range = range, ci = ci, rule = rule, verbose = verbose, ...) } # helper ------------------- #' @keywords internal .check_rope_range <- function(x, range, verbose) { # for modelbased-objects, we extract the model to define the rope range if (inherits(x, c("estimate_means", "estimate_contrasts", "estimate_slopes"))) { x <- .safe(insight::get_model(x)) # if not successful, return defaults if (is.null(x)) { return(c(-1, 1)) } } if (all(range == "default")) { range <- bayestestR::rope_range(x, verbose = verbose) if (is.list(range)) { range <- range[[which.max(sapply(range, diff))]] } } else if (!all(is.numeric(range)) || length(range) != 2) { insight::format_error( "`range` should be \"default\" or a vector of 2 numeric values (e.g., `c(-0.1, 0.1)`)." ) } range } #' @keywords internal .equivalence_test_frequentist <- function(x, range = "default", ci = 0.95, rule = "classic", vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { # ==== define rope range ==== range <- .check_rope_range(x, range, verbose) if (length(ci) > 1) { insight::format_alert("`ci` may only be of length 1. 
Using first ci-value now.") ci <- ci[1] } # ==== check degrees of freedom ==== df_column <- grep("(df|df_error)", colnames(x)) if (length(df_column) > 0) { dof <- unique(x[[df_column]]) if (length(dof) > 1) { dof <- Inf } } else { dof <- Inf } # ==== requested confidence intervals ==== params <- conf_int <- .ci_generic(x, ci = ci, vcov = vcov, vcov_args = vcov_args, ...) conf_int <- as.data.frame(t(conf_int[, c("CI_low", "CI_high")])) # ==== the "narrower" intervals (1-2*alpha) for CET-rules. ==== alpha <- 1 - ci conf_int2 <- .ci_generic(x, ci = (ci - alpha), vcov = vcov, vcov_args = vcov_args, ...) conf_int2 <- as.data.frame(t(conf_int2[, c("CI_low", "CI_high")])) # ==== equivalence test for each parameter ==== l <- Map( function(ci_wide, ci_narrow) { .equivalence_test_numeric( ci = ci, ci_wide, ci_narrow, range_rope = range, rule = rule, dof = dof, verbose = verbose ) }, conf_int, conf_int2 ) dat <- do.call(rbind, l) if ("Component" %in% colnames(params)) dat$Component <- params$Component out <- data.frame( Parameter = params$Parameter, CI = ifelse(rule == "bayes", ci, ci - alpha), dat, stringsAsFactors = FALSE ) # ==== (adjusted) p-values for tests ==== out$p <- .add_p_to_equitest(x, ci, range, vcov = vcov, vcov_args = vcov_args, ...) attr(out, "rope") <- range out } #' @keywords internal .equivalence_test_frequentist_random <- function(x, range = "default", ci = 0.95, rule = "classic", verbose = TRUE, ...) { if (all(range == "default")) { range <- bayestestR::rope_range(x, verbose = verbose) } else if (!all(is.numeric(range)) || length(range) != 2) { insight::format_error( "`range` should be \"default\" or a vector of 2 numeric values (e.g., `c(-0.1, 0.1)`)." ) } if (length(ci) > 1) { if (isTRUE(verbose)) { insight::format_alert("`ci` may only be of length 1. 
Using first ci-value now.") } ci <- ci[1] } params <- insight::get_parameters(x, effects = "random", component = "conditional", verbose = FALSE) se <- standard_error(x, effects = "random", component = "conditional") alpha <- (1 + ci) / 2 fac <- stats::qnorm(alpha) alpha_narrow <- (1 + ci - (1 - ci)) / 2 fac_narrow <- stats::qnorm(alpha_narrow) out <- do.call(rbind, lapply(names(params), function(np) { est <- params[[np]][, "(Intercept)"] std_err <- se[[np]][, "(Intercept)"] d <- data.frame( Parameter = rownames(params[[np]]), Estimate = est, CI = ifelse(rule == "bayes", ci, ci - (1 - ci)), Group = np, stringsAsFactors = FALSE ) conf_int <- as.data.frame(t(data.frame( CI_low = est - std_err * fac, CI_high = est + std_err * fac ))) conf_int2 <- as.data.frame(t(data.frame( CI_low = est - std_err * fac_narrow, CI_high = est + std_err * fac_narrow ))) l <- Map( function(ci_wide, ci_narrow) { .equivalence_test_numeric( ci = ci, ci_wide, ci_narrow, range_rope = range, rule = rule, verbose = verbose ) }, conf_int, conf_int2 ) dat <- do.call(rbind, l) cbind(d, dat) })) attr(out, "rope") <- range out } #' @keywords internal .equivalence_test_numeric <- function(ci = 0.95, ci_wide, ci_narrow, range_rope, rule, dof = Inf, verbose) { final_ci <- NULL # ==== HDI+ROPE decision rule, by Kruschke ==== if (rule == "bayes") { final_ci <- ci_wide if (min(ci_wide) > max(range_rope) || max(ci_wide) < min(range_rope)) { decision <- "Rejected" } else if (max(ci_wide) <= max(range_rope) && min(ci_wide) >= min(range_rope)) { decision <- "Accepted" } else { decision <- "Undecided" } } # ==== Lakens' rule ==== if (rule == "classic") { final_ci <- ci_narrow if (all(ci_narrow < max(range_rope)) && all(ci_narrow > min(range_rope))) { # narrow CI is fully inside ROPE - always accept decision <- "Accepted" } else if (min(ci_narrow) < 0 && max(ci_narrow) > 0) { # non-significant results - undecided decision <- "Undecided" } else { decision <- "Rejected" } } # ==== CET rule ==== if (rule == "cet") { final_ci <- ci_narrow # significant result? if (min(ci_wide) > 0 || max(ci_wide) < 0) { decision <- "Rejected" # non-significant results, all narrow CI inside ROPE } else if (all(ci_narrow < max(range_rope)) && all(ci_narrow > min(range_rope))) { decision <- "Accepted" } else { decision <- "Undecided" } } data.frame( CI_low = final_ci[1], CI_high = final_ci[2], SGPV = .rope_coverage(ci = ci, range_rope, ci_range = final_ci, dof = dof), ROPE_low = range_rope[1], ROPE_high = range_rope[2], ROPE_Equivalence = decision, stringsAsFactors = FALSE ) } # helper --------------------- # this function simply takes the length of the range and calculates the proportion # of that range that is inside the rope. However, this assumed a "flat", i.e. # uniformly distributed interval, which is not accurate for standard confidence # intervals. thus, we no longer use this function, but switch to ".rope_coverage()". .sgpv <- function(range_rope, ci) { diff_rope <- abs(diff(range_rope)) diff_ci <- abs(diff(ci)) # inside? if (min(ci) >= min(range_rope) && max(ci) <= max(range_rope)) { coverage <- 1 # outside? } else if (max(ci) < min(range_rope) || min(ci) > max(range_rope)) { coverage <- 0 # CI covers completely rope? } else if (max(ci) > max(range_rope) && min(ci) < min(range_rope)) { coverage <- diff_rope / diff_ci # CI inside rope and outside max rope? } else if (min(ci) >= min(range_rope) && max(ci) > max(range_rope)) { diff_in_rope <- max(range_rope) - min(ci) coverage <- diff_in_rope / diff_ci # CI inside rope and outside min rope? 
} else if (max(ci) <= max(range_rope) && min(ci) < min(range_rope)) { diff_in_rope <- max(ci) - min(range_rope) coverage <- diff_in_rope / diff_ci } coverage } # this function simulates a normal distribution, which approximately has the # same range / limits as the confidence interval, thus indeed representing a # normally distributed confidence interval. We then calculate the probability # mass of this interval that is inside the ROPE. .rope_coverage <- function(ci = 0.95, range_rope, ci_range, dof = Inf) { out <- .generate_posterior_from_ci(ci, ci_range, dof = dof) # compare: ci_range and range(out) # The SGPV refers to the proportion of the confidence interval inside the # full ROPE - thus, we set ci = 1 here rc <- bayestestR::rope(out, range = range_rope, ci = 1) rc$ROPE_Percentage } .generate_posterior_from_ci <- function(ci = 0.95, ci_range, dof = Inf, precision = 10000) { # this function creates an approximate normal distribution that covers the # CI-range, i.e. we "simulate" a posterior distribution from a frequentist CI # sanity check - dof argument if (is.null(dof)) { dof <- Inf } # first we need the range of the CI (in units), also to calculate the mean of # the normal distribution diff_ci <- abs(diff(ci_range)) mean_dist <- ci_range[2] - (diff_ci / 2) # then we need the critical values of the quantiles from the CI range z_value <- stats::qt((1 + ci) / 2, df = dof) # the range of Z-scores (from lower to upper quantile) gives us the range of # the provided interval in terms of standard deviations. now we divide the # known range of the provided CI (in units) by the z-score-range, which will # give us the standard deviation of the distribution. sd_dist <- diff_ci / diff(c(-1 * z_value, z_value)) # generate normal-distribution if we don't have t-distribution, or if # we don't have necessary packages installed if (is.infinite(dof) || !insight::check_if_installed("distributional", quietly = TRUE)) { # tell user to install "distributional" if (!is.infinite(dof)) { insight::format_alert("For models with only few degrees of freedom, install the {distributional} package to increase accuracy of `p_direction()`, `p_significance()` and `equivalence_test()`.") # nolint } # we now know all parameters (mean and sd) to simulate a normal distribution bayestestR::distribution_normal(n = precision, mean = mean_dist, sd = sd_dist) } else { insight::check_if_installed("distributional") out <- distributional::dist_student_t(df = dof, mu = mean_dist, sigma = sd_dist) sort(unlist(distributional::generate(out, times = precision), use.names = FALSE)) } } .add_p_to_equitest <- function(model, ci, range, vcov = NULL, vcov_args = NULL, ...) { tryCatch( { params <- insight::get_parameters(model) # remove dispersion components if (!is.null(params$Component)) { params <- params[params$Component != "dispersion", ] } # degrees of freedom dof <- insight::get_df(x = model, type = "wald") # mu params$mu <- params$Estimate * -1 # se se <- standard_error(model, vcov = vcov, vcov_args = vcov_args, ...) # remove dispersion components if (!is.null(se$Component)) { se <- se[se$Component != "dispersion", ] } stats::pt((range[1] - params$mu) / se$SE, df = dof, lower.tail = TRUE) + stats::pt((range[2] - params$mu) / se$SE, df = dof, lower.tail = FALSE) }, error = function(e) { NULL } ) } # methods ---------------- #' @export format.equivalence_test_lm <- function(x, digits = 2, ci_digits = digits, p_digits = 3, ci_width = NULL, ci_brackets = NULL, format = "text", zap_small = FALSE, ...) 
{ # default brackets are parenthesis for HTML / MD if ((is.null(ci_brackets) || isTRUE(ci_brackets)) && (identical(format, "html") || identical(format, "markdown"))) { ci_brackets <- c("(", ")") } else if (is.null(ci_brackets) || isTRUE(ci_brackets)) { ci_brackets <- c("[", "]") } # main formatting out <- insight::format_table( x, digits = digits, ci_digits = ci_digits, p_digits = p_digits, ci_width = ci_width, ci_brackets = ci_brackets, zap_small = zap_small, ... ) # format column names colnames(out)[which(colnames(out) == "Equivalence (ROPE)")] <- "Equivalence" out$ROPE <- NULL # only show supported components if ("Component" %in% colnames(out)) { out <- out[out$Component %in% c("conditional", "count"), ] } out } #' @export print.equivalence_test_lm <- function(x, digits = 2, ci_digits = digits, p_digits = 3, ci_brackets = NULL, zap_small = FALSE, ...) { orig_x <- x rule <- attributes(x)$rule if (is.null(rule)) { insight::print_color("# Test for Practical Equivalence\n\n", "blue") } else if (rule == "cet") { insight::print_color("# Conditional Equivalence Testing\n\n", "blue") } else if (rule == "classic") { insight::print_color("# TOST-test for Practical Equivalence\n\n", "blue") } else { insight::print_color("# Test for Practical Equivalence\n\n", "blue") } .rope <- attr(x, "rope", exact = TRUE) cat(sprintf(" ROPE: [%.*f %.*f]\n\n", digits, .rope[1], digits, .rope[2])) # formatting x <- format(x, digits = digits, ci_digits = ci_digits, p_digits = p_digits, ci_width = "auto", ci_brackets = ci_brackets, format = "text", zap_small = zap_small, ... ) if ("Group" %in% colnames(x)) { out <- split(x, x$Group) for (i in names(out)) { insight::print_color(sprintf("Group: %s\n\n", i), "red") cat(insight::export_table(out[[i]])) } } else { cat(insight::export_table(x)) } invisible(orig_x) } #' @export plot.equivalence_test_lm <- function(x, ...) { insight::check_if_installed("see") NextMethod() } # helper for print_html / print_md -------------------- .print_equivalence_test_lm <- function( x, digits = 2, ci_brackets = c("(", ")"), zap_small = FALSE, format = "markdown", ... ) { rule <- attributes(x)$rule rope <- attributes(x)$rope if (is.null(rule)) { table_caption <- "Test for Practical Equivalence" } else if (rule == "cet") { table_caption <- "Conditional Equivalence Testing" } else if (rule == "classic") { table_caption <- "TOST-test for Practical Equivalence" } else { table_caption <- "Test for Practical Equivalence" } if ("Component" %in% colnames(x)) { x <- x[x$Component %in% c("conditional", "count"), ] } formatted_table <- insight::format_table( x, pretty_names = TRUE, digits = digits, ci_width = NULL, ci_brackets = ci_brackets, zap_small = zap_small, ... 
) colnames(formatted_table)[which(colnames(formatted_table) == "Equivalence (ROPE)")] <- "H0" formatted_table$ROPE <- NULL # col_order <- c("Parameter", "H0", "% in ROPE", colnames(formatted_table)[grepl(" CI$", colnames(formatted_table))]) # col_order <- c(col_order, setdiff(colnames(formatted_table), col_order)) # formatted_table <- formatted_table[col_order] # replace brackets by parenthesis if (!is.null(ci_brackets) && "Parameter" %in% colnames(formatted_table)) { formatted_table$Parameter <- gsub("[", ci_brackets[1], formatted_table$Parameter, fixed = TRUE) formatted_table$Parameter <- gsub("]", ci_brackets[2], formatted_table$Parameter, fixed = TRUE) } if (!is.null(rope)) { names(formatted_table)[names(formatted_table) == "% in ROPE"] <- sprintf( "%% in ROPE (%.*f, %.*f)", digits, rope[1], digits, rope[2] ) # nolint } insight::export_table( formatted_table, format = format, caption = table_caption, align = "firstleft", ... ) } parameters/R/methods_ivreg.R0000644000176200001440000000025214133222153015573 0ustar liggesusers#' @export p_value.ivreg <- p_value.default #' @export simulate_model.ivreg <- simulate_model.default #' @export standard_error.ivreg <- standard_error.default parameters/R/factor_analysis.R0000644000176200001440000000565015030725674016141 0ustar liggesusers#' @rdname principal_components #' @export factor_analysis <- function(x, ...) { UseMethod("factor_analysis") } #' @rdname principal_components #' @export factor_analysis.data.frame <- function(x, n = "auto", rotation = "oblimin", factor_method = "minres", sort = FALSE, threshold = NULL, standardize = FALSE, ...) { insight::check_if_installed("psych") # Standardize if (standardize) { x <- datawizard::standardize(x, ...) } # N factors n <- .get_n_factors( x, n = n, type = "FA", rotation = rotation ) # FA out <- model_parameters( psych::fa(x, nfactors = n, rotate = rotation, fm = factor_method, ...), threshold = threshold, sort = sort, ... ) attr(out, "dataset") <- x out } #' @rdname principal_components #' @export factor_analysis.matrix <- function(x, n = "auto", rotation = "oblimin", factor_method = "minres", n_obs = NULL, sort = FALSE, threshold = NULL, standardize = FALSE, ...) { # check if we have a square matrix. in this case, we assume that # the user wants to do a factor analysis on the correlation matrix if ((dim(x)[1] == dim(x)[2]) && is.null(n_obs)) { insight::format_error( "You provided a square matrix, which is assumed to be a correlation matrix. Please specify the number of observations with `n_obs`. If your matrix is not a correlation matrix, please provide a data frame instead." ) } # the default n.obs argument in `psych::fa()` is `NA`, so we change # our default `NULL` to `NA` to avoid errors n_matrix <- NULL if (is.null(n_obs)) { n_obs <- NA } else if (is.matrix(n_obs)) { n_matrix <- n_obs n_obs <- NA # check for correct dimensions if (dim(n_matrix)[1] != dim(x)[1] || dim(n_matrix)[2] != dim(x)[2]) { insight::format_error( "The provided `n_obs` matrix must have the same dimensions as the input matrix." ) } } factor_analysis.data.frame( x, n = n, rotation = rotation, factor_method = factor_method, sort = sort, threshold = threshold, standardize = standardize, n.obs = n_obs, np.obs = n_matrix, ... 
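    # `np.obs` (a matrix of pairwise n's) is only used by `psych::fa()` when a
    # correlation matrix is supplied; a hedged usage sketch:
    #   factor_analysis(cor(mtcars), n = 2, n_obs = nrow(mtcars))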
  )
}

.is_oblique_rotation <- function(rotation) {
  # compare against lowercase names, since `rotation` is lowercased first
  !is.null(rotation) &&
    tolower(rotation) %in% c("promax", "oblimin", "simplimax", "bentlerq", "geominq", "biquartimin", "cluster") # nolint
}
parameters/R/dof.R0000644000176200001440000001333715073732442013523 0ustar liggesusers#' Degrees of Freedom (DoF)
#'
#' Estimate or extract degrees of freedom of model parameters.
#'
#' @param model A statistical model.
#' @param method Type of approximation for the degrees of freedom. Can be one of
#' the following:
#'
#' + `"residual"` (aka `"analytical"`) returns the residual degrees of
#'   freedom, which usually is what [`stats::df.residual()`] returns. If a
#'   model object has no method to extract residual degrees of freedom, these
#'   are calculated as `n-p`, i.e. the number of observations minus the number
#'   of estimated parameters. If residual degrees of freedom cannot be extracted
#'   by either approach, returns `Inf`.
#' + `"wald"` returns residual (aka analytical) degrees of freedom for models
#'   with t-statistic, `1` for models with Chi-squared statistic, and `Inf` for
#'   all other models. Also returns `Inf` if residual degrees of freedom cannot
#'   be extracted.
#' + `"normal"` always returns `Inf`.
#' + `"model"` returns model-based degrees of freedom, i.e. the number of
#'   (estimated) parameters.
#' + For mixed models, can also be `"ml1"` (or `"m-l-1"`, approximation of
#'   degrees of freedom based on a "m-l-1" heuristic as suggested by _Elff et
#'   al. 2019_) or `"between-within"` (or `"betwithin"`).
#' + For mixed models of class `merMod`, `method` can also be `"satterthwaite"`
#'   or `"kenward-roger"` (or `"kenward"`). See 'Details'.
#'
#' Usually, when degrees of freedom are required to calculate p-values or
#' confidence intervals, `method = "wald"` is likely to be the best choice in
#' most cases.
#' @param ... Currently not used.
#'
#' @note
#' In many cases, `degrees_of_freedom()` returns the same as `df.residual()`,
#' or `n-k` (number of observations minus number of parameters). However,
#' `degrees_of_freedom()` refers to the model's *parameters* degrees of freedom
#' of the distribution for the related test statistic. Thus, for models with
#' z-statistic, results from `degrees_of_freedom()` and `df.residual()` differ.
#' Furthermore, for other approximation methods like `"kenward"` or
#' `"satterthwaite"`, each model parameter can have a different degree of
#' freedom.
#'
#' @examplesIf require("lme4", quietly = TRUE)
#' model <- lm(Sepal.Length ~ Petal.Length * Species, data = iris)
#' dof(model)
#'
#' model <- glm(vs ~ mpg * cyl, data = mtcars, family = "binomial")
#' dof(model)
#' \donttest{
#' model <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris)
#' dof(model)
#'
#' if (require("rstanarm", quietly = TRUE)) {
#'   model <- stan_glm(
#'     Sepal.Length ~ Petal.Length * Species,
#'     data = iris,
#'     chains = 2,
#'     refresh = 0
#'   )
#'   dof(model)
#' }
#' }
#' @export
degrees_of_freedom <- function(model, method = "analytical", ...) {
  insight::get_df(x = model, type = method, ...)
}

#' @rdname degrees_of_freedom
#' @export
dof <- degrees_of_freedom


# Helper, check args ------------------------------

.dof_method_ok <- function(model, method, type = "df_method", verbose = TRUE, ...)
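# Validation sketch (hypothetical calls, not run): for a plain `lm()` model `m`,
# `.dof_method_ok(m, "satterthwaite")` is FALSE (Satterthwaite and Kenward-Roger
# are only defined for mixed models), while `.dof_method_ok(m, "wald")` is TRUE.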
{ if (is.null(method)) { return(TRUE) } method <- tolower(method) # exceptions 1 if (inherits(model, c("polr", "glm", "svyglm", "svyolr"))) { # fmt: skip if (method %in% c( "analytical", "any", "fit", "profile", "residual", "wald", "nokr", "likelihood", "normal" )) { return(TRUE) } else { if (verbose) { insight::format_alert(sprintf("`%s` must be one of \"wald\", \"residual\" or \"profile\". Using \"wald\" now.", type)) # nolint } return(FALSE) } } # exceptions 2 if (inherits(model, c("phylolm", "phyloglm"))) { if ( method %in% c("analytical", "any", "fit", "residual", "wald", "nokr", "normal", "boot") ) { return(TRUE) } else { if (verbose) { insight::format_alert(sprintf( "`%s` must be one of \"wald\", \"normal\" or \"boot\". Using \"wald\" now.", type )) } return(FALSE) } } info <- insight::model_info(model, verbose = FALSE) if (!is.null(info) && isFALSE(info$is_mixed) && method == "boot") { if (verbose) { insight::format_alert(sprintf( "`%s=boot` only works for mixed models of class `merMod`. To bootstrap this model, use `bootstrap=TRUE, ci_method=\"bcai\"`.", type )) } return(TRUE) } # fmt: skip if (is.null(info) || !info$is_mixed) { if (!(method %in% c( "analytical", "any", "fit", "betwithin", "nokr", "wald", "ml1", "profile", "boot", "uniroot", "residual", "normal" ))) { if (verbose) { insight::format_alert(sprintf( "`%s` must be one of \"residual\", \"wald\", \"normal\", \"profile\", \"boot\", \"uniroot\", \"betwithin\" or \"ml1\". Using \"wald\" now.", type )) } return(FALSE) } return(TRUE) } # fmt: skip if (!(method %in% c( "analytical", "any", "fit", "satterthwaite", "betwithin", "kenward", "kr", "nokr", "wald", "ml1", "profile", "boot", "uniroot", "residual", "normal" ))) { if (verbose) { insight::format_alert(sprintf( "`%s` must be one of \"residual\", \"wald\", \"normal\", \"profile\", \"boot\", \"uniroot\", \"kenward\", \"satterthwaite\", \"betwithin\" or \"ml1\". Using \"wald\" now.", type )) } return(FALSE) } if (!info$is_linear && method %in% c("satterthwaite", "kenward", "kr")) { if (verbose) { insight::format_alert(sprintf( "`%s`-degrees of freedoms are only available for linear mixed models.", method )) } return(FALSE) } return(TRUE) } parameters/R/methods_pam.R0000644000176200001440000000060114717111737015247 0ustar liggesusers#' @export model_parameters.pam <- function(model, data = NULL, clusters = NULL, ...) { if (is.null(data)) data <- as.data.frame(model$data) if (is.null(clusters)) clusters <- model$clustering params <- .cluster_centers_params(data, clusters, ...) attr(params, "model") <- model attr(params, "type") <- "pam" attr(params, "title") <- "K-Medoids" params } parameters/R/methods_fitdistr.R0000644000176200001440000000154514507235543016331 0ustar liggesusers#' @export model_parameters.fitdistr <- function(model, exponentiate = FALSE, verbose = TRUE, ...) { out <- data.frame( Parameter = names(model$estimate), Coefficient = as.vector(model$estimate), SE = as.vector(model$sd), stringsAsFactors = FALSE ) # exponentiate coefficients and SE/CI, if requested out <- .exponentiate_parameters(out, model, exponentiate) class(out) <- c("parameters_model", "see_parameters_model", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export standard_error.fitdistr <- function(model, ...) 
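# Usage sketch (requires MASS; simulated data):
#   set.seed(1)
#   x <- rnorm(200, mean = 2, sd = 0.5)
#   fit <- MASS::fitdistr(x, "normal")
#   standard_error(fit) # SEs for the "mean" and "sd" estimates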
{ data.frame( Parameter = names(model$estimate), SE = as.vector(model$sd), stringsAsFactors = FALSE ) } parameters/R/methods_mfx.R0000644000176200001440000002362614736731407015303 0ustar liggesusers# model parameters --------------------- #' @export model_parameters.logitor <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = TRUE, p_adjust = NULL, verbose = TRUE, ...) { model_parameters.default( model$fit, ci = ci, bootstrap = bootstrap, iterations = iterations, standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, ... ) } #' @export model_parameters.poissonirr <- model_parameters.logitor #' @export model_parameters.negbinirr <- model_parameters.logitor #' @export model_parameters.poissonmfx <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = "all", standardize = NULL, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { component <- insight::validate_argument( component, c("all", "conditional", "marginal") ) out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, component = component, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export model_parameters.logitmfx <- model_parameters.poissonmfx #' @export model_parameters.probitmfx <- model_parameters.poissonmfx #' @export model_parameters.negbinmfx <- model_parameters.poissonmfx #' @export model_parameters.betaor <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = "conditional", standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { component <- insight::validate_argument( component, c("conditional", "precision", "all") ) model_parameters.betareg( model$fit, ci = ci, bootstrap = bootstrap, iterations = iterations, component = component, standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, ... ) } #' @export model_parameters.betamfx <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = "all", standardize = NULL, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { component <- insight::validate_argument( component, c("all", "conditional", "precision", "marginal") ) out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, component = component, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } # ci ------------------ #' @export ci.logitor <- function(x, ci = 0.95, method = NULL, ...) { .ci_generic(model = x$fit, ci = ci, method = method, ...) } #' @export ci.poissonirr <- ci.logitor #' @export ci.negbinirr <- ci.logitor #' @export ci.poissonmfx <- function(x, ci = 0.95, component = "all", method = NULL, ...) { component <- insight::validate_argument( component, c("all", "conditional", "marginal") ) .ci_generic(model = x, ci = ci, component = component, method = method, ...) 
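  # Usage sketch (requires the `mfx` package; `d` is a hypothetical data frame
  # with a count outcome):
  #   m <- mfx::poissonmfx(y ~ x1 + x2, data = d)
  #   ci(m, ci = 0.9, component = "marginal")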
} #' @export ci.negbinmfx <- ci.poissonmfx #' @export ci.logitmfx <- ci.poissonmfx #' @export ci.probitmfx <- ci.poissonmfx #' @export ci.betaor <- function(x, ci = 0.95, component = "all", ...) { component <- insight::validate_argument( component, c("all", "conditional", "precision") ) .ci_generic(model = x$fit, ci = ci, dof = Inf, component = component) } #' @export ci.betamfx <- function(x, ci = 0.95, method = NULL, component = "all", ...) { component <- insight::validate_argument( component, c("all", "conditional", "precision", "marginal") ) .ci_generic(model = x, ci = ci, component = component, method = method, ...) } # standard error ------------------ #' @export standard_error.negbin <- standard_error.default #' @export standard_error.logitor <- function(model, ...) { standard_error.default(model$fit, ...) } #' @export standard_error.poissonirr <- standard_error.logitor #' @export standard_error.negbinirr <- standard_error.logitor #' @export standard_error.poissonmfx <- function(model, component = "all", ...) { parms <- insight::get_parameters(model, component = "all") cs <- stats::coef(summary(model$fit)) se <- c(as.vector(model$mfxest[, 2]), as.vector(cs[, 2])) out <- .data_frame( Parameter = parms$Parameter, SE = se, Component = parms$Component ) component <- insight::validate_argument( component, c("all", "conditional", "marginal") ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @export standard_error.logitmfx <- standard_error.poissonmfx #' @export standard_error.probitmfx <- standard_error.poissonmfx #' @export standard_error.negbinmfx <- standard_error.poissonmfx #' @export standard_error.betaor <- function(model, component = "all", ...) { component <- insight::validate_argument( component, c("all", "conditional", "precision") ) standard_error.betareg(model$fit, component = component, ...) } #' @export standard_error.betamfx <- function(model, component = "all", ...) { parms <- insight::get_parameters(model, component = "all") cs <- do.call(rbind, stats::coef(summary(model$fit))) se <- c(as.vector(model$mfxest[, 2]), as.vector(cs[, 2])) out <- .data_frame( Parameter = parms$Parameter, SE = se, Component = parms$Component ) component <- insight::validate_argument( component, c("all", "conditional", "precision", "marginal") ) if (component != "all") { out <- out[out$Component == component, ] } out } # p values ------------------ #' @export p_value.poissonmfx <- function(model, component = "all", ...) { parms <- insight::get_parameters(model, component = "all") cs <- stats::coef(summary(model$fit)) p <- c(as.vector(model$mfxest[, 4]), as.vector(cs[, 4])) out <- .data_frame( Parameter = parms$Parameter, p = p, Component = parms$Component ) component <- insight::validate_argument( component, c("all", "conditional", "marginal") ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @export p_value.logitor <- function(model, method = NULL, ...) { p_value.default(model$fit, method = method, ...) } #' @export p_value.poissonirr <- p_value.logitor #' @export p_value.negbinirr <- p_value.logitor #' @export p_value.logitmfx <- p_value.poissonmfx #' @export p_value.probitmfx <- p_value.poissonmfx #' @export p_value.negbinmfx <- p_value.poissonmfx #' @export p_value.betaor <- function(model, component = "all", ...) { component <- insight::validate_argument( component, c("all", "conditional", "precision") ) p_value.betareg(model$fit, component = component, ...) } #' @export p_value.betamfx <- function(model, component = "all", ...) 
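# Usage sketch (requires the `mfx` package; `d` is a hypothetical data frame
# with a response in (0, 1)):
#   m <- mfx::betamfx(y ~ x1 + x2, data = d)
#   p_value(m, component = "marginal")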
{ parms <- insight::get_parameters(model, component = "all") cs <- do.call(rbind, stats::coef(summary(model$fit))) p <- c(as.vector(model$mfxest[, 4]), as.vector(cs[, 4])) out <- .data_frame( Parameter = parms$Parameter, p = p, Component = parms$Component ) component <- insight::validate_argument( component, c("all", "conditional", "precision", "marginal") ) if (component != "all") { out <- out[out$Component == component, ] } out } # simulate model ------------------ #' @export simulate_model.betaor <- function(model, iterations = 1000, component = "all", ...) { component <- insight::validate_argument( component, c("all", "conditional", "precision") ) simulate_model.betareg(model$fit, iterations = iterations, component = component, ... ) } #' @export simulate_model.betamfx <- simulate_model.betaor parameters/R/cluster_performance.R0000644000176200001440000000515214717111737017017 0ustar liggesusers#' Performance of clustering models #' #' Compute performance indices for clustering solutions. #' #' @inheritParams model_parameters.hclust #' #' @examples #' # kmeans #' model <- kmeans(iris[1:4], 3) #' cluster_performance(model) #' #' # hclust #' data <- iris[1:4] #' model <- hclust(dist(data)) #' clusters <- cutree(model, 3) #' cluster_performance(model, data, clusters) #' #' # Retrieve performance from parameters #' params <- model_parameters(kmeans(iris[1:4], 3)) #' cluster_performance(params) #' @export cluster_performance <- function(model, ...) { UseMethod("cluster_performance") } #' @export cluster_performance.kmeans <- function(model, ...) { out <- as.data.frame(model[c("totss", "betweenss", "tot.withinss")]) colnames(out) <- c("Sum_Squares_Total", "Sum_Squares_Between", "Sum_Squares_Within") out$R2 <- out$Sum_Squares_Between / out$Sum_Squares_Total row.names(out) <- NULL class(out) <- c("performance_model", class(out)) out } #' @rdname cluster_performance #' @export cluster_performance.hclust <- function(model, data, clusters, ...) { if (is.null(data)) { insight::format_error( "This function requires the data used to compute the clustering to be provided via `data` as it is not accessible from the clustering object itself." ) } if (is.null(clusters)) { insight::format_error( "This function requires a vector of clusters assignments of same length as data to be passed, as it is not contained in the clustering object itself." ) } params <- model_parameters(model, data = data, clusters = clusters, ...) cluster_performance(params) } #' @export cluster_performance.dbscan <- function(model, data, ...) { if (is.null(data)) { insight::format_error( "This function requires the data used to compute the clustering to be provided via `data` as it is not accessible from the clustering object itself." ) } params <- model_parameters(model, data = data, ...) cluster_performance(params) } # Base -------------------------------------------------------------------- #' @export cluster_performance.parameters_clusters <- function(model, ...) 
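# The R2 below is the between/total sum-of-squares ratio; e.g., with
# Sum_Squares_Total = 100 and Sum_Squares_Between = 60, R2 = 0.6, i.e. 60% of
# the total variance lies between clusters (illustrative numbers).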
{ valid <- model$Cluster != 0 & model$Cluster != "0" # Valid clusters out <- data.frame( Sum_Squares_Total = attributes(model)$Sum_Squares_Total, Sum_Squares_Between = attributes(model)$Sum_Squares_Between, Sum_Squares_Within = sum(model$Sum_Squares[valid], na.rm = TRUE) ) out$R2 <- out$Sum_Squares_Between / out$Sum_Squares_Total class(out) <- c("performance_model", class(out)) out } parameters/R/standard_error_satterthwaite.R0000644000176200001440000000040714415527411020727 0ustar liggesusers#' @rdname p_value_satterthwaite #' @export se_satterthwaite <- function(model) { UseMethod("se_satterthwaite") } #' @export se_satterthwaite.default <- function(model) { # check for valid input .is_model_valid(model) standard_error(model) } parameters/R/print.compare_parameters.R0000644000176200001440000000571015053035103017743 0ustar liggesusers#' @title Print comparisons of model parameters #' @name print.compare_parameters #' #' @description A `print()`-method for objects from [`compare_parameters()`]. #' #' @param x An object returned by [`compare_parameters()`]. #' @inheritParams print.parameters_model #' @inheritSection print.parameters_model Global Options to Customize Messages and Tables when Printing #' #' @return Invisibly returns the original input object. #' #' @examplesIf require("gt", quietly = TRUE) #' \donttest{ #' data(iris) #' lm1 <- lm(Sepal.Length ~ Species, data = iris) #' lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) #' #' # custom style #' result <- compare_parameters(lm1, lm2, select = "{estimate}{stars} ({se})") #' print(result) #' #' # custom style, in HTML #' result <- compare_parameters(lm1, lm2, select = "{estimate}
({se})|{p}") #' print_html(result) #' } #' @export print.compare_parameters <- function(x, split_components = TRUE, caption = NULL, subtitle = NULL, footer = NULL, digits = 2, ci_digits = digits, p_digits = 3, zap_small = FALSE, groups = NULL, column_width = NULL, ci_brackets = c("[", "]"), select = NULL, ...) { # save original input orig_x <- x # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", digits) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", p_digits) } # get attributes if (missing(select)) { select <- attributes(x)$output_style } if (missing(groups)) { groups <- attributes(x)$parameter_groups } formatted_table <- format( x, select = select, split_components = split_components, digits = digits, ci_digits = ci_digits, p_digits = p_digits, ci_width = "auto", ci_brackets = ci_brackets, format = "text", groups = groups, zap_small = zap_small, ... ) # if we have multiple components, we can align colum width across components here if (!is.null(column_width) && all(column_width == "fixed") && is.list(formatted_table)) { column_width <- .find_min_colwidth(formatted_table) } cat(insight::export_table( formatted_table, format = "text", caption = caption, subtitle = subtitle, footer = footer, empty_line = "-", width = column_width, ... )) invisible(orig_x) } parameters/R/cluster_centers.R0000644000176200001440000001031114473626002016145 0ustar liggesusers#' Find the cluster centers in your data #' #' For each cluster, computes the mean (or other indices) of the variables. Can be used #' to retrieve the centers of clusters. Also returns the within Sum of Squares. #' #' @param data A data.frame. #' @param clusters A vector with clusters assignments (must be same length as rows in data). #' @param fun What function to use, `mean` by default. #' @param ... Other arguments to be passed to or from other functions. #' #' @return A dataframe containing the cluster centers. Attributes include #' performance statistics and distance between each observation and its #' respective cluster centre. #' #' #' @examples #' k <- kmeans(iris[1:4], 3) #' cluster_centers(iris[1:4], clusters = k$cluster) #' cluster_centers(iris[1:4], clusters = k$cluster, fun = median) #' @export cluster_centers <- function(data, clusters, fun = mean, ...) { # Get n obs params <- data.frame(table(clusters)) names(params) <- c("Cluster", "n_Obs") # Get Within clusters sum of squares (WCSS) ss <- .cluster_centers_SS(data, clusters) params$Sum_Squares <- ss$WSS # Get Cluster Centers centers <- stats::aggregate(data, list(Cluster = clusters), fun) params <- merge(params, centers, by = "Cluster") # Get distance of observations from cluster # Add attributes attr(params, "Sum_Squares_Total") <- ss$TSS attr(params, "Sum_Squares_Between") <- ss$BSS attr(params, "variance") <- ss$BSS / ss$TSS attr(params, "scale") <- vapply(data, stats::sd, numeric(1)) attr(params, "distance") <- .cluster_centers_distance(data, clusters, centers, attributes(params)$scale) params } # Performance ------------------------------------------------------------- #' @keywords internal .cluster_centers_params <- function(data, clusters, ...) { # This function actually wraps *around* the exported cluster_centers() # to be used within the different model_parameters() functions for clusters params <- cluster_centers(data = data, clusters = clusters, ...) 
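  # Illustrative sketch: for `k <- kmeans(iris[1:4], 3)`,
  # `cluster_centers(iris[1:4], k$cluster)` returns one row per cluster with
  # `Cluster`, `n_Obs`, `Sum_Squares` and the per-variable centers (3 x 7 here).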
# Long means means <- datawizard::reshape_longer(params, select = 4:ncol(params), values_to = "Mean", names_to = "Variable" ) attr(params, "variance") <- attributes(params)$variance attr(params, "Sum_Squares_Between") <- attributes(params)$Sum_Squares_Between attr(params, "Sum_Squares_Total") <- attributes(params)$Sum_Squares_Total attr(params, "scale") <- attributes(params)$scale attr(params, "distance") <- attributes(params)$distance attr(params, "scores") <- attributes(params)$scores attr(params, "means") <- means class(params) <- c("parameters_clusters", class(params)) params } # Distance ---------------------------------------------------------------- #' @keywords internal .cluster_centers_distance <- function(data, clusters, centers, scale) { dis <- NULL for (c in unique(clusters)) { center <- centers[centers$Cluster == c, ] center$Cluster <- NULL # Remove column d <- apply(data[clusters == c, ], 1, function(x) { z <- x - center[names(data)] z <- z / scale sqrt(sum((z)^2)) }) dis <- c(dis, d) } dis } # Performance ------------------------------------------------------------- #' @keywords internal .cluster_centers_SS <- function(data, clusters) { # https://stackoverflow.com/questions/68714612/compute-between-clusters-sum-of-squares-bcss-and-total-sum-of-squares-manually # total sum of squares TSS <- sum(scale(data, scale = FALSE)^2) # Within clusters sum of squares (WCSS) WSS <- sapply(split(data, clusters), function(x) sum(scale(x, scale = FALSE)^2)) # Between clusters sum of squares BSS <- TSS - sum(WSS) # Compute BSS directly (without TSS to double check) gmeans <- sapply(split(data, clusters), colMeans) means <- colMeans(data) BSS2 <- sum(colSums((gmeans - means)^2) * table(clusters)) # Double check if (BSS2 - BSS > 1e-05) { insight::format_error("The between sum of squares computation went wrong. Please open an issue at {.url https://github.com/easystats/parameters/issues} so we can fix the bug (provide an example and mention that `BSS != BSS2`).") } list(WSS = WSS, BSS = BSS, TSS = TSS) } parameters/R/convert_efa_to_cfa.R0000644000176200001440000000715014736731407016566 0ustar liggesusers#' Conversion between EFA results and CFA structure #' #' Enables a conversion between Exploratory Factor Analysis (EFA) and #' Confirmatory Factor Analysis (CFA) `lavaan`-ready structure. #' #' @param model An EFA model (e.g., a `psych::fa` object). #' @param names Vector containing dimension names. #' @param max_per_dimension Maximum number of variables to keep per dimension. #' @inheritParams principal_components #' #' @examplesIf require("psych") && require("lavaan") #' \donttest{ #' library(parameters) #' data(attitude) #' efa <- psych::fa(attitude, nfactors = 3) #' #' model1 <- efa_to_cfa(efa) #' model2 <- efa_to_cfa(efa, threshold = 0.3) #' model3 <- efa_to_cfa(efa, max_per_dimension = 2) #' #' suppressWarnings(anova( #' lavaan::cfa(model1, data = attitude), #' lavaan::cfa(model2, data = attitude), #' lavaan::cfa(model3, data = attitude) #' )) #' } #' @return Converted index. #' @export convert_efa_to_cfa <- function(model, ...) { UseMethod("convert_efa_to_cfa") } #' @rdname convert_efa_to_cfa #' @inheritParams model_parameters.principal #' @export convert_efa_to_cfa.fa <- function(model, threshold = "max", names = NULL, max_per_dimension = NULL, ...) { .efa_to_cfa(model_parameters(model, threshold = threshold, ...), names = names, max_per_dimension = max_per_dimension, ... 
) } #' @export convert_efa_to_cfa.fa.ci <- convert_efa_to_cfa.fa #' @export convert_efa_to_cfa.parameters_efa <- function(model, threshold = NULL, names = NULL, max_per_dimension = NULL, ...) { if (!is.null(threshold)) { model <- model_parameters(attributes(model)$model, threshold = threshold, ...) } .efa_to_cfa(model, names = names, max_per_dimension = max_per_dimension, ...) } #' @export convert_efa_to_cfa.parameters_pca <- convert_efa_to_cfa.parameters_efa #' @rdname convert_efa_to_cfa #' @export efa_to_cfa <- convert_efa_to_cfa #' @keywords internal .efa_to_cfa <- function(loadings, names = NULL, max_per_dimension = NULL, ...) { loadings <- attributes(loadings)$loadings_long # Get dimension names if (is.null(names)) { names <- unique(loadings$Component) } # Catch error if (length(names) != insight::n_unique(loadings$Component)) { insight::format_error( paste0( "The `names` vector must be of same length as the number of dimensions, in this case ", length(unique(loadings$Component)), "." ) ) } cfa <- NULL # Iterate over dimensions for (i in seq_along(names)) { # Find correct subset items <- loadings[loadings$Component == unique(loadings$Component)[i], ] # Find corresponding items items <- as.character(loadings[loadings$Component == unique(loadings$Component)[i], "Variable"]) # Subset if need be to keep only a certain number if (!is.null(max_per_dimension) && max_per_dimension > 0) { items <- as.character(stats::na.omit(items[1:max_per_dimension])) } # Append that list cfa <- c(cfa, paste0(names[i], " =~ ", paste(items, collapse = " + "))) } cfa <- paste0(cfa, collapse = "\n") cfa <- paste0("# Latent variables\n", cfa) class(cfa) <- c("cfa_model", class(cfa)) cfa } #' @export print.cfa_model <- function(x, ...) { cat(x) invisible(x) } parameters/R/methods_bamlss.R0000644000176200001440000000453114717111737015761 0ustar liggesusers#' @export model_parameters.bamlss <- function(model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = "pd", rope_range = "default", rope_ci = 0.95, component = "all", exponentiate = FALSE, standardize = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = NULL, diagnostic = NULL, priors = FALSE, effects = "all", component = component, standardize = standardize, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) params <- .add_pretty_names(params, model) # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) params <- .add_model_parameters_attributes(params, model, ci, exponentiate, ci_method = ci_method, verbose = verbose, ... ) attr(params, "parameter_info") <- insight::clean_parameters(model) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- unique(c("parameters_model", "see_parameters_model", class(params))) params } #' @export standard_error.bamlss <- function(model, component = c("all", "conditional", "location", "distributional", "auxilliary"), ...) { component <- match.arg(component) params <- insight::get_parameters(model, component = component, ...) 
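  # For Bayesian models, the SE reported here is the standard deviation of the
  # posterior draws, computed column-wise below; conceptually:
  #   vapply(posterior_draws, stats::sd, numeric(1))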
.data_frame( Parameter = colnames(params), SE = unname(sapply(params, stats::sd, na.rm = TRUE)) ) } #' @export p_value.bamlss <- p_value.BFBayesFactor parameters/R/5_simulate_model.R0000644000176200001440000001731714736731407016215 0ustar liggesusers#' @title Simulated draws from model coefficients #' @name simulate_model #' #' @description Simulate draws from a statistical model to return a data frame #' of estimates. #' #' @param model Statistical model (no Bayesian models). #' @param component Should all parameters, parameters for the conditional model, #' for the zero-inflation part of the model, or the dispersion model be returned? #' Applies to models with zero-inflation and/or dispersion component. `component` #' may be one of `"conditional"`, `"zi"`, `"zero-inflated"`, `"dispersion"` or #' `"all"` (default). May be abbreviated. #' @param ... Arguments passed to [`insight::get_varcov()`], e.g. to allow simulated #' draws to be based on heteroscedasticity consistent variance covariance matrices. #' @inheritParams bootstrap_model #' @inheritParams p_value #' #' @inheritSection model_parameters.zcpglm Model components #' #' @return A data frame. #' #' @seealso [`simulate_parameters()`], [`bootstrap_model()`], [`bootstrap_parameters()`] #' #' @details #' ## Technical Details #' `simulate_model()` is a computationally faster alternative #' to `bootstrap_model()`. Simulated draws for coefficients are based #' on a multivariate normal distribution (`MASS::mvrnorm()`) with mean #' `mu = coef(model)` and variance `Sigma = vcov(model)`. #' #' ## Models with Zero-Inflation Component #' For models from packages **glmmTMB**, **pscl**, **GLMMadaptive** and #' **countreg**, the `component` argument can be used to specify #' which parameters should be simulated. For all other models, parameters #' from the conditional component (fixed effects) are simulated. This may #' include smooth terms, but not random effects. #' #' @examples #' model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris) #' head(simulate_model(model)) #' \donttest{ #' if (require("glmmTMB", quietly = TRUE)) { #' model <- glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' head(simulate_model(model)) #' head(simulate_model(model, component = "zero_inflated")) #' } #' } #' @export simulate_model <- function(model, iterations = 1000, ...) { UseMethod("simulate_model") } # Models with single component only ----------------------------------------- #' @rdname simulate_model #' @export simulate_model.default <- function(model, iterations = 1000, component = "all", ...) { # check for valid input .is_model_valid(model) out <- .simulate_model(model, iterations, component = "conditional", effects = "fixed", ...) 
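  # Conceptually equivalent sketch of what `.simulate_model()` computes (see
  # the details in the roxygen block above; the internal version uses `.mvrnorm()`):
  #   MASS::mvrnorm(n = 1000, mu = stats::coef(model), Sigma = stats::vcov(model))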
class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export simulate_model.lm <- simulate_model.default #' @export simulate_model.glmmadmb <- simulate_model.default #' @export simulate_model.cglm <- simulate_model.default #' @export simulate_model.cpglm <- simulate_model.default #' @export simulate_model.cpglmm <- simulate_model.default #' @export simulate_model.feglm <- simulate_model.default #' @export simulate_model.fixest <- simulate_model.default #' @export simulate_model.iv_robust <- simulate_model.default #' @export simulate_model.rq <- simulate_model.default #' @export simulate_model.crq <- simulate_model.default #' @export simulate_model.nlrq <- simulate_model.default #' @export simulate_model.speedglm <- simulate_model.default #' @export simulate_model.speedlm <- simulate_model.default #' @export simulate_model.glm <- simulate_model.default #' @export simulate_model.glmRob <- simulate_model.default #' @export simulate_model.lmRob <- simulate_model.default #' @export simulate_model.gls <- simulate_model.default #' @export simulate_model.lme <- simulate_model.default #' @export simulate_model.crch <- simulate_model.default #' @export simulate_model.biglm <- simulate_model.default #' @export simulate_model.plm <- simulate_model.default #' @export simulate_model.flexsurvreg <- simulate_model.default #' @export simulate_model.LORgee <- simulate_model.default #' @export simulate_model.feis <- simulate_model.default #' @export simulate_model.lmrob <- simulate_model.default #' @export simulate_model.glmrob <- simulate_model.default #' @export simulate_model.merMod <- simulate_model.default #' @export simulate_model.gamlss <- simulate_model.default #' @export simulate_model.lm_robust <- simulate_model.default #' @export simulate_model.coxme <- simulate_model.default #' @export simulate_model.geeglm <- simulate_model.default #' @export simulate_model.gee <- simulate_model.default #' @export simulate_model.clm <- simulate_model.default #' @export simulate_model.polr <- simulate_model.default #' @export simulate_model.coxph <- simulate_model.default #' @export simulate_model.logistf <- simulate_model.default #' @export simulate_model.flic <- simulate_model.default #' @export simulate_model.flac <- simulate_model.default #' @export simulate_model.truncreg <- simulate_model.default #' @export simulate_model.glimML <- simulate_model.default #' @export simulate_model.lrm <- simulate_model.default #' @export simulate_model.psm <- simulate_model.default #' @export simulate_model.ols <- simulate_model.default #' @export simulate_model.rms <- simulate_model.default #' @export simulate_model.vglm <- simulate_model.default #' @export simulate_model.censReg <- simulate_model.default #' @export simulate_model.survreg <- simulate_model.default #' @export simulate_model.multinom <- simulate_model.default #' @export simulate_model.brmultinom <- simulate_model.default #' @export simulate_model.bracl <- simulate_model.default # helper ----------------------------------------- .simulate_model <- function(model, iterations, component = "conditional", effects = "fixed", ...) { if (is.null(iterations)) iterations <- 1000 params <- insight::get_parameters(model, effects = effects, component = component, verbose = FALSE) beta_mu <- stats::setNames(params$Estimate, params$Parameter) # Transform to named vector # "..." 
  # allow specification of vcov-args (#784)
  varcov <- insight::get_varcov(model, component = component, effects = effects, ...)
  as.data.frame(.mvrnorm(n = iterations, mu = beta_mu, Sigma = varcov))

  ## Alternative approach, similar to arm::sim()
  # k <- length(insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE))
  # n <- insight::n_obs(model)
  # beta.cov <- stats::vcov(model) / stats::sigma(model)
  # s <- vector("double", iterations)
  # b <- array(NA, c(100, k))
  # for (i in 1:iterations) {
  #   s[i] <- stats::sigma(model) * sqrt((n - k) / rchisq(1, n - k))
  #   b[i,] <- .mvrnorm(n = 1, mu = beta_mu, Sigma = beta.cov * s[i] ^ 2)
  # }
}


.mvrnorm <- function(n = 1, mu, Sigma, tol = 1e-06) {
  p <- length(mu)
  if (!all(dim(Sigma) == c(p, p))) {
    insight::format_error(
      "Incompatible arguments to calculate multivariate normal distribution."
    )
  }
  eS <- eigen(Sigma, symmetric = TRUE)
  ev <- eS$values
  if (!all(ev >= -tol * abs(ev[1L]))) {
    insight::format_error("`Sigma` is not positive definite.")
  }
  X <- drop(mu) + eS$vectors %*% diag(sqrt(pmax(ev, 0)), p) %*% t(matrix(stats::rnorm(p * n), n))
  nm <- names(mu)
  dn <- dimnames(Sigma)
  if (is.null(nm) && !is.null(dn)) {
    nm <- dn[[1L]]
  }
  dimnames(X) <- list(nm, NULL)
  if (n == 1) {
    drop(X)
  } else {
    t(X)
  }
}
parameters/R/methods_weightit.R0000644000176200001440000000120014716604200016303 0ustar liggesusers# model parameters -------------------
#' @export
model_parameters.ordinal_weightit <- model_parameters.clm2

#' @export
model_parameters.multinom_weightit <- model_parameters.bracl

# CI ---------------------
#' @export
ci.ordinal_weightit <- ci.clm2

#' @export
ci.multinom_weightit <- ci.bracl

# standard errors -----------------
#' @export
standard_error.ordinal_weightit <- standard_error.clm2

#' @export
standard_error.multinom_weightit <- standard_error.bracl

# p values ----------------
#' @export
p_value.ordinal_weightit <- p_value.clm2

#' @export
p_value.multinom_weightit <- p_value.bracl
parameters/R/methods_svy2lme.R0000644000176200001440000000554014761570351016072 0ustar liggesusers#' @export
model_parameters.svy2lme <- function(model,
                                     ci = 0.95,
                                     effects = "all",
                                     include_sigma = FALSE,
                                     keep = NULL,
                                     drop = NULL,
                                     verbose = TRUE,
                                     ...) {
  dots <- list(...)
  # which component to return?
  effects <- match.arg(effects, choices = c("fixed", "random", "all"))
  params <- params_variance <- NULL

  if (effects %in% c("fixed", "all")) {
    # Processing
    fun_args <- list(
      model,
      ci = ci,
      ci_method = "wald",
      standardize = NULL,
      p_adjust = NULL,
      wb_component = FALSE,
      keep_parameters = keep,
      drop_parameters = drop,
      verbose = verbose,
      include_sigma = include_sigma,
      include_info = FALSE,
      vcov = NULL,
      vcov_args = NULL
    )
    fun_args <- c(fun_args, dots)
    params <- do.call(".extract_parameters_mixed", fun_args)
    params$Effects <- "fixed"
  }

  att <- attributes(params)

  if (effects %in% c("random", "all")) {
    params_variance <- .extract_random_variances(
      model,
      ci = ci,
      effects = effects
    )
  }

  # merge random and fixed effects, if necessary
  if (!is.null(params) && !is.null(params_variance)) {
    params$Level <- NA
    params$Group <- ""
    params <- params[match(colnames(params_variance), colnames(params))]
  }

  params <- rbind(params, params_variance)
  # remove empty column
  if (!is.null(params$Level) && all(is.na(params$Level))) {
    params$Level <- NULL
  }

  # due to rbind(), we lose attributes from "extract_parameters()",
  # so we add those attributes back here...
if (!is.null(att)) { attributes(params) <- utils::modifyList(att, attributes(params)) } params <- .add_model_parameters_attributes( params, model, ci = ci, exponentiate = FALSE, bootstrap = FALSE, iterations = 1000, ci_method = "wald", p_adjust = NULL, verbose = verbose, include_info = FALSE, group_level = FALSE, wb_component = FALSE, ... ) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export standard_error.svy2lme <- function(model, ...) { .data_frame( Parameter = .remove_backticks_from_string(colnames(model$Vbeta)), SE = as.vector(sqrt(diag(model$Vbeta))) ) } #' @export p_value.svy2lme <- function(model, ...) { stat <- insight::get_statistic(model) p <- 2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE) .data_frame( Parameter = stat$Parameter, p = as.vector(p) ) } parameters/R/methods_bggm.R0000644000176200001440000000016414030655331015401 0ustar liggesusers#' @export model_parameters.BGGM <- model_parameters.bayesQR #' @export p_value.BGGM <- p_value.BFBayesFactor parameters/R/methods_gamm4.R0000644000176200001440000000065414355245205015503 0ustar liggesusers#' @export ci.gamm4 <- function(x, ci = 0.95, ...) { x <- x$gam class(x) <- c("gam", "lm", "glm") ci(x, ci = ci, ...) } #' @export standard_error.gamm4 <- function(model, ...) { model <- model$gam class(model) <- c("gam", "lm", "glm") standard_error(model) } #' @export p_value.gamm4 <- function(model, ...) { model <- model$gam class(model) <- c("gam", "lm", "glm") p_value(model) } parameters/R/methods_brms.R0000644000176200001440000002622015022763445015441 0ustar liggesusers#' @title Parameters from Bayesian Models #' @name model_parameters.brmsfit #' #' @description #' Model parameters from Bayesian models. This function internally calls #' [`bayestestR::describe_posterior()`] to get the relevant information for #' the output. #' #' @param model Bayesian model (including SEM from **blavaan**. May also be #' a data frame with posterior samples, however, `as_draws` must be set to #' `TRUE` (else, for data frames `NULL` is returned). #' @param ci Credible Interval (CI) level. Default to `0.95` (`95%`). See #' [bayestestR::ci()] for further details. #' @param group_level Logical, for multilevel models (i.e. models with random #' effects) and when `effects = "random"`, return the parameters for each group #' level from random effects only. If `group_level = FALSE` (the default), also #' information on SD and COR are returned. Note that this argument is superseded #' by the new options for the `effects` argument. `effects = "grouplevel"` should #' be used instead of `group_level = TRUE`. #' @param component Which type of parameters to return, such as parameters for the #' conditional model, the zero-inflation part of the model, the dispersion #' term, or other auxiliary parameters be returned? Applies to models with #' zero-inflation and/or dispersion formula, or if parameters such as `sigma` #' should be included. May be abbreviated. Note that the *conditional* #' component is also called *count* or *mean* component, depending on the #' model. There are three convenient shortcuts: `component = "all"` returns #' all possible parameters. If `component = "location"`, location parameters #' such as `conditional`, `zero_inflated`, or `smooth_terms`, are returned #' (everything that are fixed or random effects - depending on the `effects` #' argument - but no auxiliary parameters). 
#' For `component = "distributional"`
#' (or `"auxiliary"`), components like `sigma`, `dispersion`, or `beta`
#' (and other auxiliary parameters) are returned.
#' @param as_draws Logical, if `TRUE` and `model` is of class `data.frame`,
#' the data frame is treated as posterior samples and handled similar to
#' Bayesian models. All arguments in `...` are passed to
#' `model_parameters.draws()`.
#' @inheritParams model_parameters.default
#' @inheritParams bayestestR::describe_posterior
#' @inheritParams insight::get_parameters
#'
#' @seealso [insight::standardize_names()] to rename columns into a consistent,
#' standardized naming scheme.
#'
#' @note When `standardize = "refit"`, columns `diagnostic`, `bf_prior` and
#' `priors` refer to the *original* `model`. If `model` is a data frame,
#' arguments `diagnostic`, `bf_prior` and `priors` are ignored.
#'
#' There is also a
#' [`plot()`-method](https://easystats.github.io/see/articles/parameters.html)
#' implemented in the [**see**-package](https://easystats.github.io/see/).
#'
#' @inheritSection model_parameters Confidence intervals and approximation of degrees of freedom
#'
#' @inheritSection model_parameters.zcpglm Model components
#'
#' @examplesIf require("rstanarm")
#' \donttest{
#' library(parameters)
#' model <- suppressWarnings(stan_glm(
#'   Sepal.Length ~ Petal.Length * Species,
#'   data = iris, iter = 500, refresh = 0
#' ))
#' model_parameters(model)
#' }
#' @return A data frame of indices related to the model's parameters.
#' @export
model_parameters.brmsfit <- function(model,
                                     centrality = "median",
                                     dispersion = FALSE,
                                     ci = 0.95,
                                     ci_method = "eti",
                                     test = "pd",
                                     rope_range = "default",
                                     rope_ci = 0.95,
                                     bf_prior = NULL,
                                     diagnostic = c("ESS", "Rhat"),
                                     priors = FALSE,
                                     effects = "fixed",
                                     component = "all",
                                     exponentiate = FALSE,
                                     standardize = NULL,
                                     group_level = FALSE,
                                     keep = NULL,
                                     drop = NULL,
                                     verbose = TRUE,
                                     ...) {
  modelinfo <- insight::model_info(model, verbose = FALSE)

  # Bayesian meta analysis
  if (!insight::is_multivariate(model) && isTRUE(modelinfo$is_meta)) {
    params <- .model_parameters_brms_meta(
      model,
      centrality = centrality,
      dispersion = dispersion,
      ci = ci,
      ci_method = ci_method,
      test = test,
      rope_range = rope_range,
      rope_ci = rope_ci,
      diagnostic = diagnostic,
      priors = priors,
      exponentiate = exponentiate,
      standardize = standardize,
      keep_parameters = keep,
      drop_parameters = drop,
      ...
    )
  } else if (effects %in% c("total", "random_total")) {
    # group level total effects (coef())
    params <- .group_level_total(
      model, centrality, dispersion, ci, ci_method, test, rope_range, rope_ci, ...
    )
    params$Effects <- "total"
    class(params) <- c("parameters_coef", "see_parameters_coef", class(params))
  } else {
    # update argument
    if (effects == "random" && group_level) {
      effects <- "grouplevel"
    }

    # Processing
    params <- .extract_parameters_bayesian(
      model,
      centrality = centrality,
      dispersion = dispersion,
      ci = ci,
      ci_method = ci_method,
      test = test,
      rope_range = rope_range,
      rope_ci = rope_ci,
      bf_prior = bf_prior,
      diagnostic = diagnostic,
      priors = priors,
      effects = effects,
      component = component,
      standardize = standardize,
      keep_parameters = keep,
      drop_parameters = drop,
      verbose = verbose,
      ...
    )

    # add prettified names as attribute.
    # Furthermore, group column is added
    params <- .add_pretty_names(params, model)

    # exponentiate coefficients and SE/CI, if requested
    params <- .exponentiate_parameters(params, model, exponentiate)

    params <- .add_model_parameters_attributes(
      params,
      model,
      ci,
      exponentiate,
      ci_method = ci_method,
      group_level = group_level,
      modelinfo = modelinfo,
      verbose = verbose,
      ...
    )

    attr(params, "parameter_info") <- .get_cleaned_parameters(params, model)
    attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model))
    attr(params, "dpars") <- insight::find_auxiliary(model, verbose = FALSE)
    class(params) <- unique(c("parameters_model", "see_parameters_model", class(params)))
  }

  params
}


# brms meta analysis -------

.model_parameters_brms_meta <- function(model,
                                        centrality = "median",
                                        dispersion = FALSE,
                                        ci = 0.95,
                                        ci_method = "eti",
                                        test = "pd",
                                        rope_range = "default",
                                        rope_ci = 0.95,
                                        diagnostic = c("ESS", "Rhat"),
                                        priors = FALSE,
                                        exponentiate = FALSE,
                                        standardize = NULL,
                                        keep_parameters = NULL,
                                        drop_parameters = NULL,
                                        verbose = TRUE,
                                        ...) {
  # parameters
  smd <- insight::get_parameters(model, effects = "fixed", component = "conditional")
  studies <- insight::get_parameters(model, effects = "random", parameters = "^(?!sd_)")
  studies[] <- lapply(studies, function(i) i + smd[[1]])
  tau <- insight::get_parameters(model, effects = "random", parameters = "^sd_")

  params <- bayestestR::describe_posterior(
    cbind(studies, smd),
    centrality = centrality,
    dispersion = dispersion,
    ci = ci,
    ci_method = ci_method,
    test = test,
    rope_range = rope_range,
    rope_ci = rope_ci,
    ...
  )

  params_diagnostics <- bayestestR::diagnostic_posterior(
    model,
    effects = "all",
    diagnostic = diagnostic,
    ...
  )

  params_tau <- bayestestR::describe_posterior(
    tau,
    centrality = centrality,
    dispersion = dispersion,
    ci = ci,
    ci_method = ci_method,
    test = test,
    rope_range = rope_range,
    rope_ci = rope_ci,
    ...
) # add weights params$Weight <- 1 / c(insight::get_response(model)[[2]], NA) # merge description with diagnostic params <- merge(params, params_diagnostics, by = "Parameter", all.x = TRUE, sort = FALSE) # Renaming re_name <- insight::find_random(model, flatten = TRUE) study_names <- gsub(sprintf("r_%s\\[(.*)\\]", re_name[1]), "\\1", colnames(studies)) # replace dots by white space study_names <- gsub(".", " ", study_names, fixed = TRUE) # remove "Intercept" study_names <- insight::trim_ws(gsub(",Intercept", "", study_names, fixed = TRUE)) cleaned_parameters <- c(study_names, "Overall", "tau") # components params$Component <- "Studies" params_tau$Component <- "tau" # merge with tau params <- merge(params, params_tau, all = TRUE, sort = FALSE) # reorder columns ci_column <- which(colnames(params) == "CI_high") weight_column <- which(colnames(params) == "Weight") first_cols <- c(1:ci_column, weight_column) params <- params[, c(first_cols, seq_len(ncol(params))[-first_cols])] # filter parameters, if requested if (!is.null(keep_parameters) || !is.null(drop_parameters)) { params <- .filter_parameters(params, keep = keep_parameters, drop = drop_parameters, verbose = verbose ) } # add attributes attr(params, "tau") <- params_tau attr(params, "pretty_names") <- cleaned_parameters attr(params, "cleaned_parameters") <- cleaned_parameters attr(params, "ci") <- ci attr(params, "ci_method") <- ci_method attr(params, "exponentiate") <- exponentiate attr(params, "model_class") <- class(model) attr(params, "is_bayes_meta") <- TRUE attr(params, "study_weights") <- params$Weight attr(params, "data") <- cbind(studies, smd, tau) class(params) <- unique(c("parameters_brms_meta", "see_parameters_brms_meta", class(params))) params } #' @export standard_error.brmsfit <- function(model, effects = "fixed", component = "all", ...) { params <- insight::get_parameters(model, effects = effects, component = component, ...) .data_frame( Parameter = colnames(params), SE = unname(sapply(params, stats::sd, na.rm = TRUE)) ) } #' @export p_value.brmsfit <- p_value.BFBayesFactor parameters/R/methods_mixor.R0000644000176200001440000000562614507235543015643 0ustar liggesusers#' @export model_parameters.mixor <- function(model, ci = 0.95, effects = "all", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, include_sigma = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) # standardize only works for fixed effects... if (!is.null(standardize) && standardize != "refit") { if (!missing(effects) && effects != "fixed" && verbose) { insight::format_warning( "Standardizing coefficients only works for fixed effects of the mixed model." ) } effects <- "fixed" } out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Effects"), standardize = standardize, exponentiate = exponentiate, effects = effects, include_sigma = include_sigma, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export ci.mixor <- function(x, ci = 0.95, effects = "all", ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) .ci_generic(model = x, ci = ci, dof = Inf, effects = effects, ...) } #' @export standard_error.mixor <- function(model, effects = "all", ...) 
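# Usage sketch (requires the `mixor` package; `d` is a hypothetical data frame
# with a clustering variable `id`; the argument names follow `mixor::mixor()`):
#   m <- mixor::mixor(y ~ x, data = d, id = id, link = "probit")
#   standard_error(m, effects = "fixed")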
{ effects <- match.arg(effects, choices = c("all", "fixed", "random")) stats <- model$Model[, "Std. Error"] parms <- insight::get_parameters(model, effects = effects) .data_frame( Parameter = parms$Parameter, SE = stats[parms$Parameter], Effects = parms$Effects ) } #' @export p_value.mixor <- function(model, effects = "all", ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) stats <- model$Model[, "P(>|z|)"] parms <- insight::get_parameters(model, effects = effects) .data_frame( Parameter = parms$Parameter, p = stats[parms$Parameter], Effects = parms$Effects ) } #' @export simulate_model.mixor <- function(model, iterations = 1000, effects = "all", ...) { effects <- match.arg(effects, choices = c("all", "fixed", "random")) out <- .simulate_model(model, iterations, component = "conditional", effects = effects, ...) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } parameters/R/standard_error_kenward.R0000644000176200001440000000167715073732442017507 0ustar liggesusers#' @rdname p_value_kenward #' @export se_kenward <- function(model, ...) { UseMethod("se_kenward") } #' @export se_kenward.default <- function(model, ...) { if (!.check_REML_fit(model)) { model <- stats::update(model, . ~ ., REML = TRUE) } vcov_adjusted <- insight::get_varcov(model, vcov = "kenward-roger") params <- insight::get_parameters(model, effects = "fixed") .data_frame(Parameter = params$Parameter, SE = abs(sqrt(diag(vcov_adjusted)))) } #' @export se_kenward.glmmTMB <- function(model, component = "conditional", ...) { if (!.check_REML_fit(model)) { model <- stats::update(model, . ~ ., REML = TRUE) } vcov_adjusted <- insight::get_varcov(model, vcov = "kenward-roger") params <- insight::get_parameters(model, effects = "fixed", component = component) .data_frame( Parameter = params$Parameter, SE = abs(sqrt(diag(vcov_adjusted))), Component = component ) } parameters/R/methods_robmixglm.R0000644000176200001440000000057514355245205016500 0ustar liggesusers#' @export standard_error.robmixglm <- function(model, ...) { se <- stats::na.omit(.get_se_from_summary(model)) .data_frame( Parameter = names(se), SE = as.vector(se) ) } #' @export p_value.robmixglm <- function(model, ...) { p <- stats::na.omit(.get_pval_from_summary(model)) .data_frame( Parameter = names(p), p = as.vector(p) ) } parameters/R/methods_lqmm.R0000644000176200001440000000572314736731407015455 0ustar liggesusers#' @export model_parameters.lqmm <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, p_adjust = NULL, verbose = TRUE, ...) { # Processing if (bootstrap) { parameters <- bootstrap_parameters( model, iterations = iterations, ci = ci, ... ) } else { parameters <- .extract_parameters_lqmm( model, ci = ci, p_adjust = p_adjust, verbose = verbose, ... ) } parameters <- .add_model_parameters_attributes( parameters, model, ci, exponentiate = FALSE, p_adjust = p_adjust, verbose = verbose, ... ) attr(parameters, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export model_parameters.lqm <- model_parameters.lqmm #' @export ci.lqmm <- function(x, ...) { out <- model_parameters(x, ...) as.data.frame(out[c("Parameter", "CI_low", "CI_high")]) } #' @export ci.lqm <- ci.lqmm #' @export standard_error.lqmm <- function(model, ...) { out <- model_parameters(model, ...) 
as.data.frame(out[c("Parameter", "SE")]) } #' @export standard_error.lqm <- standard_error.lqmm #' @export p_value.lqmm <- function(model, ...) { out <- model_parameters(model, ...) as.data.frame(out[c("Parameter", "p")]) } #' @export p_value.lqm <- p_value.lqmm # helper ------------------ .extract_parameters_lqmm <- function(model, ci, p_adjust, verbose = TRUE, ...) { cs <- summary(model) parameters <- insight::get_parameters(model) if (is.list(cs$tTable)) { summary_table <- do.call(rbind, cs$tTable) } else { summary_table <- cs$tTable } # ==== Coefficient, SE and test statistic parameters$Coefficient <- parameters$Estimate parameters$SE <- summary_table[, 2] parameters$t <- parameters$Estimate / parameters$SE # ==== DF parameters$df_error <- tryCatch( { if (!is.null(cs$rdf)) { cs$rdf } else { attr(cs$B, "R") - 1 } }, error = function(e) { Inf } ) # ==== Conf Int parameters$CI_low <- parameters$Coefficient - stats::qt((1 + ci) / 2, df = parameters$df_error) * parameters$SE parameters$CI_high <- parameters$Coefficient + stats::qt((1 + ci) / 2, df = parameters$df_error) * parameters$SE # ==== p-value parameters$p <- summary_table[, 5] if (!is.null(p_adjust)) { parameters <- .p_adjust(parameters, p_adjust, model, verbose) } # ==== Reorder col_order <- c("Parameter", "Coefficient", "SE", "CI_low", "CI_high", "t", "df_error", "p", "Component") parameters[col_order[col_order %in% names(parameters)]] } parameters/R/methods_sarlm.R0000644000176200001440000000123514317274256015616 0ustar liggesusers#' @export p_value.Sarlm <- function(model, ...) { stat <- insight::get_statistic(model) .data_frame( Parameter = stat$Parameter, p = 2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE) ) } #' @export ci.Sarlm <- function(x, ci = 0.95, ...) { .ci_generic(model = x, ci = ci, ...) } #' @export standard_error.Sarlm <- function(model, ...) { params <- insight::get_parameters(model) s <- summary(model) # add rho, if present if (!is.null(s$rho)) { rho <- as.numeric(s$rho.se) } else { rho <- NULL } .data_frame( Parameter = params$Parameter, SE = c(rho, as.vector(s$Coef[, 2])) ) } parameters/R/format_parameters.R0000644000176200001440000004172715073712214016471 0ustar liggesusers#' @title Parameter names formatting #' @name format_parameters #' #' @description This functions formats the names of model parameters (coefficients) #' to make them more human-readable. #' #' @param model A statistical model. #' @param brackets A character vector of length two, indicating the opening and closing brackets. #' @param ... Currently not used. #' #' @section Interpretation of Interaction Terms: #' Note that the *interpretation* of interaction terms depends on many #' characteristics of the model. The number of parameters, and overall #' performance of the model, can differ *or not* between `a * b`, #' `a : b`, and `a / b`, suggesting that sometimes interaction terms #' give different parameterizations of the same model, but other times it gives #' completely different models (depending on `a` or `b` being factors #' of covariates, included as main effects or not, etc.). Their interpretation #' depends of the full context of the model, which should not be inferred #' from the parameters table alone - rather, we recommend to use packages #' that calculate estimated marginal means or marginal effects, such as #' \CRANpkg{modelbased}, \CRANpkg{emmeans}, \CRANpkg{ggeffects}, or #' \CRANpkg{marginaleffects}. 
#' To raise awareness for this issue, you may use
#' `print(..., show_formula = TRUE)` to add the model specification to the output
#' of the [`print()`][print.parameters_model] method for `model_parameters()`.
#'
#' @examples
#' model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris)
#' format_parameters(model)
#'
#' model <- lm(Sepal.Length ~ Petal.Length + (Species / Sepal.Width), data = iris)
#' format_parameters(model)
#'
#' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris)
#' format_parameters(model)
#'
#' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris)
#' format_parameters(model)
#' @return A (named) character vector with formatted parameter names. The names
#'   of the vector refer to the original names of the coefficients.
#' @export
format_parameters <- function(model, ...) {
  UseMethod("format_parameters")
}

#' @rdname format_parameters
#' @export
format_parameters.default <- function(model, brackets = c("[", "]"), ...) {
  # check for valid input
  .is_model_valid(model)
  .safe(.format_parameter_default(model, brackets = brackets, ...))
}

#' @export
format_parameters.parameters_model <- function(model, ...) {
  if (!is.null(attributes(model)$pretty_names)) {
    model$Parameter <- attributes(model)$pretty_names[model$Parameter]
  }
  model
}


# Utilities ---------------------------------------------------------------

.format_parameter_default <- function(
  model,
  effects = "fixed",
  brackets = c("[", "]"),
  ...
) {
  original_names <- parameter_names <- insight::find_parameters(
    model,
    effects = effects,
    flatten = TRUE
  )

  # save some time, if model info is passed as argument
  dot_args <- list(...)
  if (is.null(dot_args$model_info)) {
    info <- insight::model_info(model, verbose = FALSE)
  } else {
    info <- dot_args$model_info
  }

  ## TODO remove is.list() when insight 0.8.3 on CRAN
  if (is.null(info) || !is.list(info)) {
    info <- list(family = "unknown", link_function = "unknown")
  }

  # quick fix, for multivariate response models, we use
  # info from first model only
  if (
    insight::is_multivariate(model) &&
      !"is_zero_inflated" %in% names(info) &&
      !inherits(model, c("vgam", "vglm"))
  ) {
    info <- info[[1]]
  }

  # Type-specific changes
  types <- parameters_type(model)
  if (is.null(types)) {
    return(NULL)
  }
  types$Parameter <- .clean_parameter_names(types$Parameter, full = TRUE)

  # special handling hurdle- and zeroinfl-models ---------------------
  if (isTRUE(info$is_zero_inflated) || isTRUE(info$is_hurdle)) {
    parameter_names <- gsub("^(count_|zero_)", "", parameter_names)
    types$Parameter <- gsub("^(count_|zero_)", "", types$Parameter)
  }

  # special handling polr ---------------------
  if (inherits(model, c("polr", "svyolr"))) {
    original_names <- gsub("Intercept: ", "", original_names, fixed = TRUE)
    parameter_names <- gsub("Intercept: ", "", parameter_names, fixed = TRUE)
  }

  # special handling bracl ---------------------
  if (inherits(model, "bracl")) {
    parameter_names <- gsub("(.*):(.*)", "\\2", parameter_names)
  }

  # special handling DirichletRegModel ---------------------
  dirich_names <- NULL
  if (inherits(model, "DirichletRegModel")) {
    cf <- stats::coef(model)
    if (model$parametrization == "common") {
      pattern <- paste0("(", paste(model$varnames, collapse = "|"), ")\\.(.*)")
      dirich_names <- parameter_names <- gsub(pattern, "\\2", names(unlist(cf)))
    } else {
      dirich_names <- parameter_names <- gsub(
        "(.*)\\.(.*)\\.(.*)",
        "\\3",
        names(unlist(cf))
      )
    }
    original_names <- parameter_names
    if (!is.null(dirich_names)) {
      types$Parameter <- dirich_names
    }
  }

  # remove "as.factor()", "log()" etc. 
from parameter names parameter_names <- .clean_parameter_names(parameter_names) for (i in seq_len(nrow(types))) { name <- types$Parameter[i] if (types$Type[i] %in% c("interaction", "nested", "simple")) { # Interaction or nesting # for "serp" models, coefficients end with ":1", ":2", etc. - we need # to take this into account when splitting the name into components. if (inherits(model, "serp")) { pattern <- "(:(?![0-9]+$))" components <- unlist(strsplit(name, pattern, perl = TRUE), use.names = FALSE) } else { components <- unlist(strsplit(name, ":", fixed = TRUE), use.names = FALSE) } is_nested <- types$Type[i] == "nested" is_simple <- types$Type[i] == "simple" for (j in seq_along(components)) { if (components[j] %in% types$Parameter) { type <- types[types$Parameter == components[j], ] ## TODO check if this is ok... # for models with multiple response categories, we might have same # variable for each response, thus we have multiple rows here, # where only one row is required. if (nrow(type) > 1) { type <- type[1, ] } components[j] <- .format_parameter( components[j], variable = type$Variable, type = type$Type, level = type$Level, brackets = brackets ) } else if (components[j] %in% types$Secondary_Parameter) { type <- types[ !is.na(types$Secondary_Parameter) & types$Secondary_Parameter == components[j], ] components[j] <- .format_parameter( components[j], variable = type[1, ]$Secondary_Variable, type = type[1, ]$Secondary_Type, level = type[1, ]$Secondary_Level, brackets = brackets ) } } parameter_names[i] <- .format_interaction( components = components, type = types[i, "Type"], is_nested = is_nested, is_simple = is_simple, ... ) } else { # No interaction type <- types[i, ] parameter_names[i] <- .format_parameter( name, variable = type$Variable, type = type$Type, level = type$Level, brackets = brackets ) } } # do some final formatting, like replacing underscores or dots with whitespace. parameter_names <- gsub("(\\.|_)(?![^\\[]*\\])", " ", parameter_names, perl = TRUE) # remove double spaces parameter_names <- gsub(" ", " ", parameter_names, fixed = TRUE) # "types$Parameter" here is cleaned, i.e. patterns like "log()", "as.factor()" # etc. are removed. However, these patterns are needed in "format_table()", # code-line x$Parameter <- attributes(x)$pretty_names[x$Parameter] # when we use "types$Parameter" here, matching of pretty names does not work, # so output will be NA resp. blank fields... Thus, I think we should use # the original parameter-names here. 
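  # e.g., the coefficient "Speciesversicolor" is mapped to the label
  # "Species [versicolor]", keyed by its original coefficient name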
  names(parameter_names) <- original_names # types$Parameter
  parameter_names
}


#' @keywords internal
.format_parameter <- function(name, variable, type, level, brackets = brackets) {
  # Factors
  if (type == "factor") {
    name <- .format_factor(name = name, variable = variable, brackets = brackets)
  }

  # Polynomials
  if (type %in% c("poly", "poly_raw")) {
    name <- .format_poly(
      name = name,
      variable = variable,
      type = type,
      degree = level,
      brackets = brackets
    )
  }

  # Splines
  if (type == "spline") {
    name <- .format_poly(
      name = name,
      variable = variable,
      type = type,
      degree = level,
      brackets = brackets
    )
  }

  # log-transformation
  if (type == "logarithm") {
    name <- .format_log(
      name = name,
      variable = variable,
      type = type,
      brackets = brackets
    )
  }

  # exp-transformation
  if (type == "exponentiation") {
    name <- .format_log(
      name = name,
      variable = variable,
      type = type,
      brackets = brackets
    )
  }

  # sqrt-transformation
  if (type == "squareroot") {
    name <- .format_log(
      name = name,
      variable = variable,
      type = type,
      brackets = brackets
    )
  }

  # As Is
  if (type == "asis") {
    name <- variable
  }

  # Smooth
  if (type == "smooth") {
    name <- gsub("^smooth_(.*)\\[(.*)\\]", "\\2", name)
    name <- gsub("s(", "Smooth term (", name, fixed = TRUE)
  }

  # Ordered
  if (type == "ordered") {
    name <- paste(variable, level)
  }

  name
}


#' @keywords internal
.format_interaction <- function(
  components,
  type,
  is_nested = FALSE,
  is_simple = FALSE,
  interaction_mark = NULL,
  ...
) {
  # sep <- ifelse(is_nested | is_simple, " : ", " * ")
  # sep <- ifelse(is_nested, " / ", " * ")
  # sep <- ifelse(is_simple, " : ", ifelse(is_nested, " / ", " * "))

  if (is.null(interaction_mark)) {
    if (.unicode_symbols()) {
      sep <- "\u00D7"
    } else {
      sep <- "*"
    }
  } else {
    sep <- interaction_mark
  }

  # either use argument, or override with options
  sep <- paste0(" ", getOption("parameters_interaction", insight::trim_ws(sep)), " ")

  if (length(components) > 2) {
    if (type == "interaction") {
      components <- paste0(
        "(",
        paste(utils::head(components, -1), collapse = sep),
        ")",
        sep,
        utils::tail(components, 1)
      )
    } else {
      components <- paste(components, collapse = sep)
    }
  } else {
    components <- paste(components, collapse = sep)
  }
  components
}


# format classes -----------------------------

#' @keywords internal
.format_factor <- function(name, variable, brackets = c("[", "]")) {
  level <- sub(variable, "", name, fixed = TRUE)

  # special handling for "cut()"
  pattern_cut_right <- "^\\((.*),(.*)\\]$"
  pattern_cut_left <- "^\\[(.*),(.*)\\)$"

  if (all(grepl(pattern_cut_right, level))) {
    lower_bounds <- gsub(pattern_cut_right, "\\1", level)
    upper_bounds <- gsub(pattern_cut_right, "\\2", level)
    level <- paste0(">", as.numeric(lower_bounds), "-", upper_bounds)
  } else if (all(grepl(pattern_cut_left, level))) {
    lower_bounds <- gsub(pattern_cut_left, "\\1", level)
    upper_bounds <- gsub(pattern_cut_left, "\\2", level)
    level <- paste0(lower_bounds, "-<", as.numeric(upper_bounds))
  }
  paste0(variable, " ", brackets[1], level, brackets[2])
}

#' @keywords internal
.format_poly <- function(name, variable, type, degree, brackets = c("[", "]")) {
  paste0(
    variable,
    " ",
    brackets[1],
    format_order(as.numeric(degree), textual = FALSE),
    " degree",
    brackets[2]
  )
}

#' @keywords internal
.format_log <- function(name, variable, type, brackets = c("[", "]")) {
  paste0(variable, " ", brackets[1], gsub("(.*)\\((.*)\\)", "\\1", name), brackets[2])
}

#' @keywords internal
.format_ordered <- function(degree, brackets = c("[", "]")) {
  switch(
    degree,
    .L = paste0(brackets[1], "linear", brackets[2]),
    .Q = paste0(brackets[1], "quadratic",
brackets[2]), .C = paste0(brackets[1], "cubic", brackets[2]), paste0( brackets[1], parameters::format_order( as.numeric(gsub("^", "", degree, fixed = TRUE)), textual = FALSE ), " degree", brackets[2] ) ) } # replace pretty names with value labels, when present --------------- .format_value_labels <- function(params, model = NULL) { pretty_labels <- NULL if (is.null(model)) { model <- .get_object(params) } # validation check if (!is.null(model) && insight::is_regression_model(model) && !is.data.frame(model)) { # get data, but exclude response - we have no need for that label mf <- insight::get_data(model, source = "mf", verbose = FALSE) # sanity check - any labels? has_labels <- vapply( mf, function(i) !is.null(attr(i, "labels", exact = TRUE)), logical(1) ) # if we don't have labels, we try to get data from environment if (!any(has_labels)) { mf <- insight::get_data(model, source = "environment", verbose = FALSE) } resp <- insight::find_response(model, combine = FALSE) mf <- mf[, setdiff(colnames(mf), resp), drop = FALSE] # return variable labels, and for factors, add labels for each level lbs <- lapply(colnames(mf), function(i) { vec <- mf[[i]] if (is.factor(vec)) { variable_label <- attr(vec, "label", exact = TRUE) value_labels <- names(attr(vec, "labels", exact = TRUE)) if (is.null(variable_label)) { variable_label <- i } if (is.null(value_labels)) { value_labels <- levels(vec) } out <- paste0(variable_label, " [", value_labels, "]") } else { out <- attr(vec, "label", exact = TRUE) } if (is.null(out)) { i } else { out } }) # coefficient names (not labels) preds <- lapply(colnames(mf), function(i) { if (is.character(mf[[i]])) { mf[[i]] <- as.factor(mf[[i]]) } if (is.factor(mf[[i]])) { i <- paste0(i, levels(mf[[i]])) } i }) # name elements names(lbs) <- names(preds) <- colnames(mf) pretty_labels <- .safe(stats::setNames( unlist(lbs, use.names = FALSE), unlist(preds, use.names = FALSE) )) # retrieve pretty names attribute pn <- attributes(params)$pretty_names # replace former pretty names with labels, if we have any labels # (else, default pretty names are returned) if (!is.null(pretty_labels)) { # for models from pscl, we have "count_" and "zero_" prefixes, which # we need to add to the "pretty_labels" names, so that we can match # them with the parameters if (inherits(model, c("zeroinfl", "hurdle"))) { pretty_labels <- c( stats::setNames(pretty_labels, paste0("count_", names(pretty_labels))), stats::setNames(pretty_labels, paste0("zero_", names(pretty_labels))) ) } # check if we have any interactions, and if so, create combined labels interactions <- pn[grepl(":", names(pn), fixed = TRUE)] if (length(interactions)) { labs <- NULL for (i in names(interactions)) { # extract single coefficient names from interaction term out <- unlist(strsplit(i, ":", fixed = TRUE)) # combine labels labs <- c( labs, paste(sapply(out, function(l) pretty_labels[l]), collapse = " * ") ) } # add interaction terms to labels string names(labs) <- names(interactions) pretty_labels <- c(pretty_labels, labs) } # make sure "invalid" labels are ignored common_labels <- intersect(names(pretty_labels), names(pn)) pn[common_labels] <- pretty_labels[common_labels] } pretty_labels <- pn } # missing labels return original parameter name (e.g., variance components in mixed models) out <- stats::setNames(params$Parameter, params$Parameter) pretty_labels <- pretty_labels[names(pretty_labels) %in% params$Parameter] out[match(names(pretty_labels), params$Parameter)] <- pretty_labels out } # helper ------------------- 
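
# usage sketch for the formatting helpers above (kept as a comment so nothing
# runs at build time; the model below is illustrative only):
#
#   m <- lm(Sepal.Length ~ Species * Petal.Length, data = iris)
#   format_parameters(m)
#   # e.g. "Speciesversicolor" -> "Species [versicolor]"; interaction terms get
#   # the "*" (or unicode multiplication) mark, depending on the
#   # "parameters_interaction" option and .unicode_symbols(), and dots or
#   # underscores in variable names are replaced by spaces
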
.unicode_symbols <- function() {
  # symbols only work on Windows from R 4.2 and higher
  win_os <- tryCatch(
    {
      si <- Sys.info()
      if (is.null(si["sysname"])) {
        FALSE
      } else {
        si["sysname"] == "Windows" || startsWith(R.version$os, "mingw")
      }
    },
    error = function(e) {
      TRUE
    }
  )
  l10n_info()[["UTF-8"]] && ((win_os && getRversion() >= "4.2") || (!win_os && getRversion() >= "4.0"))
}
parameters/R/methods_aod.R0000644000176200001440000000666214761570351015246 0ustar liggesusers# classes: .glimML

## TODO add ci_method later?

#################### .glimML ------


#' @title Parameters from special models
#' @name model_parameters.glimML
#'
#' @description
#' Parameters from special regression models not listed under one of the
#' previous categories yet.
#'
#' @param component Model component for which parameters should be shown. May be
#'   one of `"conditional"`, `"precision"` (e.g. **betareg**), `"scale"` (e.g.
#'   **ordinal**), `"extra"` (e.g. **glmx**), `"marginal"` (e.g. **mfx**),
#'   `"conditional"` or `"full"` (for `MuMIn::model.avg()`) or `"all"`. See section
#'   _Model components_ for an overview of possible options for `component`.
#' @inheritParams model_parameters.default
#' @inheritParams model_parameters.brmsfit
#' @inheritParams simulate_model
#'
#' @seealso [insight::standardize_names()] to rename columns into a consistent,
#'   standardized naming scheme.
#'
#' @inheritSection model_parameters.zcpglm Model components
#'
#' @examples
#' library(parameters)
#' if (require("brglm2", quietly = TRUE)) {
#'   data("stemcell")
#'   model <- bracl(
#'     research ~ as.numeric(religion) + gender,
#'     weights = frequency,
#'     data = stemcell,
#'     type = "ML"
#'   )
#'   model_parameters(model)
#' }
#' @return A data frame of indices related to the model's parameters.
#' @export
model_parameters.glimML <- function(model,
                                    ci = 0.95,
                                    bootstrap = FALSE,
                                    iterations = 1000,
                                    component = "conditional",
                                    standardize = NULL,
                                    exponentiate = FALSE,
                                    p_adjust = NULL,
                                    include_info = getOption("parameters_info", FALSE),
                                    keep = NULL,
                                    drop = NULL,
                                    verbose = TRUE,
                                    ...) {
  component <- insight::validate_argument(
    component,
    c("conditional", "random", "dispersion", "all")
  )

  # dispersion is just an alias...
  if (component == "dispersion") {
    component <- "random"
  }

  out <- .model_parameters_generic(
    model = model,
    ci = ci,
    component = component,
    bootstrap = bootstrap,
    iterations = iterations,
    merge_by = "Parameter",
    standardize = standardize,
    exponentiate = exponentiate,
    p_adjust = p_adjust,
    keep_parameters = keep,
    drop_parameters = drop,
    include_info = include_info,
    verbose = verbose,
    ...
  )

  attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model))
  out
}


#' @export
standard_error.glimML <- function(model, ...) {
  insight::check_if_installed("aod")

  s <- methods::slot(aod::summary(model), "Coef")
  se <- s[, 2]

  .data_frame(
    Parameter = .remove_backticks_from_string(rownames(s)),
    SE = as.vector(se)
  )
}


#' @export
p_value.glimML <- function(model, ...)
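# p-values are taken from the Wald table ("Coef" slot) of aod::summary(),
# analogous to standard_error.glimML() above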
{ insight::check_if_installed("aod") s <- methods::slot(aod::summary(model), "Coef") p <- s[, 4] .data_frame( Parameter = .remove_backticks_from_string(rownames(s)), p = as.vector(p) ) } parameters/R/print_md.R0000644000176200001440000002313515053035103014554 0ustar liggesusers# normal print ---------------------------- #' @rdname print.parameters_model #' @export print_md.parameters_model <- function(x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, digits = 2, ci_digits = digits, p_digits = 3, footer_digits = 3, ci_brackets = c("(", ")"), show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, groups = NULL, include_reference = FALSE, verbose = TRUE, ...) { # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", digits) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", p_digits) } if (missing(footer_digits)) { footer_digits <- .additional_arguments(x, "footer_digits", footer_digits) } # check options --------------- # check if pretty names should be replaced by value labels # (if we have labelled data) if (isTRUE(getOption("parameters_labels", FALSE)) || identical(pretty_names, "labels")) { attr(x, "pretty_names") <- attr(x, "pretty_labels", exact = TRUE) pretty_names <- TRUE } # select which columns to print if (is.null(select)) { select <- getOption("parameters_select") } # table caption table_caption <- .print_caption(x, caption, format = "markdown") # main table formatted_table <- format( x = x, pretty_names = pretty_names, split_components = split_components, select = select, digits = digits, ci_digits = ci_digits, p_digits = p_digits, zap_small = zap_small, ci_width = NULL, ci_brackets = ci_brackets, format = "markdown", groups = groups, include_reference = include_reference, ... ) # replace brackets by parenthesis if (!is.null(ci_brackets) && "Parameter" %in% colnames(formatted_table)) { formatted_table$Parameter <- gsub("[", ci_brackets[1], formatted_table$Parameter, fixed = TRUE) formatted_table$Parameter <- gsub("]", ci_brackets[2], formatted_table$Parameter, fixed = TRUE) } # footer footer_stats <- .print_footer( x, digits = footer_digits, show_sigma = show_sigma, show_formula = show_formula, format = "markdown" ) # check if footer should be printed at all. can be FALSE, or "" to suppress footer if (isFALSE(footer)) { footer <- "" } if (!identical(footer, "")) { if (is.null(footer)) { footer <- footer_stats } else { footer <- paste0("\n", footer, "\n", footer_stats) } } insight::export_table( formatted_table, format = "markdown", caption = table_caption, subtitle = subtitle, footer = footer, align = "firstleft", ... ) } #' @export print_md.parameters_brms_meta <- print_md.parameters_model #' @export print_md.parameters_simulate <- print_md.parameters_model # compare parameters ------------------------- #' @rdname print.compare_parameters #' @export print_md.compare_parameters <- function(x, digits = 2, ci_digits = digits, p_digits = 3, caption = NULL, subtitle = NULL, footer = NULL, select = NULL, split_components = TRUE, ci_brackets = c("(", ")"), zap_small = FALSE, groups = NULL, ...) 
{
  # check if user supplied digits attributes
  if (missing(digits)) {
    digits <- .additional_arguments(x, "digits", digits)
  }
  if (missing(ci_digits)) {
    ci_digits <- .additional_arguments(x, "ci_digits", digits)
  }
  if (missing(p_digits)) {
    p_digits <- .additional_arguments(x, "p_digits", p_digits)
  }

  # get attributes
  if (missing(select) || is.null(select)) {
    select <- attributes(x)$output_style
  }
  if (missing(groups)) {
    groups <- attributes(x)$parameter_groups
  }

  formatted_table <- format(
    x,
    select = select,
    split_components = split_components,
    digits = digits,
    ci_digits = ci_digits,
    p_digits = p_digits,
    ci_width = NULL,
    ci_brackets = ci_brackets,
    format = "markdown",
    zap_small = zap_small,
    groups = groups
  )

  # replace brackets by parenthesis
  if (!is.null(ci_brackets) && "Parameter" %in% colnames(formatted_table)) {
    formatted_table$Parameter <- gsub("[", ci_brackets[1], formatted_table$Parameter, fixed = TRUE)
    formatted_table$Parameter <- gsub("]", ci_brackets[2], formatted_table$Parameter, fixed = TRUE)
  }

  insight::export_table(
    formatted_table,
    format = "markdown",
    caption = caption,
    subtitle = subtitle,
    footer = footer
  )
}


# SEM print ----------------------------

#' @export
print_md.parameters_sem <- function(x,
                                    digits = 2,
                                    ci_digits = digits,
                                    p_digits = 3,
                                    ci_brackets = c("(", ")"),
                                    ...) {
  # check if user supplied digits attributes
  if (missing(digits)) {
    digits <- .additional_arguments(x, "digits", digits)
  }
  if (missing(ci_digits)) {
    ci_digits <- .additional_arguments(x, "ci_digits", digits)
  }
  if (missing(p_digits)) {
    p_digits <- .additional_arguments(x, "p_digits", p_digits)
  }

  formatted_table <- format(
    x = x,
    digits = digits,
    ci_digits = ci_digits,
    p_digits = p_digits,
    format = "markdown",
    ci_width = NULL,
    ci_brackets = ci_brackets,
    ...
  )

  insight::export_table(formatted_table, format = "markdown", align = "firstleft", ...)
}


# PCA / EFA / CFA ----------------------------

#' @export
print_md.parameters_efa_summary <- function(x, digits = 3, ...) {
  table_caption <- "(Explained) Variance of Components"

  if ("Parameter" %in% names(x)) {
    x$Parameter <- c("Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") # nolint
  } else if ("Component" %in% names(x)) {
    names(x) <- c("Component", "Eigenvalues", "Variance Explained", "Variance Explained (Cumulative)", "Variance Explained (Proportion)") # nolint
  }

  # we may have factor correlations
  fc <- attributes(x)$factor_correlations
  # if we have factor correlations, we need to add them to the table
  if (!is.null(fc)) {
    x <- list(x, fc)
    table_caption <- list(
      table_caption,
      "Factor Correlations"
    )
  }

  insight::export_table(x, digits = digits, format = "markdown", caption = table_caption, align = "firstleft")
}

#' @export
print_md.parameters_pca_summary <- print_md.parameters_efa_summary

#' @export
print_md.parameters_omega_summary <- function(x, ...) {
  out <- .print_omega_summary(x, format = "markdown")
  insight::export_table(out$tables, caption = out$captions, format = "markdown", ...)
}


#' @export
print_md.parameters_efa <- function(x, digits = 2, sort = FALSE, threshold = NULL, labels = NULL, ...) {
  # extract attributes
  if (is.null(threshold)) {
    threshold <- attributes(x)$threshold
  }

  .print_parameters_cfa_efa(
    x,
    threshold = threshold,
    sort = sort,
    format = "markdown",
    digits = digits,
    labels = labels,
    ...
) } #' @export print_md.parameters_pca <- print_md.parameters_efa #' @export print_md.parameters_omega <- print_md.parameters_efa # Equivalence test ---------------------------- #' @export print_md.equivalence_test_lm <- function( x, digits = 2, ci_brackets = c("(", ")"), zap_small = FALSE, ... ) { .print_equivalence_test_lm( x, digits = digits, ci_brackets = ci_brackets, zap_small = zap_small, format = "markdown", ... ) } # p_function ---------------------------- #' @export print_md.parameters_p_function <- function(x, digits = 2, ci_width = "auto", ci_brackets = c("(", ")"), pretty_names = TRUE, ...) { .print_p_function(x, digits, ci_width, ci_brackets, pretty_names, format = "markdown", ...) } parameters/R/extract_random_variances.R0000644000176200001440000006747515057525051020036 0ustar liggesusers.extract_random_variances <- function(model, ...) { UseMethod(".extract_random_variances") } # default method ------------------- .extract_random_variances.default <- function(model, ci = 0.95, effects = "random", component = "conditional", ci_method = NULL, ci_random = NULL, verbose = FALSE, ...) { out <- suppressWarnings( .extract_random_variances_helper( model, ci = ci, effects = effects, component = component, ci_method = ci_method, ci_random = ci_random, verbose = verbose, ... ) ) # check for errors if (is.null(out) && isTRUE(verbose)) { insight::format_warning("Something went wrong when calculating random effects parameters. Only showing model's fixed effects now. You may use `effects=\"fixed\"` to speed up the call to `model_parameters()`.") # nolint } out } # glmmTMB ------------------- .extract_random_variances.glmmTMB <- function(model, ci = 0.95, effects = "random", component = "all", ci_method = NULL, ci_random = NULL, verbose = FALSE, ...) { component <- insight::validate_argument( component, c("all", "conditional", "zero_inflated", "zi", "dispersion") ) out <- suppressWarnings( .extract_random_variances_helper( model, ci = ci, effects = effects, component = "conditional", ci_method = ci_method, ci_random = ci_random, verbose = verbose, ... ) ) # check for errors if (is.null(out)) { if (isTRUE(verbose)) { insight::format_warning("Something went wrong when calculating random effects parameters. Only showing model's fixed effects now. You may use `effects=\"fixed\"` to speed up the call to `model_parameters()`.") # nolint } return(NULL) } out$Component <- "conditional" if (insight::model_info(model, verbose = FALSE)$is_zero_inflated && !is.null(insight::find_random(model)$zero_inflated_random)) { # nolint zi_var <- suppressWarnings( .extract_random_variances_helper( model, ci = ci, effects = effects, component = "zi", ci_method = ci_method, ci_random = ci_random, verbose = FALSE, ... ) ) # bind if any zi-components could be extracted if (!is.null(zi_var)) { zi_var$Component <- "zero_inflated" out <- rbind(out, zi_var) } } # filter if (component != "all") { if (component == "zi") { component <- "zero_inflated" } out <- out[out$Component == component, ] } out } # GLMMadpative ------------------- .extract_random_variances.MixMod <- .extract_random_variances.glmmTMB # svy2lme ------------------------ .extract_random_variances.svy2lme <- function(model, ci = 0.95, effects = "random", ...) 
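# svy2lme fits expose the residual variance as `s2`; the random-effect SDs are
# recovered by scaling `sqrt(diag(model$L))` with the residual SD, and no
# SEs, CIs or p-values are available for these variance components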
{
  s <- sqrt(as.vector(model$s2))
  stdev <- matrix(s * sqrt(diag(model$L)), ncol = 1)
  vcnames <- c(paste0("SD (", model$znames, ")"), "SD (Observations)")
  grp_names <- names(model$znames)
  if (is.null(grp_names)) {
    grp_names <- model$znames
  }

  out <- data.frame(
    Parameter = vcnames,
    Level = NA,
    Coefficient = c(as.vector(stdev), s),
    SE = NA,
    CI_low = NA,
    CI_high = NA,
    t = NA,
    df_error = NA,
    p = NA,
    Effects = "random",
    Group = c(grp_names, "Residual"),
    stringsAsFactors = FALSE
  )

  # fix intercept names
  out$Parameter <- gsub("(Intercept)", "Intercept", out$Parameter, fixed = TRUE)

  if (effects == "random") {
    out[c("t", "df_error", "p")] <- NULL
  }

  rownames(out) <- NULL
  out
}


# workhorse ------------------------

.extract_random_variances_helper <- function(model,
                                             ci = 0.95,
                                             effects = "random",
                                             component = "conditional",
                                             ci_method = NULL,
                                             ci_random = NULL,
                                             verbose = FALSE,
                                             ...) {
  # special handling for lme objects
  if (inherits(model, "lme")) {
    insight::check_if_installed("lme4")
    varcorr <- lme4::VarCorr(model)
    class(varcorr) <- "VarCorr.lme"
  } else {
    varcorr <- insight::get_mixed_info(model, component = component, verbose = FALSE)$vc
  }

  # we have our own data frame methods for VarCorr objects from lme and coxme,
  # so only change class attribute for other models
  if (!inherits(model, c("lme", "coxme"))) {
    class(varcorr) <- "VarCorr.merMod"
  }

  # convert VarCorr object into a data frame
  re_data <- as.data.frame(varcorr, order = "lower.tri")

  # extract parameters from SD and COR separately, for sorting
  re_sd_intercept <- re_data$var1 == "(Intercept)" & is.na(re_data$var2) & re_data$grp != "Residual"
  re_sd_slope <- re_data$var1 != "(Intercept)" & is.na(re_data$var2) & re_data$grp != "Residual"
  re_cor_intercept <- re_data$var1 == "(Intercept)" & !is.na(re_data$var2) & re_data$grp != "Residual"
  re_cor_slope <- re_data$var1 != "(Intercept)" & !is.na(re_data$var2) & re_data$grp != "Residual"
  re_sigma <- re_data$grp == "Residual"

  # merge to sorted data frame
  out <- rbind(
    re_data[re_sd_intercept, ],
    re_data[re_sd_slope, ],
    re_data[re_cor_intercept, ],
    re_data[re_cor_slope, ],
    re_data[re_sigma, ]
  )
  out$Parameter <- NA

  # rename SD
  sds <- !is.na(out$var1) & is.na(out$var2)
  if (any(sds)) {
    out$Parameter[sds] <- paste0("SD (", out$var1[sds], ")")
  }

  # rename correlations
  corrs <- !is.na(out$var2)
  if (any(corrs)) {
    out$Parameter[corrs] <- paste0("Cor (", out$var1[corrs], "~", out$var2[corrs], ")")
  }

  # rename sigma
  sigma_res <- out$grp == "Residual"
  if (any(sigma_res)) {
    out$Parameter[sigma_res] <- "SD (Observations)"
  }

  # rename columns
  out <- datawizard::data_rename(
    out,
    select = c("grp", "sdcor"),
    replacement = c("Group", "Coefficient")
  )

  # fix names for uncorrelated slope-intercepts
  pattern <- paste0("(", paste(insight::find_random(model, flatten = TRUE), collapse = "|"), ")\\.\\d+$")
  out$Group <- gsub(pattern, "\\1", out$Group)

  # remove unused columns
  out$var1 <- NULL
  out$var2 <- NULL
  out$grp <- NULL
  out$vcov <- NULL
  out$sdcor <- NULL

  # fix intercept names
  out$Parameter <- gsub("(Intercept)", "Intercept", out$Parameter, fixed = TRUE)

  stat_column <- gsub("-statistic", "", insight::find_statistic(model), fixed = TRUE)

  # to match rbind
  out[[stat_column]] <- NA
  out$SE <- NA
  out$df_error <- NA
  out$p <- NA
  out$Level <- NA
  out$CI <- NA
  out$Effects <- "random"

  if (length(ci) == 1) {
    ci_cols <- c("CI_low", "CI_high")
  } else {
    ci_cols <- NULL
    for (i in ci) {
      ci_low <- paste0("CI_low_", i)
      ci_high <- paste0("CI_high_", i)
      ci_cols <- c(ci_cols, ci_low, ci_high)
    }
  }
  out[ci_cols] <- NA

  # variances to SD (sqrt), except correlations and Sigma
  corr_param 
<- startsWith(out$Parameter, "Cor ") sigma_param <- out$Parameter == "SD (Observations)" # add confidence intervals? if (!is.null(ci) && !all(is.na(ci)) && length(ci) == 1 && !isFALSE(ci_random)) { out <- .random_sd_ci(model, out, ci_method, ci, ci_random, corr_param, sigma_param, component, verbose = verbose) } out <- out[c("Parameter", "Level", "Coefficient", "SE", ci_cols, stat_column, "df_error", "p", "Effects", "Group")] if (effects == "random") { out[c(stat_column, "df_error", "p", "CI")] <- NULL } rownames(out) <- NULL out } #' @export as.data.frame.VarCorr.lme <- function(x, row.names = NULL, optional = FALSE, ...) { # retrieve RE SD and COR stddevs <- sapply(x[, "StdDev"], as.numeric) if ("Corr" %in% colnames(x)) { corrs <- suppressWarnings(sapply(x[, "Corr"], as.numeric)) } else { corrs <- NULL } grps <- endsWith(names(stddevs), " =") # for multiple grouping factors, split at each group if (any(grps)) { from <- which(grps) to <- c(which(grps) - 1, length(grps))[-1] out_sd <- do.call(rbind, lapply(seq_along(from), function(i) { values <- stddevs[from[i]:to[i]] .data_frame( grp = gsub("(.*) =$", "\\1", names(values[1])), var1 = names(values[-1]), var2 = NA_character_, sdcor = unname(values[-1]) ) })) if (is.null(corrs)) { out_cor <- NULL } else { out_cor <- do.call(rbind, lapply(seq_along(from), function(i) { values <- corrs[from[i]:to[i]] .data_frame( grp = gsub("(.*) =$", "\\1", names(values[1])), var1 = "(Intercept)", var2 = names(values[-1]), sdcor = unname(values[-1]) ) })) } } else { out_sd <- .data_frame( grp = gsub("(.*) =(.*)", "\\1", attributes(x)$title), var1 = names(stddevs), var2 = NA_character_, sdcor = unname(stddevs) ) if (is.null(corrs)) { out_cor <- NULL } else { out_cor <- .data_frame( grp = gsub("(.*) =(.*)", "\\1", attributes(x)$title), var1 = "(Intercept)", var2 = names(corrs), sdcor = unname(corrs) ) } } out_sd$grp[out_sd$var1 == "Residual"] <- "Residual" out_sd$var1[out_sd$grp == "Residual"] <- NA_character_ out_sd$var2[out_sd$grp == "Residual"] <- NA_character_ out_cor <- out_cor[!is.na(out_cor$sdcor), ] rbind(out_sd, out_cor) } #' @export as.data.frame.VarCorr.coxme <- function(x, row.names = NULL, optional = FALSE, ...) { # extract variances from VarCorr object variances <- lapply(x, diag) # create data frame, similar to as.data.frame.VarCorr.merMod out <- do.call(rbind, lapply(names(variances), function(i) { # information on variances d <- data.frame( grp = i, var1 = names(variances[[i]]), var2 = NA_character_, vcov = as.numeric(variances[[i]]), sdcor = sqrt(as.numeric(variances[[i]])), stringsAsFactors = FALSE ) # add correlations, if any if (nrow(x[[i]]) > 1) { d <- rbind(d, data.frame( grp = i, var1 = "(Intercept)", var2 = rownames(x[[i]])[2], vcov = NA_real_, sdcor = as.numeric(x[[i]][2, 1]), stringsAsFactors = FALSE )) } d })) # bind residual variance rbind(out, data.frame( grp = "Residual", var1 = NA_character_, var2 = NA_character_, vcov = NA_real_, sdcor = NA_real_, stringsAsFactors = FALSE )) } # extract CI for random SD ------------------------ .random_sd_ci <- function(model, out, ci_method, ci, ci_random, corr_param, sigma_param, component = NULL, verbose = FALSE) { ## TODO needs to be removed once MCM > 0.1.5 is on CRAN if (startsWith(insight::safe_deparse(insight::get_call(model)), "mcm_lmer")) { return(out) } # heuristic to check whether CIs for random effects should be computed or # not. If `ci_random=NULL`, we check model complexity and decide whether to # go on or not. 
  # For models with larger sample sizes or more complex random
  # effects, this might be quite time consuming.
  if (is.null(ci_random)) {
    # check sample size, don't compute by default when larger than 1000
    n_obs <- insight::n_obs(model)
    if (n_obs >= 1000) {
      return(out)
    }

    # check complexity of random effects
    re <- insight::find_random(model, flatten = TRUE)
    rs <- insight::find_random_slopes(model)

    # quit if random slopes and larger sample size or more than 1 grouping factor
    if (!is.null(rs) && (n_obs >= 500 || length(re) > 1)) {
      return(out)
    }

    # quit if more than two grouping factors
    if (length(re) > 2) {
      return(out)
    }
  }

  if (inherits(model, c("merMod", "glmerMod", "lmerMod"))) {
    # lme4 - boot and profile
    if (!is.null(ci_method) && ci_method %in% c("profile", "boot")) {
      out <- tryCatch(
        {
          var_ci <- as.data.frame(suppressWarnings(stats::confint(
            model,
            parm = "theta_",
            oldNames = FALSE,
            method = ci_method,
            level = ci
          )))
          colnames(var_ci) <- c("CI_low", "CI_high")

          rn <- row.names(var_ci)
          rn <- gsub("sd_(.*)(\\|)(.*)", "\\1: \\3", rn)
          rn <- gsub("|", ":", rn, fixed = TRUE)
          rn <- gsub("[\\(\\)]", "", rn)
          rn <- gsub("cor_(.*)\\.(.*)", "cor \\2", rn)

          var_ci_corr_param <- startsWith(rn, "cor ")
          var_ci_sigma_param <- rn == "sigma"

          out$CI_low[!corr_param & !sigma_param] <- var_ci$CI_low[!var_ci_corr_param & !var_ci_sigma_param]
          out$CI_high[!corr_param & !sigma_param] <- var_ci$CI_high[!var_ci_corr_param & !var_ci_sigma_param]

          if (any(sigma_param) && any(var_ci_sigma_param)) {
            out$CI_low[sigma_param] <- var_ci$CI_low[var_ci_sigma_param]
            out$CI_high[sigma_param] <- var_ci$CI_high[var_ci_sigma_param]
          }

          if (any(corr_param) && any(var_ci_corr_param)) {
            out$CI_low[corr_param] <- var_ci$CI_low[var_ci_corr_param]
            out$CI_high[corr_param] <- var_ci$CI_high[var_ci_corr_param]
          }
          out
        },
        error = function(e) {
          if (isTRUE(verbose)) {
            insight::format_alert(
              "Cannot compute profiled standard errors and confidence intervals for random effects parameters.",
              "Your model may suffer from singularity (see '?lme4::isSingular' and '?performance::check_singularity').",
              "You may try to impose a prior on the random effects parameters, e.g. using the {.pkg glmmTMB} package."
            )
          }
          out
        }
      )
    } else if (!is.null(ci_method)) {
      # lme4 - wald / normal CI
      merDeriv_loaded <- isNamespaceLoaded("merDeriv")
      # detach on exit
      on.exit(
        if (!merDeriv_loaded) {
          .unregister_vcov()
        },
        add = TRUE,
        after = FALSE
      )
      # Wald based CIs
      # see https://stat.ethz.ch/pipermail/r-sig-mixed-models/2022q1/029985.html
      if (all(suppressMessages(insight::check_if_installed(c("merDeriv", "lme4"), quietly = TRUE)))) {
        # this may fail, so wrap in try-catch
        out <- tryCatch(
          {
            # vcov from full model. 
the parameters from vcov have a different # order, so we need to restore the "original" order of random effect # parameters using regex to match the naming patterns (of the column # names from the vcov) vv <- stats::vcov(model, full = TRUE, ranpar = "sd") # only keep random effect variances cov_columns <- grepl("(^cov_|residual)", colnames(vv)) vv <- vv[cov_columns, cov_columns, drop = FALSE] # iterate random effect variables re_groups <- setdiff(unique(out$Group), "Residual") # create data frame with group and parameter names and SE var_ci <- do.call(rbind, lapply(re_groups, function(i) { pattern <- paste0("^cov_", i, "\\.(.*)") re_group_columns <- grepl(pattern, colnames(vv)) vv_sub <- as.matrix(vv[re_group_columns, re_group_columns, drop = FALSE]) cn <- gsub(pattern, "\\1", colnames(vv_sub)) .data_frame(Group = i, Parameter = cn, SE = sqrt(diag(vv_sub))) })) # add residual variance res_column <- which(colnames(vv) == "residual") if (length(res_column)) { var_ci <- rbind( var_ci, .data_frame( Group = "Residual", Parameter = "SD (Observations)", SE = sqrt(vv[res_column, res_column, drop = TRUE]) ) ) } # renaming var_ci$Parameter[var_ci$Parameter == "(Intercept)"] <- "SD (Intercept)" # correlations var_ci_corr_param <- grepl("(.*)\\.\\(Intercept\\)", var_ci$Parameter) if (any(var_ci_corr_param)) { rnd_slope_terms <- gsub("(.*)\\.\\(Intercept\\)", "\\1", var_ci$Parameter[var_ci_corr_param]) var_ci$Parameter[var_ci_corr_param] <- paste0("Cor (Intercept~", rnd_slope_terms, ")") } # correlations w/o intercept? usually only for factors # or: correlation among slopes. we need to recover the (categorical) # term names from our prepared data frame, then match vcov-names rnd_slope_corr <- grepl("^Cor \\((?!Intercept~)", out$Parameter, perl = TRUE) if (any(rnd_slope_corr)) { for (gr in setdiff(unique(out$Group), "Residual")) { rnd_slope_corr_grp <- rnd_slope_corr & out$Group == gr dummy <- gsub("Cor \\((.*)~(.*)\\)", "\\2.\\1", out$Parameter[rnd_slope_corr_grp]) var_ci$Parameter[var_ci$Group == gr][match(dummy, var_ci$Parameter[var_ci$Group == gr])] <- out$Parameter[rnd_slope_corr_grp] # nolint } } # remaining var_ci_others <- !grepl("^(Cor|SD) (.*)", var_ci$Parameter) var_ci$Parameter[var_ci_others] <- gsub("(.*)", "SD (\\1)", var_ci$Parameter[var_ci_others]) # merge with random effect coefficients out$.sort_id <- seq_len(nrow(out)) tmp <- merge( datawizard::data_remove(out, "SE", verbose = FALSE), var_ci, all.x = TRUE, sort = FALSE ) tmp <- tmp[order(tmp$.sort_id), ] out$SE <- tmp$SE out$.sort_id <- NULL # ensure correlation CI are within -1/1 bounds var_ci_corr_param <- startsWith(out$Parameter, "Cor ") if (any(var_ci_corr_param)) { coefs <- out$Coefficient[var_ci_corr_param] delta_se <- out$SE[var_ci_corr_param] / (1 - coefs^2) out$CI_low[var_ci_corr_param] <- tanh(atanh(coefs) - stats::qnorm(0.975) * delta_se) out$CI_high[var_ci_corr_param] <- tanh(atanh(coefs) + stats::qnorm(0.975) * delta_se) } # Wald CI, based on delta-method. # SD is chi square distributed. So it has a long tail. CIs should # therefore be asymmetrical. log(SD) is normally distributed. 
            # Also, if the SD is small, then the CI might go negative
            coefs <- out$Coefficient[!var_ci_corr_param]
            delta_se <- out$SE[!var_ci_corr_param] / coefs
            out$CI_low[!var_ci_corr_param] <- exp(log(coefs) - stats::qnorm(0.975) * delta_se)
            out$CI_high[!var_ci_corr_param] <- exp(log(coefs) + stats::qnorm(0.975) * delta_se)

            # warn if singular fit
            if (isTRUE(verbose) && insight::check_if_installed("performance", quietly = TRUE) && isTRUE(performance::check_singularity(model))) { # nolint
              insight::format_alert(
                "Your model may suffer from singularity (see `?lme4::isSingular` and `?performance::check_singularity`).", # nolint
                "Some of the standard errors and confidence intervals of the random effects parameters are probably not meaningful!", # nolint
                "You may try to impose a prior on the random effects parameters, e.g. using the {.pkg glmmTMB} package." # nolint
              )
            }
            out
          },
          error = function(e) {
            if (isTRUE(verbose)) {
              if (grepl("nAGQ of at least 1 is required", e$message, fixed = TRUE)) {
                insight::format_alert("Argument `nAGQ` needs to be larger than 0 to compute confidence intervals for random effect parameters.") # nolint
              }
              if (grepl("Multiple cluster variables detected.", e$message, fixed = TRUE)) {
                insight::format_alert("Confidence intervals for random effect parameters are currently not supported for multiple grouping variables.") # nolint
              }
              if (grepl("exactly singular", e$message, fixed = TRUE) ||
                grepl("computationally singular", e$message, fixed = TRUE) ||
                grepl("Exact singular", e$message, fixed = TRUE)) {
                insight::format_alert(
                  "Cannot compute standard errors and confidence intervals for random effects parameters.",
                  "Your model may suffer from singularity (see `?lme4::isSingular` and `?performance::check_singularity`).", # nolint
                  "You may try to impose a prior on the random effects parameters, e.g. using the {.pkg glmmTMB} package." # nolint
                )
              }
            }
            out
          }
        )
      } else if (isTRUE(verbose)) {
        insight::format_alert("Package 'merDeriv' needs to be installed to compute confidence intervals for random effect parameters.") # nolint
      }
    }
  } else if (inherits(model, "glmmTMB")) {
    # glmmTMB random-effects-CI
    ## TODO "profile" seems to be less stable, so only wald?
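    # Wald CIs are requested for the variance parameters ("theta_") and the
    # residual SD ("sigma") via confint(), then rows are renamed and mapped
    # back onto the SD/Cor parameters per component (cond/zi)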
out <- tryCatch( { var_ci <- rbind( as.data.frame(suppressWarnings(stats::confint(model, parm = "theta_", method = "wald", level = ci))), as.data.frame(suppressWarnings(stats::confint(model, parm = "sigma", method = "wald", level = ci))) ) colnames(var_ci) <- c("CI_low", "CI_high", "not_used") var_ci$Component <- "conditional" var_ci$Parameter <- row.names(var_ci) if (utils::packageVersion("glmmTMB") > "1.1.3") { var_ci$Component[startsWith(var_ci$Parameter, "zi.")] <- "zi" # remove cond/zi prefix var_ci$Parameter <- gsub("^(cond\\.|zi\\.)(.*)", "\\2", var_ci$Parameter) # copy RE group var_ci$Group <- gsub("(.*)\\|(.*)$", "\\2", var_ci$Parameter) var_ci$Parameter <- gsub("(.*)\\|(.*)$", "\\1", var_ci$Parameter) var_ci$Group[rownames(var_ci) == "sigma"] <- "Residual" } else { # regex-pattern to find conditional and ZI components group_factor <- insight::find_random(model, flatten = TRUE) group_factor2 <- paste0("(", paste(group_factor, collapse = "|"), ")") pattern <- paste0("^(zi\\.|", group_factor2, "\\.zi\\.)") zi_rows <- grepl(pattern, var_ci$Parameter) if (any(zi_rows)) { var_ci$Component[zi_rows] <- "zi" } # add Group var_ci$Group <- NA if (length(group_factor) > 1) { var_ci$Group[var_ci$Component == "conditional"] <- gsub(paste0("^", group_factor2, "\\.cond\\.(.*)"), "\\1", var_ci$Parameter[var_ci$Component == "conditional"]) # nolint var_ci$Group[var_ci$Component == "zi"] <- gsub(paste0("^", group_factor2, "\\.zi\\.(.*)"), "\\1", var_ci$Parameter[var_ci$Component == "zi"]) # nolint } else { var_ci$Group <- group_factor # check if sigma was properly identified if (!"sigma" %in% var_ci$Group && "sigma" %in% rownames(var_ci)) { var_ci$Group[rownames(var_ci) == "sigma"] <- "Residual" } } # remove cond/zi prefix pattern <- paste0("^(cond\\.|zi\\.|", group_factor, "\\.cond\\.|", group_factor, "\\.zi\\.)(.*)") for (p in pattern) { var_ci$Parameter <- gsub(p, "\\2", var_ci$Parameter) } } # fix SD and Cor names var_ci$Parameter <- gsub(".Intercept.", "(Intercept)", var_ci$Parameter, fixed = TRUE) var_ci$Parameter <- gsub("^(Std\\.Dev\\.)(.*)", "SD \\(\\2\\)", var_ci$Parameter) var_ci$Parameter <- gsub("^Cor\\.(.*)\\.(.*)", "Cor \\(\\2~\\1\\)", var_ci$Parameter) # minor cleaning var_ci$Parameter <- gsub("((", "(", var_ci$Parameter, fixed = TRUE) var_ci$Parameter <- gsub("))", ")", var_ci$Parameter, fixed = TRUE) var_ci$Parameter <- gsub(")~", "~", var_ci$Parameter, fixed = TRUE) # fix sigma var_ci$Parameter[var_ci$Parameter == "sigma"] <- "SD (Observations)" var_ci$Group[var_ci$Group == "sigma"] <- "Residual" # remove unused columns (that are added back after merging) out$CI_low <- NULL out$CI_high <- NULL # filter component var_ci <- var_ci[var_ci$Component == component, ] var_ci$not_used <- NULL var_ci$Component <- NULL # check results - warn user if (isTRUE(verbose)) { missing_ci <- any(is.na(var_ci$CI_low) | is.na(var_ci$CI_high)) singular_fit <- insight::check_if_installed("performance", quietly = TRUE) & isTRUE(performance::check_singularity(model)) # nolint if (singular_fit) { insight::format_alert( "Your model may suffer from singularity (see `?lme4::isSingular` and `?performance::check_singularity`).", "Some of the confidence intervals of the random effects parameters are probably not meaningful!", "You may try to impose a prior on the random effects parameters, e.g. using the {.pkg glmmTMB} package." 
# nolint
            )
          } else if (missing_ci) {
            insight::format_alert(
              "Your model may suffer from singularity (see `?lme4::isSingular` and `?performance::check_singularity`).",
              "Some of the confidence intervals of the random effects parameters could not be calculated or are probably not meaningful!", # nolint
              "You may try to impose a prior on the random effects parameters, e.g. using the {.pkg glmmTMB} package." # nolint
            )
          }
        }

        # merge and sort
        out$.sort_id <- seq_len(nrow(out))
        out <- merge(out, var_ci, sort = FALSE, all.x = TRUE)
        out <- out[order(out$.sort_id), ]
        out$.sort_id <- NULL

        out
      },
      error = function(e) {
        if (isTRUE(verbose)) {
          insight::format_alert(
            "Cannot compute confidence intervals for random effects parameters.",
            "Your model may suffer from singularity (see `?lme4::isSingular` and `?performance::check_singularity`)."
          )
        }
        out
      }
    )
  }

  out
}


# this is used to only temporarily load merDeriv and to point registered
# methods from merDeriv to lme4-methods. If merDeriv was loaded before,
# nothing will be changed. If merDeriv was not loaded, vcov-methods registered
# by merDeriv will be re-registered to use lme4::vcov.merMod. This is no problem,
# because *if* users load merDeriv later manually, merDeriv-vcov-methods will
# be registered again.
.unregister_vcov <- function() {
  unloadNamespace("merDeriv")
  suppressWarnings(suppressMessages(registerS3method("vcov", "lmerMod", method = lme4::vcov.merMod)))
  suppressWarnings(suppressMessages(registerS3method("vcov", "glmerMod", method = lme4::vcov.merMod)))
}
parameters/R/methods_pscl.R0000644000176200001440000001351314736731407015440 0ustar liggesusers# .zeroinfl, .hurdle, .zerocount


# model parameters -----------------

#' @export
model_parameters.zeroinfl <- model_parameters.zcpglm

#' @export
model_parameters.hurdle <- model_parameters.zcpglm

#' @export
model_parameters.zerocount <- model_parameters.zcpglm


# ci -----------------

#' @export
ci.zeroinfl <- function(x,
                        ci = 0.95,
                        dof = NULL,
                        method = "wald",
                        component = "all",
                        verbose = TRUE,
                        ...) {
  method <- tolower(method)
  method <- insight::validate_argument(
    method,
    c("wald", "normal", "residual", "robust")
  )
  component <- insight::validate_argument(
    component,
    c("all", "conditional", "zi", "zero_inflated")
  )

  if (is.null(.check_component(x, component, verbose = verbose))) {
    return(NULL)
  }

  # all other
  .ci_generic(model = x, ci = ci, dof = dof, method = method, component = component, ...)
}

#' @export
ci.hurdle <- ci.zeroinfl

#' @export
ci.zerocount <- ci.zeroinfl


# standard error -----------------

#' @export
standard_error.zeroinfl <- function(model,
                                    component = "all",
                                    method = NULL,
                                    verbose = TRUE,
                                    ...)
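# SEs are extracted per component ("count" -> conditional, "zero" -> zi) from
# coef(summary(model)); the auxiliary "Log(theta)" row is dropped so that the
# rows match insight::find_parameters()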
{
  component <- insight::validate_argument(
    component,
    c("all", "conditional", "zi", "zero_inflated")
  )
  if (is.null(.check_component(model, component, verbose = verbose))) {
    return(NULL)
  }

  robust <- !is.null(method) && method == "robust"
  if (.check_vcov_args(robust, ...)) {
    return(standard_error.default(model, component = component, ...))
  }

  cs <- insight::compact_list(stats::coef(summary(model)))
  x <- lapply(names(cs), function(i) {
    if (i == "count") {
      comp <- "conditional"
    } else {
      comp <- "zi"
    }

    stats <- cs[[i]]

    # remove log(theta)
    theta <- grepl("Log(theta)", rownames(stats), fixed = TRUE)
    if (any(theta)) {
      stats <- stats[!theta, ]
    }

    .data_frame(
      Parameter = insight::find_parameters(model, effects = "fixed", component = comp, flatten = TRUE),
      SE = as.vector(stats[, 2]),
      Component = comp
    )
  })

  se <- do.call(rbind, x)
  se$Component <- .rename_values(se$Component, "cond", "conditional")
  se$Component <- .rename_values(se$Component, "zi", "zero_inflated")

  .filter_component(se, component)
}

#' @export
standard_error.hurdle <- standard_error.zeroinfl

#' @export
standard_error.zerocount <- standard_error.zeroinfl


# p values -----------------------

#' @export
p_value.zeroinfl <- function(model, component = "all", method = NULL, verbose = TRUE, ...) {
  component <- insight::validate_argument(
    component,
    c("all", "conditional", "zi", "zero_inflated")
  )
  if (is.null(.check_component(model, component, verbose = verbose))) {
    return(NULL)
  }

  robust <- !is.null(method) && method == "robust"
  if (.check_vcov_args(robust, ...)) {
    return(p_value.default(model, component = component, ...))
  }

  cs <- insight::compact_list(stats::coef(summary(model)))
  x <- lapply(names(cs), function(i) {
    if (i == "count") {
      comp <- "conditional"
    } else {
      comp <- "zi"
    }

    stats <- cs[[i]]

    # remove log(theta)
    theta <- grepl("Log(theta)", rownames(stats), fixed = TRUE)
    if (any(theta)) {
      stats <- stats[!theta, ]
    }

    .data_frame(
      Parameter = insight::find_parameters(model, effects = "fixed", component = comp, flatten = TRUE),
      p = as.vector(stats[, 4]),
      Component = comp
    )
  })

  p <- do.call(rbind, x)
  p$Component <- .rename_values(p$Component, "cond", "conditional")
  p$Component <- .rename_values(p$Component, "zi", "zero_inflated")

  .filter_component(p, component)
}

#' @export
p_value.hurdle <- p_value.zeroinfl

#' @export
p_value.zerocount <- p_value.zeroinfl


# simulate model -----------------

#' @export
simulate_model.zeroinfl <- simulate_model.glmmTMB

#' @export
simulate_model.hurdle <- simulate_model.zeroinfl

#' @export
simulate_model.zerocount <- simulate_model.zeroinfl


# simulate parameters -----------------

#' @export
simulate_parameters.zeroinfl <- function(model,
                                         iterations = 1000,
                                         centrality = "median",
                                         ci = 0.95,
                                         ci_method = "quantile",
                                         test = "p-value",
                                         ...) {
  data <- simulate_model(model, iterations = iterations, ...)
  out <- .summary_bootstrap(
    data = data,
    test = test,
    centrality = centrality,
    ci = ci,
    ci_method = ci_method,
    ...
) params <- insight::get_parameters(model) if ("Effects" %in% colnames(params) && insight::n_unique(params$Effects) > 1) { out$Effects <- params$Effects } if ("Component" %in% colnames(params) && insight::n_unique(params$Component) > 1) { out$Component <- params$Component } if (inherits(model, c("zeroinfl", "hurdle", "zerocount"))) { out$Parameter <- gsub("^(count_|zero_)", "", out$Parameter) } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(out, "iterations") <- iterations attr(out, "ci") <- ci attr(out, "ci_method") <- ci_method attr(out, "centrality") <- centrality out } #' @export simulate_parameters.hurdle <- simulate_parameters.zeroinfl #' @export simulate_parameters.zerocount <- simulate_parameters.zeroinfl parameters/R/methods_wrs2.R0000644000176200001440000002425715002455357015401 0ustar liggesusers#' Parameters from robust statistical objects in `WRS2` #' #' @param model Object from `WRS2` package. #' @param ... Arguments passed to or from other methods. #' @inheritParams model_parameters.default #' #' @examples #' if (require("WRS2") && packageVersion("WRS2") >= "1.1.3") { #' model <- t1way(libido ~ dose, data = viagra) #' model_parameters(model) #' } #' @return A data frame of indices related to the model's parameters. #' @export # anova ---------------------- model_parameters.t1way <- function(model, keep = NULL, verbose = TRUE, ...) { parameters <- .extract_wrs2_t1way(model) parameters <- .add_htest_parameters_attributes(parameters, model, ...) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } .extract_wrs2_t1way <- function(model) { fcall <- insight::safe_deparse(model$call) # effect sizes are by default contained for `t1way` but not `rmanova` if (grepl("^(t1way|WRS2::t1way)", fcall)) { data.frame( `F` = model$test, df = model$df1, df_error = model$df2, p = model$p.value, Method = "A heteroscedastic one-way ANOVA for trimmed means", Estimate = model$effsize, CI = 1 - model$alpha, CI_low = model$effsize_ci[1], CI_high = model$effsize_ci[2], Effectsize = "Explanatory measure of effect size", stringsAsFactors = FALSE ) } else if (grepl("^(rmanova|WRS2::rmanova)", fcall)) { data.frame( `F` = model$test, df = model$df1, df_error = model$df2, p = model$p.value, Method = "A heteroscedastic one-way repeated measures ANOVA for trimmed means", stringsAsFactors = FALSE ) } } #' @export model_parameters.med1way <- function(model, verbose = TRUE, ...) { parameters <- .extract_wrs2_med1way(model) parameters <- .add_htest_parameters_attributes(parameters, model, ...) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } .extract_wrs2_med1way <- function(model) { data.frame( `F` = model$test, `Critical value` = model$crit.val, p = model$p.value, Method = "Heteroscedastic one-way ANOVA for medians", stringsAsFactors = FALSE ) } #' @export model_parameters.dep.effect <- function(model, keep = NULL, verbose = TRUE, ...) { parameters <- .extract_wrs2_dep.effect(model, keep = keep) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } .extract_wrs2_dep.effect <- function(model, keep = NULL, ...) 
{ out <- as.data.frame(model) out$Parameter <- c(attributes(out)$row.names) # effectsize descriptions out$Effectsize <- c( "Algina-Keselman-Penfield robust standardized difference", # AKP "Quantile shift based on the median of the distribution of difference scores", # QS (median) "Quantile shift based on the trimmed mean of the distribution of X-Y", # QStr "P(X2ε}}{\eqn{\sigma^2_\epsilon}}, #' is the sum of the distribution-specific variance and the variance due to additive dispersion. #' It indicates the *within-group variance*. #' #' ## Between-group random intercept variance #' The random intercept variance, or *between-group* variance #' for the intercept (\ifelse{html}{\out{τ00}}{\eqn{\tau_{00}}}), #' is obtained from `VarCorr()`. It indicates how much groups #' or subjects differ from each other. #' #' ## Between-group random slope variance #' The random slope variance, or *between-group* variance #' for the slopes (\ifelse{html}{\out{τ11}}{\eqn{\tau_{11}}}) #' is obtained from `VarCorr()`. This measure is only available #' for mixed models with random slopes. It indicates how much groups #' or subjects differ from each other according to their slopes. #' #' ## Random slope-intercept correlation #' The random slope-intercept correlation #' (\ifelse{html}{\out{ρ01}}{\eqn{\rho_{01}}}) #' is obtained from `VarCorr()`. This measure is only available #' for mixed models with random intercepts and slopes. #' #' **Note:** For the within-group and between-group variance, variance #' and standard deviations (which are simply the square root of the variance) #' are shown. #' #' @examples #' if (require("lme4")) { #' data(sleepstudy) #' model <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) #' random_parameters(model) #' } #' @export random_parameters <- function(model, component = "conditional") { component <- match.arg(component, choices = c("conditional", "zi", "zero_inflated")) out <- .randomeffects_summary(model, component) class(out) <- c("parameters_random", class(out)) out } # helper ----------------------------------- .n_randomeffects <- function(model) { vapply( insight::get_data(model, verbose = FALSE)[insight::find_random(model, split_nested = TRUE, flatten = TRUE)], insight::n_unique, numeric(1) ) } .randomeffects_summary <- function(model, component = "conditional") { out <- list() re_variances <- suppressWarnings(insight::get_variance(model, model_component = component)) model_re <- insight::find_random(model, split_nested = FALSE, flatten = TRUE) model_rs <- unlist(insight::find_random_slopes(model)) if (length(re_variances) && sum(!is.na(re_variances)) > 0 && !is.null(re_variances)) { # Residual Variance (Sigma^2) out$Sigma2 <- re_variances$var.residual # Random Intercept Variance if (!insight::is_empty_object(re_variances$var.intercept)) { var_intercept <- as.list(re_variances$var.intercept) names(var_intercept) <- paste0("tau00_", names(re_variances$var.intercept)) out <- c(out, var_intercept) } # Random Slope Variance if (!insight::is_empty_object(re_variances$var.slope) && !insight::is_empty_object(model_rs)) { var_slope <- as.list(re_variances$var.slope) names(var_slope) <- paste0("tau11_", names(re_variances$var.slope)) out <- c(out, var_slope) } # Slope-Intercept Correlation if (!insight::is_empty_object(re_variances$cor.slope_intercept) && !insight::is_empty_object(model_rs)) { cor_slope_intercept <- as.list(re_variances$cor.slope_intercept) csi_names <- gsub("(.*)(\\.\\d)(.*)", "\\1\\3", names(re_variances$var.slope)) # csi_names <- 
names(re_variances$var.slope) names(cor_slope_intercept) <- paste0("rho01_", csi_names) out <- c(out, cor_slope_intercept) } # Slopes Correlation if (!insight::is_empty_object(re_variances$cor.slopes) && !insight::is_empty_object(model_rs)) { cor_slopes <- as.list(re_variances$cor.slopes) names(cor_slopes) <- paste0("rho00_", names(cor_slopes)) out <- c(out, cor_slopes) } } # Number of levels per random-effect groups n_re <- as.list(.n_randomeffects(model)) if (insight::is_empty_object(n_re)) { n_re <- stats::setNames(NA_real_, "N") } else { names(n_re) <- paste0("N_", names(n_re)) out <- c(out, n_re) } # number of observations out$Observations <- insight::n_obs(model) # make nice data frame out <- as.data.frame(do.call(rbind, out), stringsAsFactors = FALSE) out$Description <- rownames(out) rownames(out) <- NULL colnames(out) <- c("Value", "Description") # Additional information out$Component <- "" out$Component[out$Description == "Sigma2"] <- "sigma2" out$Component[startsWith(out$Description, "tau00_")] <- "tau00" out$Component[startsWith(out$Description, "tau11_")] <- "tau11" out$Component[startsWith(out$Description, "rho01_")] <- "rho01" out$Component[startsWith(out$Description, "rho00_")] <- "rho00" # Additional information out$Term <- "" out$Term[out$Component == "tau00"] <- gsub("^tau00_(.*)", "\\1", out$Description[out$Component == "tau00"]) out$Term[out$Component == "tau11"] <- gsub("^tau11_(.*)", "\\1", out$Description[out$Component == "tau11"]) out$Term[out$Component == "rho01"] <- gsub("^rho01_(.*)", "\\1", out$Description[out$Component == "rho01"]) out$Term[out$Component == "rho00"] <- gsub("^rho00_(.*)(\\.\\.\\.)(.*)", "\\3", out$Description[out$Component == "rho00"]) # renaming out$Type <- "" # Within-Group Variance out$Type[out$Description == "Sigma2"] <- "" out$Description[out$Description == "Sigma2"] <- "Within-Group Variance" # Between-Group Variance out$Type[startsWith(out$Description, "tau00_")] <- "Random Intercept" out$Description <- gsub("^tau00_(.*)", "Between-Group Variance", out$Description) out$Type[startsWith(out$Description, "tau11_")] <- "Random Slope" out$Description <- gsub("^tau11_(.*)", "Between-Group Variance", out$Description) # correlations out$Type[startsWith(out$Description, "rho01_")] <- "" out$Description <- gsub("^rho01_(.*)", "Correlations", out$Description) out$Type[startsWith(out$Description, "rho00_")] <- "" out$Description <- gsub("^rho00_(.*)", "Correlations", out$Description) out$Type[grepl("N_(.*)", out$Description)] <- "" out$Term[grepl("N_(.*)", out$Description)] <- gsub("N_(.*)", "\\1", grep("N_(.*)", out$Description, value = TRUE)) out$Description <- gsub("_(.*)", "", out$Description) out$Type[startsWith(out$Description, "X")] <- "" out$Description[startsWith(out$Description, "X")] <- NA out$Component[out$Component == ""] <- NA out$Term[out$Term == ""] <- NA out[c("Description", "Component", "Type", "Term", "Value")] } parameters/R/methods_AER.R0000644000176200001440000000127214133222153015071 0ustar liggesusers# classes: .tobit # The `AER::ivreg` is being spun off to a separate package. The methods in # `methods_ivreg.R` should work for objects produce by `AER`. #################### .tobit ------ #' @export p_value.tobit <- function(model, method = NULL, ...) { params <- insight::get_parameters(model) p <- p_value.default(model, method = method, ...) p[p$Parameter %in% params$Parameter, ] } #' @export simulate_model.tobit <- simulate_model.default #' @export standard_error.tobit <- function(model, ...) 
{ params <- insight::get_parameters(model) std.error <- standard_error.default(model, ...) std.error[std.error$Parameter %in% params$Parameter, ] } parameters/R/p_value_kenward.R0000644000176200001440000001154015073732442016117 0ustar liggesusers#' @title Kenward-Roger approximation for SEs, CIs and p-values #' @name p_value_kenward #' #' @description An approximate F-test based on the Kenward-Roger (1997) approach. #' #' @param model A statistical model. #' @param dof Degrees of Freedom. #' @inheritParams ci.default #' #' @details Inferential statistics (like p-values, confidence intervals and #' standard errors) may be biased in mixed models when the number of clusters #' is small (even if the sample size of level-1 units is high). In such cases #' it is recommended to approximate a more accurate number of degrees of freedom #' for such inferential statistics. Unlike simpler approximation heuristics #' like the "m-l-1" rule (`dof_ml1`), the Kenward-Roger approximation is #' also applicable in more complex multilevel designs, e.g. with cross-classified #' clusters. However, the "m-l-1" heuristic also applies to generalized #' mixed models, while approaches like Kenward-Roger or Satterthwaite are limited #' to linear mixed models only. #' #' @seealso `dof_kenward()` and `se_kenward()` are small helper-functions #' to calculate approximated degrees of freedom and standard errors for model #' parameters, based on the Kenward-Roger (1997) approach. #' #' [`dof_satterthwaite()`] and [`dof_ml1()`] approximate degrees of freedom #' based on Satterthwaite's method or the "m-l-1" rule. #' #' @examples #' \donttest{ #' if (require("lme4", quietly = TRUE)) { #' model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) #' p_value_kenward(model) #' } #' } #' @return A data frame. #' @references Kenward, M. G., & Roger, J. H. (1997). Small sample inference for #' fixed effects from restricted maximum likelihood. Biometrics, 983-997. #' @export p_value_kenward <- function(model, dof = NULL) { if (!.check_REML_fit(model)) { model <- stats::update(model, . ~ ., REML = TRUE) } if (is.null(dof)) { dof <- dof_kenward(model) } .p_value_dof(model, dof, method = "kenward") } # helper ------------------------------ .p_value_dof <- function( model, dof, method = NULL, statistic = NULL, se = NULL, component = c( "all", "conditional", "zi", "zero_inflated", "dispersion", "precision", "scale", "smooth_terms", "full", "marginal" ), effects = c("fixed", "random", "all"), verbose = TRUE, vcov = NULL, vcov_args = NULL, ... ) { component <- match.arg(component) effects <- match.arg(effects) if (is.null(.check_component(model, component, verbose = verbose))) { return(NULL) } params <- insight::get_parameters(model, component = component) # check if all estimates are non-NA params <- .check_rank_deficiency(model, params, verbose = FALSE) if (is.null(statistic)) { statistic <- insight::get_statistic(model, component = component) params <- merge(params, statistic, sort = FALSE) statistic <- params$Statistic } # different SE for kenward and robust if (identical(method, "kenward") || identical(method, "kr")) { if (is.null(se)) { se <- se_kenward(model)$SE } } else if (!is.null(vcov)) { se <- standard_error( model, vcov = vcov, vcov_args = vcov_args, component = component, ... 
)$SE } # overwrite statistic, based on robust or kenward standard errors if (identical(method, "kenward") || identical(method, "kr") || !is.null(vcov)) { estimate <- if ("Coefficient" %in% colnames(params)) { params$Coefficient } else { params$Estimate } statistic <- estimate / se } p <- 2 * stats::pt(abs(statistic), df = dof, lower.tail = FALSE) out <- .data_frame(Parameter = params$Parameter, p = unname(p)) if ("Component" %in% names(params)) { out$Component <- params$Component } if ("Effects" %in% names(params) && effects != "fixed") { out$Effects <- params$Effects } if ("Response" %in% names(params)) { out$Response <- params$Response } out } .p_value_dof_kr <- function(model, params, dof) { if ("SE" %in% colnames(params) && "SE" %in% colnames(dof)) { params$SE <- NULL } params <- merge(params, dof, by = "Parameter") p <- 2 * stats::pt(abs(params$Estimate / params$SE), df = params$df_error, lower.tail = FALSE) .data_frame(Parameter = params$Parameter, p = unname(p)) } # helper ------------------------- .check_REML_fit <- function(model, verbose = TRUE) { if (inherits(model, "glmmTMB")) { is_reml <- isTRUE(model$modelInfo$REML) } else { insight::check_if_installed("lme4") is_reml <- lme4::getME(model, "is_REML") } if (!is_reml && verbose) { insight::format_warning( "Model was not fitted by REML. Re-fitting model now, but p-values, df, etc. still might be unreliable." ) } is_reml } parameters/R/methods_mjoint.R0000644000176200001440000000763114717111737016004 0ustar liggesusers#' @export model_parameters.mjoint <- function(model, ci = 0.95, effects = "fixed", component = "all", exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { effects <- insight::validate_argument(effects, c("fixed", "random", "all")) component <- insight::validate_argument(component, c("all", "conditional", "survival")) params <- params_variance <- NULL if (effects %in% c("fixed", "all")) { # Processing params <- .extract_parameters_generic( model, ci = ci, component = component, standardize = FALSE, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, ... ) # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) params$Effects <- "fixed" } if (effects %in% c("random", "all")) { params_variance <- .extract_random_variances( model, ci = ci, effects = effects, ci_method = NULL, ci_random = FALSE, verbose = verbose ) params_variance$Component <- "conditional" } # merge random and fixed effects, if necessary if (!is.null(params) && !is.null(params_variance)) { params$Level <- NA params$Group <- "" # add component column if (!"Component" %in% colnames(params)) { params$Component <- "conditional" } # reorder params <- params[match(colnames(params_variance), colnames(params))] } params <- rbind(params, params_variance) # remove empty column if (!is.null(params$Level) && all(is.na(params$Level))) { params$Level <- NULL } params <- .add_model_parameters_attributes( params, model, ci = ifelse(effects == "random", NA, ci), exponentiate, ci_method = NULL, p_adjust = p_adjust, verbose = verbose, group_level = FALSE, ... ) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export p_value.mjoint <- function(model, component = c("all", "conditional", "survival"), ...) 
{
  component <- match.arg(component)
  s <- summary(model)

  params <- rbind(
    data.frame(
      Parameter = rownames(s$coefs.long),
      p = unname(s$coefs.long[, 4]),
      Component = "conditional",
      stringsAsFactors = FALSE,
      row.names = NULL
    ),
    data.frame(
      Parameter = rownames(s$coefs.surv),
      p = unname(s$coefs.surv[, 4]),
      Component = "survival",
      stringsAsFactors = FALSE,
      row.names = NULL
    )
  )

  if (component != "all") {
    params <- params[params$Component == component, , drop = FALSE]
  }
  params
}

#' @export
ci.mjoint <- function(x, ci = 0.95, ...) {
  .ci_generic(model = x, ci = ci, dof = Inf, ...)
}

#' @export
standard_error.mjoint <- function(model, component = c("all", "conditional", "survival"), ...) {
  component <- match.arg(component)
  s <- summary(model)

  params <- rbind(
    data.frame(
      Parameter = rownames(s$coefs.long),
      SE = unname(s$coefs.long[, 2]),
      Component = "conditional",
      stringsAsFactors = FALSE,
      row.names = NULL
    ),
    data.frame(
      Parameter = rownames(s$coefs.surv),
      SE = unname(s$coefs.surv[, 2]),
      Component = "survival",
      stringsAsFactors = FALSE,
      row.names = NULL
    )
  )

  if (component != "all") {
    params <- params[params$Component == component, , drop = FALSE]
  }
  params
}
parameters/R/bootstrap_parameters.R0000644000176200001440000001256514775505314017215 0ustar liggesusers
#' Parameters bootstrapping
#'
#' Compute bootstrapped parameters and their related indices such as Confidence Intervals (CI) and p-values.
#'
#'
#' @param test The indices to compute. Character (vector) with one or more of
#' these options: `"p-value"` (or `"p"`), `"p_direction"` (or `"pd"`), `"rope"`,
#' `"p_map"`, `"equivalence_test"` (or `"equitest"`), `"bayesfactor"` (or `"bf"`)
#' or `"all"` to compute all tests. For each "test", the corresponding
#' **bayestestR** function is called (e.g. [bayestestR::rope()] or
#' [bayestestR::p_direction()]) and its results included in the summary output.
#' @param ... Arguments passed to other methods, like [`bootstrap_model()`] or
#' [`bayestestR::describe_posterior()`].
#' @inheritParams bootstrap_model
#' @inheritParams bayestestR::describe_posterior
#'
#' @return A data frame summarizing the bootstrapped parameters.
#'
#' @inheritSection bootstrap_model Using with **emmeans**
#'
#' @references
#' Davison, A. C., & Hinkley, D. V. (1997). Bootstrap methods and their
#' application (Vol. 1). Cambridge University Press.
#'
#' @seealso [`bootstrap_model()`], [`simulate_parameters()`], [`simulate_model()`]
#'
#' @details This function first calls [`bootstrap_model()`] to generate
#' bootstrapped coefficients. The resulting replicates for each coefficient
#' are treated as a "distribution" and passed to [`bayestestR::describe_posterior()`]
#' to calculate the related indices defined in the `"test"` argument.
#'
#' Note that the p-values returned here are estimated under the assumption of
#' *translation equivariance*: that the shape of the sampling distribution is
#' unaffected by the null being true or not. If this assumption does not hold,
#' p-values can be biased, and it is suggested to use proper permutation tests
#' to obtain non-parametric p-values.
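#'
#' As a rough illustration of how these p-values arise (a minimal sketch with
#' made-up bootstrap draws; `pd` is the share of draws on the dominant side of
#' the null):
#'
#' ```r
#' draws <- c(-0.2, 0.8, 0.9, 1.1, 1.4) # hypothetical bootstrap replicates
#' pd <- as.numeric(bayestestR::p_direction(draws))
#' bayestestR::pd_to_p(pd) # two-sided p-value, i.e. 2 * (1 - pd)
#' ```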
#' #' @examplesIf require("boot", quietly = TRUE) && require("emmeans", quietly = TRUE) #' \donttest{ #' set.seed(2) #' model <- lm(Sepal.Length ~ Species * Petal.Width, data = iris) #' b <- bootstrap_parameters(model) #' print(b) #' #' # different type of bootstrapping #' set.seed(2) #' b <- bootstrap_parameters(model, type = "balanced") #' print(b) #' #' est <- emmeans::emmeans(b, trt.vs.ctrl ~ Species) #' print(model_parameters(est)) #' } #' @export bootstrap_parameters <- function(model, ...) { UseMethod("bootstrap_parameters") } # methods ---------------------------------------------------------------------- #' @rdname bootstrap_parameters #' @export bootstrap_parameters.default <- function(model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ...) { boot_data <- bootstrap_model(model, iterations = iterations, ...) bootstrap_parameters(boot_data, centrality = centrality, ci = ci, ci_method = ci_method, test = test, ...) } #' @export bootstrap_parameters.bootstrap_model <- function(model, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ...) { out <- .summary_bootstrap( data = model, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) class(out) <- c("bootstrap_parameters", "parameters_model", class(out)) attr(out, "boot_samples") <- model out } #' @export model_parameters.bootstrap_model <- bootstrap_parameters.bootstrap_model # utilities -------------------------------------------------------------------- #' @keywords internal .summary_bootstrap <- function(data, test, centrality, ci, ci_method, ...) { # Is the p-value requested? if (any(test %in% c("p-value", "p", "pval"))) { p_value <- TRUE test <- setdiff(test, c("p-value", "p", "pval")) if (length(test) == 0) test <- NULL } else { p_value <- FALSE } parameters <- bayestestR::describe_posterior( data, centrality = centrality, ci = ci, ci_method = ci_method, test = test, ... ) # Remove unnecessary columns if ("CI" %in% names(parameters) && insight::has_single_value(parameters$CI, remove_na = TRUE)) { parameters$CI <- NULL } else if ("CI" %in% names(parameters) && insight::n_unique(parameters$CI) > 1) { parameters <- datawizard::reshape_ci(parameters) } # Coef if (length(centrality) == 1) { names(parameters)[names(parameters) == insight::format_capitalize(centrality)] <- "Coefficient" } # p-value if (p_value) { parameters$.row_order <- seq_len(nrow(parameters)) # calculate probability of direction, then convert to p. p <- bayestestR::p_direction(data, null = 0, ...) p$p <- as.numeric(bayestestR::pd_to_p(p$pd)) p$pd <- NULL parameters <- merge(parameters, p, all = TRUE) parameters <- parameters[order(parameters$.row_order), ] parameters$.row_order <- NULL } rownames(parameters) <- NULL attr(parameters, "ci") <- ci parameters } parameters/R/methods_multgee.R0000644000176200001440000000016014355245205016130 0ustar liggesusers#' @export standard_error.LORgee <- standard_error.default #' @export p_value.LORgee <- p_value.default parameters/R/methods_survey.R0000644000176200001440000001063615073712214016031 0ustar liggesusers# model_parameters ----------------------------------------- #' @export model_parameters.svyglm <- function( model, ci = 0.95, ci_method = "wald", standardize = NULL, exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ... 
) {
  if (insight::n_obs(model) > 1e4 && ci_method == "likelihood") {
    insight::format_alert(
      "Likelihood confidence intervals may take longer to compute. Use 'ci_method=\"wald\"' for faster computation of CIs." # nolint
    )
  }

  # validation check, warn if unsupported argument is used.
  dot_args <- .check_dots(
    dots = list(...),
    not_allowed = c("vcov", "vcov_args", "bootstrap"),
    class(model)[1],
    verbose = verbose
  )

  fun_args <- list(
    model,
    ci = ci,
    ci_method = ci_method,
    standardize = standardize,
    exponentiate = exponentiate,
    p_adjust = p_adjust,
    keep_parameters = keep,
    drop_parameters = drop,
    include_info = include_info,
    verbose = verbose
  )
  fun_args <- c(fun_args, dot_args)

  out <- do.call(".model_parameters_generic", fun_args)
  attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model))
  out
}

#' @export
model_parameters.svyolr <- model_parameters.glm


# simulate_model -----------------------------------------

#' @export
simulate_model.svyglm.nb <- simulate_model.default

#' @export
simulate_model.svyglm.zip <- simulate_model.default


# standard errors -----------------------------------------

#' @export
standard_error.svyglm.nb <- function(model, ...) {
  if (!isNamespaceLoaded("survey")) {
    requireNamespace("survey", quietly = TRUE)
  }

  se <- sqrt(diag(stats::vcov(model, stderr = "robust")))
  .data_frame(Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se))
}

#' @export
standard_error.svyglm.zip <- standard_error.svyglm.nb

#' @export
standard_error.svyglm <- function(model, ...) {
  vc <- insight::get_varcov(model)
  .data_frame(
    Parameter = .remove_backticks_from_string(row.names(vc)),
    SE = as.vector(sqrt(diag(vc)))
  )
}

#' @export
standard_error.svyolr <- standard_error.polr


# confidence intervals -----------------------------------

#' @export
ci.svyglm <- function(x, ci = 0.95, method = "wald", ...) {
  method <- match.arg(method, choices = c("wald", "residual", "normal", "likelihood"))
  if (method == "likelihood") {
    out <- lapply(ci, function(i) .ci_likelihood(model = x, ci = i))
    out <- do.call(rbind, out)
  } else {
    out <- .ci_generic(model = x, ci = ci, method = method, ...)
  }
  row.names(out) <- NULL
  out
}

#' @export
ci.svyolr <- ci.polr


# p values -----------------------------------------------

## TODO how to calculate p when ci-method is "likelihood"?

#' @export
p_value.svyglm <- function(model, verbose = TRUE, ...) {
  statistic <- insight::get_statistic(model)
  dof <- insight::get_df(model, type = "residual")
  p <- 2 * stats::pt(-abs(statistic$Statistic), df = dof)
  .data_frame(Parameter = statistic$Parameter, p = as.vector(p))
}

#' @export
p_value.svyolr <- p_value.polr

#' @export
p_value.svyglm.nb <- function(model, ...)
{ if (!isNamespaceLoaded("survey")) { requireNamespace("survey", quietly = TRUE) } est <- stats::coef(model) se <- sqrt(diag(stats::vcov(model, stderr = "robust"))) p <- 2 * stats::pt( abs(est / se), df = insight::get_df(model, type = "wald"), lower.tail = FALSE ) .data_frame(Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p)) } #' @export p_value.svyglm.zip <- p_value.svyglm.nb # helper -------------------- .ci_likelihood <- function(model, ci) { glm_ci <- tryCatch( { out <- as.data.frame( stats::confint(model, level = ci, method = "likelihood"), stringsAsFactors = FALSE ) names(out) <- c("CI_low", "CI_high") out$CI <- ci out$Parameter <- insight::get_parameters( model, effects = "fixed", component = "conditional" )$Parameter out <- out[c("Parameter", "CI", "CI_low", "CI_high")] rownames(out) <- NULL out }, error = function(e) { NULL } ) if (is.null(glm_ci)) { glm_ci <- .ci_generic(model, ci = ci) } glm_ci } parameters/R/methods_censReg.R0000644000176200001440000000027014717111737016062 0ustar liggesusers#' @export model_parameters.censReg <- model_parameters.default #' @export standard_error.censReg <- standard_error.default #' @export p_value.censReg <- p_value.default parameters/R/1_model_parameters.R0000644000176200001440000011347015073712214016514 0ustar liggesusers# Arguments passed to or from other methods. For instance, when default methods, glm (almost default) #################### .default ---------------------- #' Model Parameters #' #' Compute and extract model parameters. The available options and arguments depend #' on the modeling **package** and model `class`. Follow one of these links to read #' the model-specific documentation: #' - [Default method][model_parameters.default()]: `lm`, `glm`, **stats**, **censReg**, #' **MASS**, **survey**, ... #' - [Additive models][model_parameters.cgam()]: **bamlss**, **gamlss**, **mgcv**, #' **scam**, **VGAM**, `Gam` (although the output of `Gam` is more Anova-alike), #' `gamm`, ... #' - [ANOVA][model_parameters.aov()]: **afex**, `aov`, `anova`, `Gam`, ... #' - [Bayesian][model_parameters.brmsfit()]: **BayesFactor**, **blavaan**, **brms**, #' **MCMCglmm**, **posterior**, **rstanarm**, `bayesQR`, `bcplm`, `BGGM`, `blmrm`, #' `blrm`, `mcmc.list`, `MCMCglmm`, ... #' - [Clustering][model_parameters.hclust()]: **hclust**, **kmeans**, **mclust**, **pam**, ... #' - [Correlations, t-tests, etc.][model_parameters.htest()]: **lmtest**, `htest`, #' `pairwise.htest`, ... #' - [Meta-Analysis][model_parameters.rma()]: **metaBMA**, **metafor**, **metaplus**, ... #' - [Mixed models][model_parameters.glmmTMB()]: **cplm**, **glmmTMB**, **lme4**, #' **lmerTest**, **nlme**, **ordinal**, **robustlmm**, **spaMM**, `mixed`, `MixMod`, ... #' - [Multinomial, ordinal and cumulative link][model_parameters.mlm()]: **brglm2**, #' **DirichletReg**, **nnet**, **ordinal**, `mlm`, ... #' - [Multiple imputation][model_parameters.mira()]: **mice** #' - [PCA, FA, CFA, SEM][model_parameters.principal()]: **FactoMineR**, **lavaan**, #' **psych**, `sem`, ... #' - [Zero-inflated and hurdle][model_parameters.zcpglm()]: **cplm**, **mhurdle**, #' **pscl**, ... 
#' - [Other models][model_parameters.glimML()]: **aod**, **bbmle**, **betareg**, #' **emmeans**, **epiR**, **glmx**, **ivfixed**, **ivprobit**, **JRM**, #' **lmodel2**, **logitsf**, **marginaleffects**, **margins**, **maxLik**, #' **mediation**, **mfx**, **multcomp**, **mvord**, **plm**, **PMCMRplus**, #' **quantreg**, **selection**, **systemfit**, **tidymodels**, **varEST**, #' **WRS2**, `bfsl`, `deltaMethod`, `fitdistr`, `mjoint`, `mle`, `model.avg`, #' ... #' #' A full overview can be found here: #' https://easystats.github.io/parameters/reference/ #' #' @param model Statistical Model. #' @param ... Arguments passed to or from other methods. Non-documented #' arguments are #' - `digits`, `p_digits`, `ci_digits` and `footer_digits` to set the number of #' digits for the output. `groups` can be used to group coefficients. These #' arguments will be passed to the print-method, or can directly be used in #' `print()`, see documentation in [`print.parameters_model()`]. #' - If `s_value = TRUE`, the p-value will be replaced by the S-value in the #' output (cf. _Rafi and Greenland 2020_). #' - `pd` adds an additional column with the _probability of direction_ (see #' [`bayestestR::p_direction()`] for details). Furthermore, see 'Examples' in #' [`model_parameters.default()`]. #' - For developers, whose interest mainly is to get a "tidy" data frame of #' model summaries, it is recommended to set `pretty_names = FALSE` to speed #' up computation of the summary table. #' #' @seealso [insight::standardize_names()] to rename columns into a consistent, #' standardized naming scheme. #' #' @note The [`print()`][print.parameters_model] method has several #' arguments to tweak the output. There is also a #' [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) #' implemented in the #' [**see**-package](https://easystats.github.io/see/), and a dedicated #' method for use inside rmarkdown files, #' [`print_md()`][print_md.parameters_model]. \cr \cr **For developers**, if #' speed performance is an issue, you can use the (undocumented) `pretty_names` #' argument, e.g. `model_parameters(..., pretty_names = FALSE)`. This will #' skip the formatting of the coefficient names and makes `model_parameters()` #' faster. #' #' @section Standardization of model coefficients: #' Standardization is based on [standardize_parameters()]. In case #' of `standardize = "refit"`, the data used to fit the model will be #' standardized and the model is completely refitted. In such cases, standard #' errors and confidence intervals refer to the standardized coefficient. The #' default, `standardize = "refit"`, never standardizes categorical predictors #' (i.e. factors), which may be a different behaviour compared to other R #' packages or other software packages (like SPSS). To mimic behaviour of SPSS #' or packages such as **lm.beta**, use `standardize = "basic"`. #' #' @section Standardization Methods: #' - **refit**: This method is based on a complete model re-fit with a #' standardized version of the data. Hence, this method is equal to #' standardizing the variables before fitting the model. It is the "purest" and #' the most accurate (Neter et al., 1989), but it is also the most #' computationally costly and long (especially for heavy models such as Bayesian #' models). This method is particularly recommended for complex models that #' include interactions or transformations (e.g., polynomial or spline terms). 
#' The `robust` (defaults to `FALSE`) argument enables a robust standardization
#' of data, i.e., based on the `median` and `MAD` instead of the `mean` and
#' `SD`. **See [`datawizard::standardize()`] for more details.**
#' **Note** that `standardize_parameters(method = "refit")` may not return
#' the same results as fitting a model on data that has been standardized with
#' `standardize()`; `standardize_parameters()` uses the data used by the model
#' fitting function, which might not be the same data if there are missing
#' values. See the `remove_na` argument in `standardize()`.
#'
#' - **posthoc**: Post-hoc standardization of the parameters, aiming at
#' emulating the results obtained by "refit" without refitting the model. The
#' coefficients are divided by the standard deviation (or MAD if `robust`) of
#' the outcome (which becomes their expression 'unit'). Then, the coefficients
#' related to numeric variables are additionally multiplied by the standard
#' deviation (or MAD if `robust`) of the related terms, so that they correspond
#' to changes of 1 SD of the predictor (e.g., "A change in 1 SD of `x` is
#' related to a change of 0.24 of the SD of `y`"). This does not apply to binary
#' variables or factors, so the coefficients are still related to changes in
#' levels. This method is not accurate and tends to give aberrant results when
#' interactions are specified.
#'
#' - **basic**: This method is similar to `method = "posthoc"`, but treats all
#' variables as continuous: it also scales the coefficients of factor levels
#' (transformed to integers) and binary predictors by the standard deviation of
#' the corresponding column of the model matrix. Although inappropriate for
#' these cases, this method is the one implemented by default in other software
#' packages, such as [lm.beta::lm.beta()].
#'
#' - **smart** (Standardization of Model's parameters with Adjustment,
#' Reconnaissance and Transformation - *experimental*): Similar to `method =
#' "posthoc"` in that it does not involve model refitting. The difference is
#' that the SD (or MAD if `robust`) of the response is computed on the relevant
#' section of the data. For instance, if a factor with 3 levels A (the
#' intercept), B and C is entered as a predictor, the effect corresponding to B
#' vs. A will be scaled by the variance of the response at the intercept only.
#' As a result, the coefficients for effects of factors are similar to a Glass'
#' delta.
#'
#' - **pseudo** (*for 2-level (G)LMMs only*): In this (post-hoc) method, the
#' response and the predictor are standardized based on the level of prediction
#' (levels are detected with [performance::check_group_variation()]): Predictors
#' are standardized based on their SD at the level of prediction (see also
#' [datawizard::demean()]); the outcome (in linear LMMs) is standardized based
#' on a fitted random-intercept-model, where `sqrt(random-intercept-variance)`
#' is used for level 2 predictors, and `sqrt(residual-variance)` is used for
#' level 1 predictors (Hoffman 2015, page 342). A warning is given when a
#' within-group variable is found to have excess between-group variance.
#'
#' See also [package vignette](https://easystats.github.io/parameters/articles/standardize_parameters_effsize.html).
#'
#' @section Labeling the Degrees of Freedom:
#' Throughout the **parameters** package, we decided to label the residual
#' degrees of freedom *df_error*. The reason for this is that these degrees
#' of freedom do not always refer to the residuals.
For certain models, they refer #' to the estimate error - in a linear model these are the same, but in - for #' instance - any mixed effects model, this isn't strictly true. Hence, we #' think that `df_error` is the most generic label for these degrees of #' freedom. #' #' @section Confidence intervals and approximation of degrees of freedom: #' There are different ways of approximating the degrees of freedom depending #' on different assumptions about the nature of the model and its sampling #' distribution. The `ci_method` argument modulates the method for computing degrees #' of freedom (df) that are used to calculate confidence intervals (CI) and the #' related p-values. Following options are allowed, depending on the model #' class: #' #' **Classical methods:** #' #' Classical inference is generally based on the **Wald method**. #' The Wald approach to inference computes a test statistic by dividing the #' parameter estimate by its standard error (Coefficient / SE), #' then comparing this statistic against a t- or normal distribution. #' This approach can be used to compute CIs and p-values. #' #' `"wald"`: #' - Applies to *non-Bayesian models*. For *linear models*, CIs #' computed using the Wald method (SE and a *t-distribution with residual df*); #' p-values computed using the Wald method with a *t-distribution with residual df*. #' For other models, CIs computed using the Wald method (SE and a *normal distribution*); #' p-values computed using the Wald method with a *normal distribution*. #' #' `"normal"` #' - Applies to *non-Bayesian models*. Compute Wald CIs and p-values, #' but always use a normal distribution. #' #' `"residual"` #' - Applies to *non-Bayesian models*. Compute Wald CIs and p-values, #' but always use a *t-distribution with residual df* when possible. If the #' residual df for a model cannot be determined, a normal distribution is #' used instead. #' #' **Methods for mixed models:** #' #' Compared to fixed effects (or single-level) models, determining appropriate #' df for Wald-based inference in mixed models is more difficult. #' See [the R GLMM FAQ](https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable) #' for a discussion. #' #' Several approximate methods for computing df are available, but you should #' also consider instead using profile likelihood (`"profile"`) or bootstrap ("`boot"`) #' CIs and p-values instead. #' #' `"satterthwaite"` #' - Applies to *linear mixed models*. CIs computed using the #' Wald method (SE and a *t-distribution with Satterthwaite df*); p-values #' computed using the Wald method with a *t-distribution with Satterthwaite df*. #' #' `"kenward"` #' - Applies to *linear mixed models*. CIs computed using the Wald #' method (*Kenward-Roger SE* and a *t-distribution with Kenward-Roger df*); #' p-values computed using the Wald method with *Kenward-Roger SE and t-distribution with Kenward-Roger df*. #' #' `"ml1"` #' - Applies to *linear mixed models*. CIs computed using the Wald #' method (SE and a *t-distribution with m-l-1 approximated df*); p-values #' computed using the Wald method with a *t-distribution with m-l-1 approximated df*. #' See [`ci_ml1()`]. #' #' `"betwithin"` #' - Applies to *linear mixed models* and *generalized linear mixed models*. #' CIs computed using the Wald method (SE and a *t-distribution with between-within df*); #' p-values computed using the Wald method with a *t-distribution with between-within df*. #' See [`ci_betwithin()`]. 
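#'
#' For instance (a minimal sketch, assuming the **lme4**, **lmerTest** and
#' **pbkrtest** packages are installed):
#'
#' ```r
#' model <- lme4::lmer(Reaction ~ Days + (1 + Days | Subject), data = lme4::sleepstudy)
#' # Wald CIs with Satterthwaite vs. Kenward-Roger degrees of freedom
#' model_parameters(model, ci_method = "satterthwaite")
#' model_parameters(model, ci_method = "kenward")
#' ```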
#'
#' **Likelihood-based methods:**
#'
#' Likelihood-based inference is based on comparing the likelihood for the
#' maximum-likelihood estimate to the likelihood for models with one or more
#' parameter values changed (e.g., set to zero or a range of alternative values).
#' Likelihood ratios for the maximum-likelihood and alternative models are compared
#' to a \eqn{\chi}-squared distribution to compute CIs and p-values.
#'
#' `"profile"`
#' - Applies to *non-Bayesian models* of class `glm`, `polr`, `merMod` or `glmmTMB`.
#' CIs computed by *profiling the likelihood curve for a parameter*, using
#' linear interpolation to find where the likelihood ratio equals a critical value;
#' p-values computed using the Wald method with a *normal-distribution* (note:
#' this might change in a future update!)
#'
#' `"uniroot"`
#' - Applies to *non-Bayesian models* of class `glmmTMB`. CIs
#' computed by *profiling the likelihood curve for a parameter*, using root
#' finding to find where the likelihood ratio equals a critical value; p-values
#' computed using the Wald method with a *normal-distribution* (note: this
#' might change in a future update!)
#'
#' **Methods for bootstrapped or Bayesian models:**
#'
#' Bootstrap-based inference is based on **resampling** and refitting the model
#' to the resampled datasets. The distribution of parameter estimates across
#' resampled datasets is used to approximate the parameter's sampling
#' distribution. Depending on the type of model, several different methods for
#' bootstrapping and constructing CIs and p-values from the bootstrap
#' distribution are available.
#'
#' For Bayesian models, inference is based on drawing samples from the model
#' posterior distribution.
#'
#' `"quantile"` (or `"eti"`)
#' - Applies to *all models (including Bayesian models)*.
#' For non-Bayesian models, only applies if `bootstrap = TRUE`. CIs computed
#' as *equal tailed intervals* using the quantiles of the bootstrap or
#' posterior samples; p-values are based on the *probability of direction*.
#' See [`bayestestR::eti()`].
#'
#' `"hdi"`
#' - Applies to *all models (including Bayesian models)*. For non-Bayesian
#' models, only applies if `bootstrap = TRUE`. CIs computed as *highest density intervals*
#' for the bootstrap or posterior samples; p-values are based on the *probability of direction*.
#' See [`bayestestR::hdi()`].
#'
#' `"bci"` (or `"bcai"`)
#' - Applies to *all models (including Bayesian models)*.
#' For non-Bayesian models, only applies if `bootstrap = TRUE`. CIs computed
#' as *bias corrected and accelerated intervals* for the bootstrap or
#' posterior samples; p-values are based on the *probability of direction*.
#' See [`bayestestR::bci()`].
#'
#' `"si"`
#' - Applies to *Bayesian models* with proper priors. CIs computed as
#' *support intervals* comparing the posterior samples against the prior samples;
#' p-values are based on the *probability of direction*. See [`bayestestR::si()`].
#'
#' `"boot"`
#' - Applies to *non-Bayesian models* of class `merMod`. CIs computed
#' using *parametric bootstrapping* (simulating data from the fitted model);
#' p-values computed using the Wald method with a *normal-distribution*
#' (note: this might change in a future update!).
#'
#' For all iteration-based methods other than `"boot"`
#' (`"hdi"`, `"quantile"`, `"ci"`, `"eti"`, `"si"`, `"bci"`, `"bcai"`),
#' p-values are based on the probability of direction ([`bayestestR::p_direction()`]),
#' which is converted into a p-value using [`bayestestR::pd_to_p()`].
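#'
#' For example, bootstrapped CIs for a simple linear model (a minimal sketch,
#' assuming the **boot** package is installed):
#'
#' ```r
#' model <- lm(mpg ~ wt + cyl, data = mtcars)
#' # 500 bootstrap replicates, CIs as highest density intervals
#' model_parameters(model, bootstrap = TRUE, iterations = 500, ci_method = "hdi")
#' ```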
#' #' @section Statistical inference - how to quantify evidence: #' There is no standardized approach to drawing conclusions based on the #' available data and statistical models. A frequently chosen but also much #' criticized approach is to evaluate results based on their statistical #' significance (*Amrhein et al. 2017*). #' #' A more sophisticated way would be to test whether estimated effects exceed #' the "smallest effect size of interest", to avoid even the smallest effects #' being considered relevant simply because they are statistically significant, #' but clinically or practically irrelevant (*Lakens et al. 2018, Lakens 2024*). #' #' A rather unconventional approach, which is nevertheless advocated by various #' authors, is to interpret results from classical regression models either in #' terms of probabilities, similar to the usual approach in Bayesian statistics #' (*Schweder 2018; Schweder and Hjort 2003; Vos 2022*) or in terms of relative #' measure of "evidence" or "compatibility" with the data (*Greenland et al. 2022; #' Rafi and Greenland 2020*), which nevertheless comes close to a probabilistic #' interpretation. #' #' A more detailed discussion of this topic is found in the documentation of #' [`p_function()`]. #' #' The **parameters** package provides several options or functions to aid #' statistical inference. These are, for example: #' - [`equivalence_test()`][equivalence_test.lm], to compute the (conditional) #' equivalence test for frequentist models #' - [`p_significance()`][p_significance.lm], to compute the probability of #' *practical significance*, which can be conceptualized as a unidirectional #' equivalence test #' - [`p_function()`], or _consonance function_, to compute p-values and #' compatibility (confidence) intervals for statistical models #' - the `pd` argument (setting `pd = TRUE`) in `model_parameters()` includes #' a column with the *probability of direction*, i.e. the probability that a #' parameter is strictly positive or negative. See [`bayestestR::p_direction()`] #' for details. If plotting is desired, the [`p_direction()`][p_direction.lm] #' function can be used, together with `plot()`. #' - the `s_value` argument (setting `s_value = TRUE`) in `model_parameters()` #' replaces the p-values with their related _S_-values (*Rafi and Greenland 2020*) #' - finally, it is possible to generate distributions of model coefficients by #' generating bootstrap-samples (setting `bootstrap = TRUE`) or simulating #' draws from model coefficients using [`simulate_model()`]. These samples #' can then be treated as "posterior samples" and used in many functions from #' the **bayestestR** package. #' #' Most of the above shown options or functions derive from methods originally #' implemented for Bayesian models (*Makowski et al. 2019*). However, assuming #' that model assumptions are met (which means, the model fits well to the data, #' the correct model is chosen that reflects the data generating process #' (distributional model family) etc.), it seems appropriate to interpret #' results from classical frequentist models in a "Bayesian way" (more details: #' documentation in [`p_function()`]). #' #' @inheritSection format_parameters Interpretation of Interaction Terms #' @inheritSection print.parameters_model Global Options to Customize Messages and Tables when Printing #' #' @references #' #' - Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is #' flat (p > 0.05): Significance thresholds and the crisis of unreplicable #' research. 
PeerJ, 5, e3544. \doi{10.7717/peerj.3544} #' #' - Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference, #' Emphasize Unconditional Compatibility Descriptions of Statistics. (2022) #' https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022) #' #' - Hoffman, L. (2015). Longitudinal analysis: Modeling within-person #' fluctuation and change. Routledge. #' #' - Lakens, D. (2024). Improving Your Statistical Inferences (Version v1.5.1). #' Retrieved from https://lakens.github.io/statistical_inferences/. #' \doi{10.5281/ZENODO.6409077} #' #' - Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence Testing #' for Psychological Research: A Tutorial. Advances in Methods and Practices #' in Psychological Science, 1(2), 259–269. #' #' - Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019). #' Indices of Effect Existence and Significance in the Bayesian Framework. #' Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767} #' #' - Montiel Olea, J. L., and Plagborg-Møller, M. (2019). Simultaneous #' confidence bands: Theory, implementation, and an application to SVARs. #' Journal of Applied Econometrics, 34(1), 1–17. \doi{10.1002/jae.2656} #' #' - Neter, J., Wasserman, W., and Kutner, M. H. (1989). Applied linear #' regression models. #' #' - Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical #' science: replace confidence and significance by compatibility and surprise. #' BMC Medical Research Methodology (2020) 20:244. #' #' - Schweder T. Confidence is epistemic probability for empirical science. #' Journal of Statistical Planning and Inference (2018) 195:116–125. #' \doi{10.1016/j.jspi.2017.09.016} #' #' - Schweder T, Hjort NL. Frequentist analogues of priors and posteriors. #' In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory #' Data Confrontation in Economics, pp. 285-217. Princeton University Press, #' Princeton, NJ, 2003 #' #' - Vos P, Holbert D. Frequentist statistical inference without repeated sampling. #' Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x} #' #' @return A data frame of indices related to the model's parameters. #' @export model_parameters <- function(model, ...) { UseMethod("model_parameters") } # DF naming convention -------------------- # DF column naming # F has df, df_error # t has df_error # z has df_error = Inf # Chisq has df # https://github.com/easystats/parameters/issues/455 # Options ------------------------------------- # Add new options to the docs in "print.parameters_model" # getOption("parameters_info"): show model summary # getOption("parameters_mixed_info"): show model summary for mixed models # getOption("parameters_cimethod"): show message about CI approximation # getOption("parameters_exponentiate"): show warning about exp for log/logit links # getOption("parameters_labels"): use value/variable labels instead pretty names # getOption("parameters_interaction"): separator char for interactions # getOption("parameters_select"): default for the `select` argument #' @rdname model_parameters #' @export parameters <- model_parameters #' @title Parameters from (General) Linear Models #' @name model_parameters.default #' #' @description Extract and compute indices and measures to describe parameters #' of (generalized) linear models (GLMs). #' #' @param model Model object. #' @param ci Confidence Interval (CI) level. Default to `0.95` (`95%`). #' @param bootstrap Should estimates be based on bootstrapped model? 
If `TRUE`,
#' then arguments of [Bayesian regressions][model_parameters.brmsfit] apply
#' (see also [`bootstrap_parameters()`]).
#' @param iterations The number of bootstrap replicates. This only applies in the
#' case of bootstrapped frequentist models.
#' @param standardize The method used for standardizing the parameters. Can be
#' `NULL` (default; no standardization), `"refit"` (for re-fitting the model
#' on standardized data) or one of `"basic"`, `"posthoc"`, `"smart"`,
#' `"pseudo"`. See 'Details' in [`standardize_parameters()`].
#' **Importantly**:
#' - The `"refit"` method does *not* standardize categorical predictors (i.e.
#' factors), which may be a different behaviour compared to other R packages
#' (such as **lm.beta**) or other software packages (like SPSS). To mimic
#' such behaviours, either use `standardize="basic"` or standardize the data
#' with `datawizard::standardize(force=TRUE)` *before* fitting the model.
#' - By default, the response (dependent) variable is also standardized, *if
#' applicable*. Set `include_response = FALSE` to avoid standardization of
#' the response variable. See details in [`datawizard::standardize.default()`].
#' - For mixed models, when using methods other than `"refit"`, only the fixed
#' effects will be standardized.
#' - Robust estimation (i.e., `vcov` set to a value other than `NULL`) of
#' standardized parameters only works when `standardize="refit"`.
#' @param exponentiate Logical, indicating whether or not to exponentiate the
#' coefficients (and related confidence intervals). This is typical for
#' logistic regression, or more generally speaking, for models with log or
#' logit links. It is also recommended to use `exponentiate = TRUE` for models
#' with log-transformed response values. For models with a log-transformed
#' response variable, when `exponentiate = TRUE`, a one-unit increase in the
#' predictor is associated with multiplying the outcome by that predictor's
#' coefficient. **Note:** Delta-method standard errors are also computed (by
#' multiplying the standard errors by the transformed coefficients). This is
#' to mimic behaviour of other software packages, such as Stata, but these
#' standard errors poorly estimate uncertainty for the transformed
#' coefficient. The transformed confidence interval more clearly captures this
#' uncertainty. For `compare_parameters()`, `exponentiate = "nongaussian"`
#' will only exponentiate coefficients from non-Gaussian families.
#' @param p_adjust String value, if not `NULL`, indicates the method to adjust
#' p-values. See [`stats::p.adjust()`] for details. Further possible
#' adjustment methods are `"tukey"`, `"scheffe"`, `"sidak"`, `"sup-t"`, and
#' `"none"` to explicitly disable adjustment for `emmGrid` objects (from
#' **emmeans**). `"sup-t"` computes simultaneous confidence bands, also called
#' sup-t confidence band (Montiel Olea & Plagborg-Møller, 2019).
#' @param ci_method Method for computing degrees of freedom for
#' confidence intervals (CI) and the related p-values. Allowed are the following
#' options (which vary depending on the model class): `"residual"`,
#' `"normal"`, `"likelihood"`, `"satterthwaite"`, `"kenward"`, `"wald"`,
#' `"profile"`, `"boot"`, `"uniroot"`, `"ml1"`, `"betwithin"`, `"hdi"`,
#' `"quantile"`, `"ci"`, `"eti"`, `"si"`, `"bci"`, or `"bcai"`. See section
#' _Confidence intervals and approximation of degrees of freedom_ in
#' [`model_parameters()`] for further details. When `ci_method=NULL`, in most
#' cases `"wald"` is used.
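#' For instance (a minimal sketch; profile CIs for `glm` rely on the **MASS**
#' profiling code behind `confint()`):
#' ```r
#' model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial")
#' model_parameters(model, ci_method = "profile") # likelihood-based CIs
#' model_parameters(model, ci_method = "wald") # Wald CIs, faster
#' ```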
#' @param include_info Logical, if `TRUE`, prints summary information about the #' model (model formula, number of observations, residual standard deviation #' and more). #' @param keep Character containing a regular expression pattern that #' describes the parameters that should be included (for `keep`) or excluded #' (for `drop`) in the returned data frame. `keep` may also be a #' named list of regular expressions. All non-matching parameters will be #' removed from the output. If `keep` is a character vector, every parameter #' name in the *"Parameter"* column that matches the regular expression in #' `keep` will be selected from the returned data frame (and vice versa, #' all parameter names matching `drop` will be excluded). Furthermore, if #' `keep` has more than one element, these will be merged with an `OR` #' operator into a regular expression pattern like this: `"(one|two|three)"`. #' If `keep` is a named list of regular expression patterns, the names of the #' list-element should equal the column name where selection should be #' applied. This is useful for model objects where `model_parameters()` #' returns multiple columns with parameter components, like in #' [model_parameters.lavaan()]. Note that the regular expression pattern #' should match the parameter names as they are stored in the returned data #' frame, which can be different from how they are printed. Inspect the #' `$Parameter` column of the parameters table to get the exact parameter #' names. #' @param ... Arguments passed to or from other methods. For instance, when #' `bootstrap = TRUE`, arguments like `type` or `parallel` are passed down to #' `bootstrap_model()`. #' #' Further non-documented arguments are: #' #' - `digits`, `p_digits`, `ci_digits` and `footer_digits` to set the number of #' digits for the output. `groups` can be used to group coefficients. These #' arguments will be passed to the print-method, or can directly be used in #' `print()`, see documentation in [`print.parameters_model()`]. #' - If `s_value = TRUE`, the p-value will be replaced by the S-value in the #' output (cf. _Rafi and Greenland 2020_). #' - `pd` adds an additional column with the _probability of direction_ (see #' [`bayestestR::p_direction()`] for details). Furthermore, see 'Examples' for #' this function. #' - For developers, whose interest mainly is to get a "tidy" data frame of #' model summaries, it is recommended to set `pretty_names = FALSE` to speed #' up computation of the summary table. #' @param drop See `keep`. #' @param verbose Toggle warnings and messages. #' @inheritParams standard_error #' #' @seealso [`insight::standardize_names()`] to rename columns into a #' consistent, standardized naming scheme. 
#' #' @inheritSection model_parameters Confidence intervals and approximation of degrees of freedom #' #' @examplesIf require("boot", quietly = TRUE) && require("sandwich") && require("clubSandwich") && require("brglm2") #' library(parameters) #' model <- lm(mpg ~ wt + cyl, data = mtcars) #' #' model_parameters(model) #' #' # bootstrapped parameters #' model_parameters(model, bootstrap = TRUE) #' #' # standardized parameters #' model_parameters(model, standardize = "refit") #' #' # robust, heteroskedasticity-consistent standard errors #' model_parameters(model, vcov = "HC3") #' #' model_parameters(model, #' vcov = "vcovCL", #' vcov_args = list(cluster = mtcars$cyl) #' ) #' #' # different p-value style in output #' model_parameters(model, p_digits = 5) #' model_parameters(model, digits = 3, ci_digits = 4, p_digits = "scientific") #' #' # report S-value or probability of direction for parameters #' model_parameters(model, s_value = TRUE) #' model_parameters(model, pd = TRUE) #' #' \donttest{ #' # logistic regression model #' model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") #' model_parameters(model) #' #' # show odds ratio / exponentiated coefficients #' model_parameters(model, exponentiate = TRUE) #' #' # bias-corrected logistic regression with penalized maximum likelihood #' model <- glm( #' vs ~ wt + cyl, #' data = mtcars, #' family = "binomial", #' method = "brglmFit" #' ) #' model_parameters(model) #' } #' @return A data frame of indices related to the model's parameters. #' @export model_parameters.default <- function( model, ci = 0.95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, vcov = NULL, vcov_args = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ... ) { # validation check for inputs .is_model_valid(model) # validation check, warn if unsupported argument is used. # unsupported arguments will be removed from the argument list. dots <- .check_dots( dots = list(...), not_allowed = c("include_sigma", "wb_component"), class(model)[1], verbose = FALSE ) # extract model parameters table, as data frame out <- .model_parameters_generic( model = model, ci = ci, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, include_info = include_info, keep_parameters = keep, drop_parameters = drop, vcov = vcov, vcov_args = vcov_args, verbose = verbose, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } .fail_error_message <- function(out, model) { # tell user if something went wrong... if (length(out) == 1 && isTRUE(is.na(out))) { insight::format_error( paste0( "Sorry, `model_parameters()` failed with the following error (possible class `", class(model)[1], "` not supported):\n" ), attr(out, "error") ) } else if (is.null(out)) { insight::format_error(paste0( "Sorry, `model_parameters()` does not currently work for objects of class `", class(model)[1], "`." )) } } # helper function for the composition of the parameters table, # including a bunch of attributes required for further processing # (like printing etc.) 
.model_parameters_generic <- function( model, ci = 0.95, bootstrap = FALSE, iterations = 1000, merge_by = "Parameter", standardize = NULL, exponentiate = FALSE, effects = "fixed", component = "conditional", ci_method = NULL, p_adjust = NULL, include_info = FALSE, keep_parameters = NULL, drop_parameters = NULL, vcov = NULL, vcov_args = NULL, verbose = TRUE, ... ) { dots <- list(...) out <- tryCatch( { # ==== 1. first step, extracting (bootstrapped) model parameters ------- # Processing, bootstrapped parameters if (bootstrap) { # set default method for bootstrapped CI if (is.null(ci_method) || missing(ci_method)) { ci_method <- "quantile" } fun_args <- list(model, iterations = iterations, ci = ci, ci_method = ci_method) fun_args <- c(fun_args, dots) params <- do.call("bootstrap_parameters", fun_args) # Processing, non-bootstrapped parameters } else { # set default method for CI if (is.null(ci_method) || missing(ci_method)) { ci_method <- "wald" } fun_args <- list( model, ci = ci, component = component, merge_by = merge_by, standardize = standardize, effects = effects, ci_method = ci_method, p_adjust = p_adjust, keep_parameters = keep_parameters, drop_parameters = drop_parameters, verbose = verbose, vcov = vcov, vcov_args = vcov_args ) fun_args <- c(fun_args, dots) params <- do.call(".extract_parameters_generic", fun_args) } # ==== 2. second step, exponentiate ------- # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) # ==== 3. third step, add information as attributes ------- # add further information as attributes params <- .add_model_parameters_attributes( params, model, ci, exponentiate, bootstrap, iterations, ci_method = ci_method, p_adjust = p_adjust, include_info = include_info, verbose = verbose, ... ) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params }, error = function(e) { fail <- NA attr(fail, "error") <- gsub(" ", " ", gsub("\\n", "", e$message), fixed = TRUE) fail } ) # check if everything is ok .fail_error_message(out, model) out } #################### .glm ---------------------- #' @export model_parameters.glm <- function( model, ci = 0.95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, vcov = NULL, vcov_args = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ... ) { dots <- list(...) # set default if (is.null(ci_method)) { if (isTRUE(bootstrap)) { ci_method <- "quantile" } else if (!is.null(vcov) || !is.null(vcov_args) || inherits(model, "svyolr")) { ci_method <- "wald" } else { ci_method <- "profile" } } # profiled CIs may take a long time to compute, so we warn the user about it if (insight::n_obs(model) > 1e4 && identical(ci_method, "profile")) { insight::format_alert( "Profiled confidence intervals may take longer time to compute.", "Use `ci_method=\"wald\"` for faster computation of CIs." ) } # tell user that profiled CIs don't respect vcov-args if ( identical(ci_method, "profile") && (!is.null(vcov) || !is.null(vcov_args)) && isTRUE(verbose) ) { insight::format_alert( "When `ci_method=\"profile\"`, `vcov` only modifies standard errors, test-statistic and p-values, but not confidence intervals.", # nolint "Use `ci_method=\"wald\"` to return confidence intervals based on robust standard errors." 
) } fun_args <- list( model = model, ci = ci, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, include_info = include_info, keep_parameters = keep, drop_parameters = drop, vcov = vcov, vcov_args = vcov_args, verbose = verbose ) fun_args <- c(fun_args, dots) out <- do.call(".model_parameters_generic", fun_args) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export model_parameters.zoo <- model_parameters.default parameters/R/methods_biglm.R0000644000176200001440000000033614716604200015560 0ustar liggesusers#' @export standard_error.biglm <- function(model, ...) { cs <- summary(model)$mat params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(cs[, 4]) ) } parameters/R/extract_parameters_anova.R0000644000176200001440000003200515013566107020026 0ustar liggesusers#' @keywords internal .extract_parameters_anova <- function(model, test = "multivariate", p_adjust = NULL, include_intercept = FALSE, verbose = TRUE) { # Processing if (inherits(model, "manova")) { parameters <- .extract_anova_manova(model) } else if (inherits(model, "maov")) { parameters <- .extract_anova_maov(model) } else if (inherits(model, "aov")) { parameters <- .extract_anova_aov(model) } else if (inherits(model, "anova")) { parameters <- .extract_anova_anova(model) } else if (inherits(model, "Anova.mlm")) { parameters <- .extract_anova_mlm(model, test) } else if (inherits(model, "aovlist")) { parameters <- .extract_anova_aovlist(model) } else if (inherits(model, "anova.rms")) { parameters <- .extract_anova_aov_rms(model) } else if (inherits(model, "seqanova.svyglm")) { parameters <- .extract_anova_aov_svyglm(model) } # remove intercept intercepts <- parameters$Parameter %in% c("Intercept", "(Intercept)") if (any(intercepts) && !include_intercept) { parameters <- parameters[!intercepts, ] } # Rename # p-values names(parameters) <- gsub("(Pr|P)\\(>.*\\)", "p", names(parameters)) names(parameters) <- gsub("Pr..Chisq.", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("Pr..Chi.", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("p.value", "p", names(parameters), fixed = TRUE) names(parameters) <- gsub("^P$", "p", names(parameters)) # squares names(parameters) <- gsub("Sum Sq", "Sum_Squares", names(parameters), fixed = TRUE) names(parameters) <- gsub("Error SS", "Sum_Squares_Error", names(parameters), fixed = TRUE) names(parameters) <- gsub("Partial.SS", "Sum_Squares_Partial", names(parameters), fixed = TRUE) names(parameters) <- gsub("Sum of Sq", "Sum_Squares", names(parameters), fixed = TRUE) names(parameters) <- gsub("Mean Sq", "Mean_Square", names(parameters), fixed = TRUE) names(parameters) <- gsub("MSE", "Mean_Square", names(parameters), fixed = TRUE) names(parameters) <- gsub("MS", "Mean_Square", names(parameters), fixed = TRUE) # statistic names(parameters) <- gsub("approx F", "F", names(parameters), fixed = TRUE) names(parameters) <- gsub("F values", "F", names(parameters), fixed = TRUE) names(parameters) <- gsub("F value", "F", names(parameters), fixed = TRUE) names(parameters) <- gsub("LR.Chisq", "Chi2", names(parameters), fixed = TRUE) names(parameters) <- gsub("LR Chisq", "Chi2", names(parameters), fixed = TRUE) names(parameters) <- gsub("Chisq", "Chi2", names(parameters), fixed = TRUE) names(parameters) <- gsub("Chi.sq", "Chi2", names(parameters), fixed = TRUE) names(parameters) <- 
gsub("Chi.Square", "Chi2", names(parameters), fixed = TRUE) names(parameters) <- gsub("Chi-Square", "Chi2", names(parameters), fixed = TRUE) # other names(parameters) <- gsub("logLik", "Log_Likelihood", names(parameters), fixed = TRUE) names(parameters) <- gsub("deviance", "Deviance", names(parameters), fixed = TRUE) names(parameters) <- gsub("Resid. Dev", "Deviance_error", names(parameters), fixed = TRUE) # error-df if (!"df_error" %in% names(parameters)) { names(parameters) <- gsub("DenDF", "df_error", names(parameters), fixed = TRUE) names(parameters) <- gsub("den Df", "df_error", names(parameters), fixed = TRUE) names(parameters) <- gsub("Res.Df", "df_error", names(parameters), fixed = TRUE) names(parameters) <- gsub("Resid. Df", "df_error", names(parameters), fixed = TRUE) names(parameters) <- gsub("Res.DoF", "df_error", names(parameters), fixed = TRUE) } # df if (!"df" %in% names(parameters)) { names(parameters) <- gsub("npar", "df", names(parameters), fixed = TRUE) names(parameters) <- gsub("NumDF", "df", names(parameters), fixed = TRUE) names(parameters) <- gsub("num Df", "df", names(parameters), fixed = TRUE) names(parameters) <- gsub("d.f.", "df", names(parameters), fixed = TRUE) names(parameters) <- gsub("Df", "df", names(parameters), fixed = TRUE) } # other df names(parameters) <- gsub("Chi.Df", "Chi2_df", names(parameters), fixed = TRUE) names(parameters) <- gsub("Chi DoF", "Chi2_df", names(parameters), fixed = TRUE) # Reorder row.names(parameters) <- NULL col_order <- c( "Response", "Group", "Parameter", "Coefficient", "DEff", "SE", "Pillai", "AIC", "BIC", "Log_Likelihood", "Chi2", "Chi2_df", "RSS", "Sum_Squares", "Sum_Squares_Partial", "Sum_Squares_Error", "df", "Deviance", "Statistic", "df_num", "df_error", "Deviance_error", "Mean_Square", "F", "Rao", "p" ) parameters <- parameters[col_order[col_order %in% names(parameters)]] # ==== adjust p-values? 
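  # A hedged usage sketch of what this step enables (hypothetical model;
  # for the standard adjustment methods, the internal `.p_adjust()` helper
  # wraps `stats::p.adjust()`):
  #   m <- aov(mpg ~ factor(cyl) + factor(gear), data = mtcars)
  #   model_parameters(m, p_adjust = "holm")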
if (!is.null(p_adjust)) { parameters <- .p_adjust(parameters, p_adjust, model, verbose) } insight::text_remove_backticks(parameters, verbose = FALSE) } # helpers ----- # aov ----- .extract_anova_aov <- function(model) { parameters <- as.data.frame(summary(model)[[1]]) parameters$Parameter <- insight::trim_ws(row.names(parameters)) parameters } # manova ----- .extract_anova_manova <- function(model) { parameters <- as.data.frame(summary(model)$stats) parameters$Parameter <- insight::trim_ws(row.names(parameters)) parameters$df_num <- parameters[["num Df"]] parameters$df_error <- parameters[["den Df"]] parameters[["den Df"]] <- NULL parameters[["num Df"]] <- NULL parameters } # maov ----- .extract_anova_maov <- function(model) { s <- summary(model) out <- do.call(rbind, lapply(names(s), function(i) { parameters <- as.data.frame(s[[i]]) parameters$Parameter <- insight::trim_ws(row.names(parameters)) parameters$Response <- gsub("\\s*Response ", "", i) parameters })) out } # aov.rms ----- .extract_anova_aov_rms <- function(model) { parameters <- data.frame(model) parameters$Parameter <- rownames(parameters) parameters$Parameter[parameters$Parameter == "ERROR"] <- "Residuals" parameters$Parameter[parameters$Parameter == "TOTAL"] <- "Total" parameters } # aovlist ----- .extract_anova_aovlist <- function(model) { if (names(model)[1L] == "(Intercept)") { model <- model[-1L] } parameters <- Reduce(function(x, y) merge(x, y, all = TRUE, sort = FALSE), lapply(names(model), function(i) { aov_summary <- summary(model[[i]]) if (inherits(aov_summary, "summary.manova")) { temp <- as.data.frame(aov_summary$stats) } else { temp <- as.data.frame(aov_summary[[1]]) } temp$Parameter <- insight::trim_ws(row.names(temp)) temp$Group <- i temp })) # parameters <- parameters[order(parameters$Group), ] parameters } # anova ----- .extract_anova_anova <- function(model) { parameters <- as.data.frame(model) parameters$Parameter <- insight::trim_ws(row.names(parameters)) # Deal with anovas of models if (length(attributes(model)$heading) == 2) { info <- attributes(model)$heading[[2]] if (grepl("Model", info, fixed = TRUE)) { parameters$Parameter <- unlist(strsplit(info, "\n", fixed = TRUE)) } } else if (length(attributes(model)$heading) > 2) { p_names <- attributes(model)$heading[-1:-2] if (nrow(parameters) == length(p_names)) { parameters$Parameter <- p_names } } # If mixed models... sumsq <- names(parameters)[names(parameters) %in% c("Sum Sq", "Sum of Sq")] df_num <- names(parameters)[names(parameters) %in% c("npar", "Df", "NumDF", "num Df")] mean_sq <- names(parameters)[names(parameters) %in% c("Mean Sq", "MSE")] if (length(sumsq) != 0 && length(df_num) != 0) { parameters$Mean_Square <- parameters[[sumsq]] / parameters[[df_num]] } else if (length(mean_sq) != 0) { parameters$Mean_Square <- parameters[[mean_sq]] } if (length(df_num) == 0 && length(sumsq) != 0 && "Mean_Square" %in% colnames(parameters) && !("Df" %in% colnames(parameters))) { parameters$Df <- round(parameters[[sumsq]] / parameters$Mean_Square) } # Special catch for car::linearHypothesis m_attr <- attributes(model) if (!is.null(m_attr$value) && isTRUE(startsWith(m_attr$heading[[1]], "Linear hypothesis"))) { # Drop unrestricted model (not interesting in linear hypothesis tests) # Use formula to subset if available (e.g. 
with car::linearHypothesis) if (any(grepl("Model", m_attr$heading, fixed = TRUE))) { idx <- sub(".*: ", "", strsplit( grep("Model", m_attr$heading, fixed = TRUE, value = TRUE), "\n", fixed = TRUE )[[1]]) idx <- idx != "restricted model" parameters <- parameters[idx, , drop = FALSE] } hypothesis <- m_attr$heading[grep("=", m_attr$heading, fixed = TRUE)] parameters_xtra <- data.frame( Parameter = hypothesis, Coefficient = m_attr$value, SE = sqrt(as.numeric(diag(m_attr$vcov))) ) row.names(parameters_xtra) <- row.names(parameters) <- NULL parameters <- cbind(parameters_xtra, parameters) parameters$Parameter <- gsub(" ", " ", parameters$Parameter, fixed = TRUE) ## Annoying extra space sometimes } parameters } # Anova.mlm ------------- .extract_anova_mlm <- function(model, test = NULL) { if (identical(test, "univariate")) { ut <- unclass(summary(model)$univariate.tests) out <- data.frame(Parameter = rownames(ut), stringsAsFactors = FALSE) out <- cbind(out, as.data.frame(ut)) } else { out <- lapply(seq_along(model$terms), function(i) { if (model$repeated) { qr_value <- qr(model$SSPE[[i]]) } else { qr_value <- qr(model$SSPE) } eigs <- Re(eigen(qr.coef(qr_value, model$SSP[[i]]), symmetric = FALSE)$values) test <- switch(model$test, Pillai = .pillai_test(eigs, model$df[i], model$error.df), Wilks = .wilks_test(eigs, model$df[i], model$error.df), `Hotelling-Lawley` = .hl_test(eigs, model$df[i], model$error.df), Roy = .roy_test(eigs, model$df[i], model$error.df) ) data.frame( Parameter = model$terms[i], df = model$df[i], Statistic = test[1], `F` = test[2], # nolint df_num = test[3], df_error = test[4], p = stats::pf(test[2], test[3], test[4], lower.tail = FALSE), stringsAsFactors = FALSE ) }) out <- do.call(rbind, out) } out } # Anova.seqanova.svyglm ------------- .extract_anova_aov_svyglm <- function(model) { if (identical(attributes(model)$method, "Wald")) { params <- lapply(model, function(x) { data.frame(F = as.vector(x$Ftest), df = x$df, df_error = x$ddf, p = as.vector(x$p)) }) } else { params <- lapply(model, function(x) { data.frame(Chi2 = x$chisq, DEff = x$lambda, df = x$df, df_error = x$ddf, p = as.vector(x$p)) }) } params <- do.call(rbind, params) cbind(data.frame(Parameter = sapply(model, "[[", "test.terms"), params)) } # test helper ------------- .pillai_test <- function(eig, q, df.res) { test <- sum(eig / (1 + eig)) p <- length(eig) s <- min(p, q) n <- 0.5 * (df.res - p - 1) m <- 0.5 * (abs(p - q) - 1) tmp1 <- 2 * m + s + 1 tmp2 <- 2 * n + s + 1 c(test, (tmp2 / tmp1 * test) / (s - test), s * tmp1, s * tmp2) } .roy_test <- function(eig, q, df.res) { p <- length(eig) test <- max(eig) tmp1 <- max(p, q) tmp2 <- df.res - tmp1 + q c(test, (tmp2 * test) / tmp1, tmp1, tmp2) } .hl_test <- function(eig, q, df.res) { test <- sum(eig) p <- length(eig) m <- 0.5 * (abs(p - q) - 1) n <- 0.5 * (df.res - p - 1) s <- min(p, q) tmp1 <- 2 * m + s + 1 tmp2 <- 2 * (s * n + 1) c(test, (tmp2 * test) / s / s / tmp1, s * tmp1, tmp2) } .wilks_test <- function(eig, q, df.res) { test <- prod(1 / (1 + eig)) p <- length(eig) tmp1 <- df.res - 0.5 * (p - q + 1) tmp2 <- (p * q - 2) / 4 tmp3 <- p^2 + q^2 - 5 tmp3 <- if (tmp3 > 0) { sqrt(((p * q)^2 - 4) / tmp3) } else { 1 } c( test, ((test^(-1 / tmp3) - 1) * (tmp1 * tmp3 - 2 * tmp2)) / p / q, p * q, tmp1 * tmp3 - 2 * tmp2 ) } # parameter-power ---------------- .power_for_aov <- function(model, params) { if (requireNamespace("effectsize", quietly = TRUE)) { power_aov <- tryCatch( { cohens_f2 <- effectsize::cohens_f_squared(model, partial = TRUE, verbose = FALSE) f2 <- 
cohens_f2$Cohens_f2[match(cohens_f2$Parameter, params$Parameter)] u <- params$df[params$Parameter != "Residuals"] v <- params$df[params$Parameter == "Residuals"] lambda <- f2 * (u + v + 1) cohens_f2$Power <- stats::pf(stats::qf(0.05, u, v, lower.tail = FALSE), u, v, lambda, lower.tail = FALSE) cohens_f2 }, error = function(e) { NULL } ) } if (!is.null(power_aov)) { params <- merge(params, power_aov[c("Parameter", "Power")], sort = FALSE, all = TRUE) } params } parameters/R/factor_scores.R0000644000176200001440000000240415025253237015601 0ustar liggesusers#' Extract factor scores from Factor Analysis (EFA) or Omega #' #' `factor_scores()` extracts the factor scores from objects returned by #' [`psych::fa()`], [`factor_analysis()`], or [`psych::omega()`] #' #' @param x An object returned by [`psych::fa()`], [`factor_analysis()`], or #' [`psych::omega()`]. #' @param ... Currently unused. #' #' @return A data frame with the factor scores. It simply extracts the `$scores` #' element from the object and converts it into a data frame. #' #' @seealso [`factor_analysis()`] #' #' @examplesIf insight::check_if_installed("psych", quietly = TRUE) #' data(mtcars) #' out <- factor_analysis(mtcars[, 1:7], n = 2) #' head(factor_scores(out)) #' #' @export factor_scores <- function(x, ...) { UseMethod("factor_scores") } #' @export factor_scores.fa <- function(x, ...) { as.data.frame(x$scores) } #' @export factor_scores.omega <- function(x, ...) { as.data.frame(x$scores) } #' @export factor_scores.parameters_efa <- function(x, ...) { model <- attributes(x)$model if (is.null(model)) { insight::format_error("The `model` attribute is missing from the input object.") } as.data.frame(model$scores) } #' @export factor_scores.parameters_omega <- factor_scores.parameters_efa parameters/R/methods_psych.R0000644000176200001440000003045515025253237015625 0ustar liggesusers#' Parameters from PCA, FA, CFA, SEM #' #' Format structural models from the **psych** or **FactoMineR** packages. There #' is a `summary()` method for the returned output from `model_parameters()`, to #' show further information. See 'Examples'. #' #' @param standardize Return standardized parameters (standardized coefficients). #' Can be `TRUE` (or `"all"` or `"std.all"`) for standardized #' estimates based on both the variances of observed and latent variables; #' `"latent"` (or `"std.lv"`) for standardized estimates based #' on the variances of the latent variables only; or `"no_exogenous"` #' (or `"std.nox"`) for standardized estimates based on both the #' variances of observed and latent variables, but not the variances of #' exogenous covariates. See `lavaan::standardizedsolution` for details. #' @param labels A character vector containing labels to be added to the #' loadings data. Usually, the question related to the item. #' @param component What type of links to return. Can be `"all"` or some of #' `c("regression", "correlation", "loading", "variance", "mean")`. #' @param ... Arguments passed to or from other methods. #' @inheritParams principal_components #' @inheritParams model_parameters.default #' #' @note There is also a #' [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) #' for `lavaan` models implemented in the #' [**see**-package](https://easystats.github.io/see/). 
#'
#' @details
#' For the structural models obtained with **psych**, the following indices
#' are present:
#'
#' - **Complexity** (\cite{Hofmann, 1978; Pettersson and Turkheimer,
#'   2010}) represents the number of latent components needed to account for
#'   the observed variables. Whereas a perfect simple structure solution has a
#'   complexity of 1 in that each item would only load on one factor, a
#'   solution with evenly distributed items has a complexity greater than 1.
#'
#' - **Uniqueness** represents the variance that is 'unique' to the
#'   variable and not shared with other variables. It is equal to
#'   `1 - communality` (communality being the variance that is shared with
#'   other variables). A uniqueness of `0.20` suggests that `20%` of that
#'   variable's variance is not shared with other variables in the overall
#'   factor model. The greater the 'uniqueness', the lower the relevance of
#'   the variable in the factor model.
#'
#' - **MSA** represents the Kaiser-Meyer-Olkin Measure of Sampling
#'   Adequacy (\cite{Kaiser and Rice, 1974}) for each item. It indicates
#'   whether there is enough data for each factor to give reliable results
#'   for the PCA. The value should be > 0.6, and desirable values are > 0.8
#'   (\cite{Tabachnick and Fidell, 2013}).
#'
#' @examplesIf all(insight::check_if_installed(c("psych", "lavaan"), quietly = TRUE))
#' library(parameters)
#' \donttest{
#' # Principal Component Analysis (PCA) ---------
#' data(attitude)
#' pca <- psych::principal(attitude)
#' model_parameters(pca)
#' summary(model_parameters(pca))
#'
#' pca <- psych::principal(attitude, nfactors = 3, rotate = "none")
#' model_parameters(pca, sort = TRUE, threshold = 0.2)
#'
#' principal_components(attitude, n = 3, sort = TRUE, threshold = 0.2)
#'
#'
#' # Exploratory Factor Analysis (EFA) ---------
#' efa <- psych::fa(attitude, nfactors = 3)
#' model_parameters(efa,
#'   threshold = "max", sort = TRUE,
#'   labels = as.character(1:ncol(attitude))
#' )
#'
#'
#' # Omega ---------
#' data(mtcars)
#' omega <- psych::omega(mtcars, nfactors = 3, plot = FALSE)
#' params <- model_parameters(omega)
#' params
#' summary(params)
#' }
#'
#'
#' # lavaan -------------------------------------
#' # Confirmatory Factor Analysis (CFA) ---------
#'
#' data(HolzingerSwineford1939, package = "lavaan")
#' structure <- " visual  =~ x1 + x2 + x3
#'                textual =~ x4 + x5 + x6
#'                speed   =~ x7 + x8 + x9 "
#' model <- lavaan::cfa(structure, data = HolzingerSwineford1939)
#' model_parameters(model)
#' model_parameters(model, standardize = TRUE)
#'
#' # filter parameters
#' model_parameters(
#'   model,
#'   parameters = list(
#'     To = "^(?!visual)",
#'     From = "^(?!(x7|x8))"
#'   )
#' )
#'
#' # Structural Equation Model (SEM) ------------
#'
#' data(PoliticalDemocracy, package = "lavaan")
#' structure <- "
#'   # latent variable definitions
#'   ind60 =~ x1 + x2 + x3
#'   dem60 =~ y1 + a*y2 + b*y3 + c*y4
#'   dem65 =~ y5 + a*y6 + b*y7 + c*y8
#'   # regressions
#'   dem60 ~ ind60
#'   dem65 ~ ind60 + dem60
#'   # residual correlations
#'   y1 ~~ y5
#'   y2 ~~ y4 + y6
#'   y3 ~~ y7
#'   y4 ~~ y8
#'   y6 ~~ y8
#' "
#' model <- lavaan::sem(structure, data = PoliticalDemocracy)
#' model_parameters(model)
#' model_parameters(model, standardize = TRUE)
#'
#' @return A data frame of indices or loadings.
#' @references
#' - Kaiser, H. F., and Rice, J. (1974). Little Jiffy, Mark IV. Educational
#'   and Psychological Measurement, 34(1), 111-117.
#'
#' - Pettersson, E., and Turkheimer, E. (2010). Item selection, evaluation, and
#'   simple structure in personality data. Journal of research in personality,
#'   44(4), 407-420.
#' #' - Revelle, W. (2016). How To: Use the psych package for Factor Analysis and #' data reduction. #' #' - Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate statistics #' (6th ed.). Boston: Pearson Education. #' #' - Rosseel Y (2012). lavaan: An R Package for Structural Equation #' Modeling. Journal of Statistical Software, 48(2), 1-36. #' #' - Merkle EC , Rosseel Y (2018). blavaan: Bayesian Structural Equation #' Models via Parameter Expansion. Journal of Statistical Software, 85(4), #' 1-30. http://www.jstatsoft.org/v85/i04/ #' #' @export model_parameters.principal <- function(model, sort = FALSE, threshold = NULL, labels = NULL, verbose = TRUE, ...) { # n n <- model$factors # Get summary data_summary <- .get_fa_variance_summary(model) # Get loadings loadings <- as.data.frame(unclass(model$loadings)) # Format loadings <- cbind(data.frame(Variable = row.names(loadings)), loadings) row.names(loadings) <- NULL # Labels if (!is.null(labels)) { loadings$Label <- labels loadings <- loadings[c("Variable", "Label", names(loadings)[!names(loadings) %in% c("Variable", "Label")])] loading_cols <- 3:(n + 2) } else { loading_cols <- 2:(n + 1) } # Add information loadings$Complexity <- model$complexity loadings$Uniqueness <- model$uniquenesses loadings$MSA <- attributes(model)$MSA # Add attributes attr(loadings, "summary") <- data_summary attr(loadings, "model") <- model attr(loadings, "rotation") <- model$rotation attr(loadings, "scores") <- model$scores attr(loadings, "additional_arguments") <- list(...) attr(loadings, "n") <- n attr(loadings, "threshold") <- threshold attr(loadings, "sort") <- sort attr(loadings, "type") <- model$fn attr(loadings, "loadings_columns") <- loading_cols # Sorting if (isTRUE(sort)) { loadings <- .sort_loadings(loadings) } # Add some more attributes attr(loadings, "loadings_long") <- .long_loadings(loadings, threshold = threshold, loadings_columns = loading_cols) # here we match the original columns in the data set with the assigned components # for each variable, so we know which column in the original data set belongs # to which extracted component... attr(loadings, "closest_component") <- .closest_component( loadings, loadings_columns = loading_cols, variable_names = rownames(model$loadings) ) # add class-attribute for printing if (model$fn == "principal") { class(loadings) <- unique(c("parameters_pca", "see_parameters_pca", class(loadings))) } else { class(loadings) <- unique(c("parameters_efa", "see_parameters_efa", class(loadings))) } loadings } #' @export model_parameters.fa <- model_parameters.principal #' @export model_parameters.fa.ci <- model_parameters.fa #' @export model_parameters.omega <- function(model, sort = FALSE, threshold = NULL, labels = NULL, ...) 
{ # n n <- model$stats$factors # Get summary data_summary <- .get_omega_variance_summary(model) # Get omega coefficients omega_coefficients <- .get_omega_coefficients_summary(model) # Get loadings loadings <- as.data.frame(unclass(model$schmid$sl)) # Format loadings <- cbind(data.frame(Variable = row.names(loadings)), loadings) row.names(loadings) <- NULL # Labels if (!is.null(labels)) { loadings$Label <- labels loadings <- loadings[c("Variable", "Label", names(loadings)[!names(loadings) %in% c("Variable", "Label")])] loading_cols <- 3:(n + 4) } else { loading_cols <- 2:(n + 3) } # Add information colnames(loadings)[colnames(loadings) == "com"] <- "Complexity" rotation <- model$Call$rotate if (is.null(rotation)) { rotation <- "oblimin" } # Add attributes attr(loadings, "summary") <- data_summary attr(loadings, "omega_coefficients") <- omega_coefficients attr(loadings, "model") <- model attr(loadings, "rotation") <- rotation attr(loadings, "scores") <- model$scores attr(loadings, "additional_arguments") <- list(...) attr(loadings, "n") <- n attr(loadings, "threshold") <- threshold attr(loadings, "sort") <- sort attr(loadings, "loadings_columns") <- loading_cols # Sorting if (isTRUE(sort)) { loadings <- .sort_loadings(loadings) } # Add some more attributes attr(loadings, "loadings_long") <- .long_loadings(loadings, threshold = threshold, loadings_columns = loading_cols) # here we match the original columns in the data set with the assigned components # for each variable, so we know which column in the original data set belongs # to which extracted component... attr(loadings, "closest_component") <- .closest_component( loadings, loadings_columns = loading_cols, variable_names = rownames(model$schmid$sl) ) # add class-attribute for printing class(loadings) <- c("parameters_omega", class(loadings)) loadings } #' @export model_parameters.item_omega <- function(model, sort = FALSE, threshold = NULL, labels = NULL, ...) { x <- attributes(model)$model model_parameters(x, sort = sort, threshold = threshold, labels = labels, ...) 
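  # (delegation works because "item_omega" objects store the underlying
  # psych::omega() fit in their "model" attribute, extracted above)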
} # helper ------------------------------------------------ .get_fa_variance_summary <- function(model) { n <- model$factors variance <- as.data.frame(unclass(model$Vaccounted)) data_summary <- .data_frame( Component = names(variance), Eigenvalues = model$values[1:n], Variance = as.numeric(variance["Proportion Var", ]) ) if ("Cumulative Var" %in% row.names(variance)) { data_summary$Variance_Cumulative <- as.numeric(variance["Cumulative Var", ]) } else if (ncol(variance) == 1) { data_summary$Variance_Cumulative <- as.numeric(variance["Proportion Var", ]) } else { data_summary$Variance_Cumulative <- NA } data_summary$Variance_Proportion <- data_summary$Variance / sum(data_summary$Variance) data_summary } .get_omega_variance_summary <- function(model) { # Get summary: Table of Variance table_var <- as.data.frame(unclass(model$omega.group)) table_var$Composite <- rownames(model$omega.group) table_var$Total <- table_var$total * 100 table_var$General <- table_var$general * 100 table_var$Group <- table_var$group * 100 table_var[c("Composite", "Total", "General", "Group")] } .get_omega_coefficients_summary <- function(model) { # Table of omega coefficients table_om <- model$omega.group colnames(table_om) <- c("Omega_Total", "Omega_Hierarchical", "Omega_Group") table_om$Composite <- row.names(table_om) row.names(table_om) <- NULL table_om[c("Composite", names(table_om)[names(table_om) != "Composite"])] } parameters/R/methods_estimatr.R0000644000176200001440000000135714314304411016315 0ustar liggesusers#' @export standard_error.lm_robust <- function(model, ...) { if (insight::is_multivariate(model)) { standard_error.mlm(model, ...) } else { standard_error.default(model, ...) } } #' @export p_value.lm_robust <- function(model, ...) { if (insight::is_multivariate(model)) { p_value.mlm(model, ...) } else { p_value.default(model, ...) } } #' @export ci.lm_robust <- function(x, ...) { if (insight::is_multivariate(x)) { ci.mlm(x, ...) } else { ci.default(x, ...) } } #' @export model_parameters.lm_robust <- function(model, ...) { if (insight::is_multivariate(model)) { model_parameters.mlm(model, ...) } else { model_parameters.default(model, ...) } } parameters/R/2_ci.R0000644000176200001440000001121714717111736013567 0ustar liggesusers#' @title Confidence Intervals (CI) #' @name ci.default #' #' @description `ci()` attempts to return confidence intervals of model parameters. #' #' @param x A statistical model. #' @param ci Confidence Interval (CI) level. Default to `0.95` (`95%`). #' @param dof Number of degrees of freedom to be used when calculating #' confidence intervals. If `NULL` (default), the degrees of freedom are #' retrieved by calling [`insight::get_df()`] with approximation method defined #' in `method`. If not `NULL`, use this argument to override the default degrees #' of freedom used to compute confidence intervals. #' @param method Method for computing degrees of freedom for confidence #' intervals (CI) and the related p-values. Allowed are following options (which #' vary depending on the model class): `"residual"`, `"normal"`, `"likelihood"`, #' `"satterthwaite"`, `"kenward"`, `"wald"`, `"profile"`, `"boot"`, `"uniroot"`, #' `"ml1"`, `"betwithin"`, `"hdi"`, `"quantile"`, `"ci"`, `"eti"`, `"si"`, #' `"bci"`, or `"bcai"`. See section _Confidence intervals and approximation of #' degrees of freedom_ in [`model_parameters()`] for further details. #' @param component Model component for which parameters should be shown. 
See #' the documentation for your object's class in [`model_parameters()`] or #' [`p_value()`] for further details, or see section _Model components_. #' @param iterations The number of bootstrap replicates. Only applies to models #' of class `merMod` when `method=boot`. #' @param verbose Toggle warnings and messages. #' @param ... Additional arguments passed down to the underlying functions. #' E.g., arguments like `vcov` or `vcov_args` can be used to compute confidence #' intervals using a specific variance-covariance matrix for the standard #' errors. #' @inheritParams standard_error #' #' @return A data frame containing the CI bounds. #' #' @inheritSection model_parameters Confidence intervals and approximation of degrees of freedom #' #' @inheritSection model_parameters.zcpglm Model components #' #' @examplesIf require("glmmTMB") && requireNamespace("sandwich") #' data(qol_cancer) #' model <- lm(QoL ~ time + age + education, data = qol_cancer) #' #' # regular confidence intervals #' ci(model) #' #' # using heteroscedasticity-robust standard errors #' ci(model, vcov = "HC3") #' #' \donttest{ #' library(parameters) #' data(Salamanders, package = "glmmTMB") #' model <- glmmTMB::glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' #' ci(model) #' ci(model, component = "zi") #' } #' @export ci.default <- function(x, ci = 0.95, dof = NULL, method = NULL, iterations = 500, component = "all", vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { # check for valid input .is_model_valid(x) .ci_generic( model = x, ci = ci, dof = dof, method = method, component = component, vcov = vcov, vcov_args = vcov_args, verbose = verbose, ... ) } #' @export ci.glm <- function(x, ci = 0.95, dof = NULL, method = "profile", vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { method <- insight::validate_argument( method, c("profile", "wald", "normal", "residual") ) # No robust vcov for profile method if (method == "profile") { if ((!is.null(vcov) || !is.null(vcov_args)) && isTRUE(verbose)) { insight::format_alert( "The `vcov` and `vcov_args` are not available with `method=\"profile\"`." ) } out <- lapply(ci, function(i) .ci_profiled(model = x, ci = i)) out <- do.call(rbind, out) } else { out <- .ci_generic( model = x, ci = ci, dof = dof, method = method, vcov = vcov, vcov_args = vcov_args, verbose = verbose, ... ) } # Return the CI bounds as a data frame. row.names(out) <- NULL out } # helper ----------------------------------------- #' @keywords internal .check_component <- function(m, x, verbose = TRUE) { if (x %in% c("zi", "zero_inflated")) { minfo <- insight::model_info(m, verbose = FALSE) if (!isTRUE(minfo$is_zero_inflated)) { if (isTRUE(verbose)) { message("Model has no zero-inflation component!") } x <- NULL } } x } parameters/R/methods_plm.R0000644000176200001440000000624414736731407015276 0ustar liggesusers# plm package: .plm, .pgmm, .pggls # plm --------------------------- #' @export standard_error.plm <- function(model, vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { dots <- list(...) 
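  # `vcov` may be a matrix, a function returning one, or a string passed on
  # to `insight::get_varcov()`; classical SEs from `summary()` are the
  # fallback. A hedged usage sketch (assuming the `plm` and `sandwich`
  # packages are installed):
  #   data("Grunfeld", package = "plm")
  #   m <- plm::plm(inv ~ value + capital, data = Grunfeld, model = "within")
  #   standard_error(m)               # classical SEs
  #   standard_error(m, vcov = "HC1") # heteroscedasticity-robust SEs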
se <- NULL se_standard <- stats::coef(summary(model)) # vcov: matrix if (is.matrix(vcov)) { se <- sqrt(diag(vcov)) } # vcov: function which returns a matrix if (is.function(vcov)) { fun_args <- c(list(model), vcov_args, dots) se <- .safe(sqrt(diag(do.call("vcov", fun_args)))) } # vcov: character (with backward compatibility for `robust = TRUE`) if (is.character(vcov) || isTRUE(dots[["robust"]])) { .vcov <- insight::get_varcov( model, vcov = vcov, vcov_args = vcov_args, verbose = verbose, ... ) se <- sqrt(diag(.vcov)) } if (is.null(se)) { se <- as.vector(se_standard[, 2]) } .data_frame( Parameter = .remove_backticks_from_string(rownames(se_standard)), SE = se ) } #' @export p_value.plm <- p_value.default # pggls ------------------------ #' @export p_value.pggls <- function(model, ...) { cs <- summary(model)$CoefTable p <- cs[, 4] .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) } # pgmm -------------------- #' @export model_parameters.pgmm <- function(model, ci = 0.95, component = c("conditional", "all"), exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { component <- match.arg(component) params <- .extract_parameters_generic( model, merge_by = c("Parameter", "Component"), ci = ci, component = component, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, ... ) # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) params <- .add_model_parameters_attributes( params, model, ci, exponentiate, p_adjust = p_adjust, verbose = verbose, ... ) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export standard_error.pgmm <- function(model, component = c("conditional", "all"), ...) { component <- match.arg(component) params <- insight::get_parameters(model, component = component, ...) se <- sqrt(diag(insight::get_varcov(model, component = component, ...))) .data_frame( Parameter = params$Parameter, SE = as.vector(se) ) } #' @export ci.pgmm <- function(x, ci = 0.95, dof = Inf, method = NULL, component = "conditional", ...) { if (is.null(method)) { method <- "wald" } else { method <- tolower(method) } .ci_generic(model = x, ci = ci, dof = dof, method = method, component = component) } parameters/R/methods_BayesX.R0000644000176200001440000000106314317274256015672 0ustar liggesusers#' @export standard_error.bayesx <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, component = "conditional", flatten = TRUE), SE = model$fixed.effects[, 2] ) } #' @export ci.bayesx <- function(x, ci = 0.95, ...) { .ci_generic(model = x, ci = ci, dof = Inf, component = "conditional", ...) } #' @export p_value.bayesx <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, component = "conditional", flatten = TRUE), p = model$fixed.effects[, 4] ) } parameters/R/simulate_parameters.R0000644000176200001440000000657714717111737017037 0ustar liggesusers#' @title Simulate Model Parameters #' @name simulate_parameters #' #' @description Compute simulated draws of parameters and their related indices #' such as Confidence Intervals (CI) and p-values. Simulating parameter draws #' can be seen as a (computationally faster) alternative to bootstrapping. #' #' @inheritParams simulate_model #' @inheritParams bayestestR::describe_posterior #' #' @return A data frame with simulated parameters. 
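#'
#' Conceptually, the draws underlying this output are equivalent to the
#' following sketch (see section 'Technical Details'; the model `m` is a
#' hypothetical example):
#'
#' ```r
#' m <- lm(mpg ~ wt + cyl, data = mtcars)
#' draws <- MASS::mvrnorm(1000, mu = coef(m), Sigma = vcov(m))
#' apply(draws, 2, stats::median) # close to the reported point estimates
#' ```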
#' #' @references Gelman A, Hill J. Data analysis using regression and #' multilevel/hierarchical models. Cambridge; New York: Cambridge University #' Press 2007: 140-143 #' #' @seealso [`bootstrap_model()`], [`bootstrap_parameters()`], [`simulate_model()`] #' #' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) implemented in the [**see**-package](https://easystats.github.io/see/). #' #' @details #' ## Technical Details #' `simulate_parameters()` is a computationally faster alternative #' to `bootstrap_parameters()`. Simulated draws for coefficients are based #' on a multivariate normal distribution (`MASS::mvrnorm()`) with mean #' `mu = coef(model)` and variance `Sigma = vcov(model)`. #' #' ## Models with Zero-Inflation Component #' For models from packages **glmmTMB**, **pscl**, **GLMMadaptive** and #' **countreg**, the `component` argument can be used to specify #' which parameters should be simulated. For all other models, parameters #' from the conditional component (fixed effects) are simulated. This may #' include smooth terms, but not random effects. #' #' @examples #' model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris) #' simulate_parameters(model) #' #' \donttest{ #' if (require("glmmTMB", quietly = TRUE)) { #' model <- glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' simulate_parameters(model, centrality = "mean") #' simulate_parameters(model, ci = c(.8, .95), component = "zero_inflated") #' } #' } #' @export simulate_parameters <- function(model, ...) { UseMethod("simulate_parameters") } #' @rdname simulate_parameters #' @export simulate_parameters.default <- function(model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ...) { # check for valid input .is_model_valid(model) sim_data <- simulate_model(model, iterations = iterations, ...) out <- .summary_bootstrap( data = sim_data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) params <- insight::get_parameters(model, verbose = FALSE) if ("Effects" %in% colnames(params) && insight::n_unique(params$Effects) > 1) { out$Effects <- params$Effects } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(out, "iterations") <- iterations attr(out, "ci") <- ci attr(out, "ci_method") <- ci_method attr(out, "centrality") <- centrality attr(out, "simulated") <- TRUE out } parameters/R/methods_bbmle.R0000644000176200001440000000114314507235543015554 0ustar liggesusers#' @export model_parameters.mle2 <- model_parameters.glm #' @export ci.mle2 <- ci.glm #' @export standard_error.mle2 <- function(model, ...) { insight::check_if_installed("bbmle") s <- bbmle::summary(model) .data_frame( Parameter = names(s@coef[, 2]), SE = unname(s@coef[, 2]) ) } #' @export p_value.mle2 <- function(model, ...) { insight::check_if_installed("bbmle") s <- bbmle::summary(model) .data_frame( Parameter = names(s@coef[, 4]), p = unname(s@coef[, 4]) ) } #' @export format_parameters.mle2 <- function(model, ...) { NULL } parameters/R/utils_model_parameters.R0000644000176200001440000004304115057544134017516 0ustar liggesusers#' This function adds meta-information to the returned parameters data frame, #' usually used for printing etc. 
Prettifying names can be time consuming, if #' it is not necessary to have pretty names, set `pretty_names = FALSE` #' #' @keywords internal #' @noRd .add_model_parameters_attributes <- function(params, model, ci, exponentiate = FALSE, bootstrap = FALSE, iterations = 1000, ci_method = NULL, p_adjust = NULL, include_info = FALSE, verbose = TRUE, group_level = FALSE, wb_component = FALSE, modelinfo = NULL, ...) { # capture additional arguments dot.arguments <- list(...) # model info if (is.null(modelinfo)) { info <- .safe(suppressWarnings(insight::model_info(model, verbose = FALSE))) } else { info <- modelinfo } if (is.null(info)) { info <- list(family = "unknown", link_function = "unknown") } # for simplicity, we just use the model information from the first formula # when we have multivariate response models... if (insight::is_multivariate(model) && !"is_zero_inflated" %in% names(info) && !inherits(model, c("vgam", "vglm"))) { info <- info[[1]] } # add regular attributes if (isFALSE(dot.arguments$pretty_names)) { attr(params, "pretty_names") <- params$Parameter } else if (is.null(attr(params, "pretty_names", exact = TRUE))) { attr(params, "pretty_names") <- suppressWarnings(format_parameters(model, model_info = info, ...)) } attr(params, "ci") <- ci attr(params, "ci_method") <- .format_ci_method_name(ci_method) attr(params, "df_method") <- .format_ci_method_name(ci_method) attr(params, "verbose") <- verbose attr(params, "exponentiate") <- exponentiate attr(params, "ordinal_model") <- isTRUE(info$is_ordinal) | isTRUE(info$is_multinomial) attr(params, "linear_model") <- isTRUE(info$is_linear) attr(params, "mixed_model") <- isTRUE(info$is_mixed) attr(params, "n_obs") <- info$n_obs attr(params, "model_class") <- as.character(class(model)) attr(params, "bootstrap") <- bootstrap attr(params, "iterations") <- iterations attr(params, "p_adjust") <- p_adjust attr(params, "robust_vcov") <- "vcov" %in% names(list(...)) attr(params, "ignore_group") <- isFALSE(group_level) attr(params, "ran_pars") <- isFALSE(group_level) attr(params, "show_summary") <- isTRUE(include_info) attr(params, "log_link") <- isTRUE(grepl("log", info$link_function, fixed = TRUE)) attr(params, "logit_link") <- isTRUE(identical(info$link_function, "logit")) # save model call attr(params, "model_call") <- .safe(insight::get_call(model)) # use tryCatch, these might fail... 
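  # (`.safe()` is a thin `tryCatch()` wrapper that returns `NULL` - or a
  # supplied default - on error, so unsupported extractors simply yield
  # empty attributes instead of failing)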
attr(params, "test_statistic") <- .safe(insight::find_statistic(model)) attr(params, "log_response") <- .safe(isTRUE(grepl("log", insight::find_transformation(model), fixed = TRUE))) attr(params, "log_predictors") <- .safe(any(grepl("log", unlist(insight::find_terms(model, verbose = FALSE)[c("conditional", "zero_inflated", "instruments")]), fixed = TRUE))) # nolint # save if model is multivariate response model if (isTRUE(info$is_multivariate)) { attr(params, "multivariate_response") <- TRUE } # if we have a complex random-within-between model, don't show first title element if (isTRUE(wb_component) && !is.null(params$Component) && any(c("within", "between") %in% params$Component)) { attr(params, "no_caption") <- TRUE } # for additional infos, add R2, RMSE if (isTRUE(include_info) && requireNamespace("performance", quietly = TRUE)) { rsq <- .safe(suppressWarnings(performance::r2(model))) attr(params, "r2") <- rsq rmse <- .safe(performance::performance_rmse(model)) attr(params, "rmse") <- rmse } # Models for which titles should be removed - here we add exceptions for # objects that should not have a table headline like "# Fixed Effects", when # there is nothing else than fixed effects (redundant title) if (inherits(model, c( "mediate", "emmGrid", "emm_list", "summary_emm", "lm", "averaging", "glm", "coxph", "bfsl", "deltaMethod", "phylolm", "phyloglm" ))) { attr(params, "no_caption") <- TRUE attr(params, "title") <- "" } # weighted nobs weighted_nobs <- .safe({ w <- insight::get_weights(model, remove_na = TRUE, null_as_ones = TRUE) round(sum(w)) }) attr(params, "weighted_nobs") <- weighted_nobs # model formula model_formula <- .safe(insight::safe_deparse(insight::find_formula(model, verbose = FALSE)$conditional)) # nolint attr(params, "model_formula") <- model_formula # column name for coefficients - for emm_list, we can have # multiple different names for the parameter column. for other # models, check whether we have coefficient, odds ratios, IRR etc. if (inherits(model, "emm_list")) { coef_col1 <- .find_coefficient_type(info, exponentiate, model[[1]]) coef_col2 <- .find_coefficient_type(info, exponentiate, model[[2]]) attr(params, "coefficient_name") <- coef_col1 attr(params, "coefficient_name2") <- coef_col2 } else { coef_col <- .find_coefficient_type(info, exponentiate, model) attr(params, "coefficient_name") <- coef_col attr(params, "zi_coefficient_name") <- if (isTRUE(exponentiate)) { "Odds Ratio" } else { "Log-Odds" } } # special handling for meta analysis. we need additional # information about study weights if (inherits(model, c("rma", "rma.uni"))) { rma_data <- .safe(insight::get_data(model, verbose = FALSE)) attr(params, "data") <- rma_data attr(params, "study_weights") <- 1 / model$vi } # special handling for meta analysis again, but these objects save the # inverse weighting information in a different column. if (inherits(model, c("meta_random", "meta_fixed", "meta_bma"))) { rma_data <- .safe(insight::get_data(model, verbose = FALSE)) attr(params, "data") <- rma_data attr(params, "study_weights") <- 1 / params$SE^2 } # should coefficients be grouped? if ("groups" %in% names(dot.arguments)) { attr(params, "coef_groups") <- dot.arguments[["groups"]] } # now comes all the digits stuff... 
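  # these attributes only control printing and formatting, not the stored
  # values; a hedged usage sketch:
  #   model_parameters(model, digits = 3, p_digits = 5)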
if ("digits" %in% names(dot.arguments)) { attr(params, "digits") <- dot.arguments[["digits"]] } else { attr(params, "digits") <- 2 } if ("ci_digits" %in% names(dot.arguments)) { attr(params, "ci_digits") <- dot.arguments[["ci_digits"]] } else { attr(params, "ci_digits") <- NULL } if ("p_digits" %in% names(dot.arguments)) { attr(params, "p_digits") <- dot.arguments[["p_digits"]] } else { attr(params, "p_digits") <- 3 } if ("footer_digits" %in% names(dot.arguments)) { attr(params, "footer_digits") <- dot.arguments[["footer_digits"]] } else { attr(params, "footer_digits") <- 3 } if ("s_value" %in% names(dot.arguments)) { attr(params, "s_value") <- dot.arguments[["s_value"]] } # pd? if (isTRUE(dot.arguments[["pd"]]) && !is.null(params[["p"]])) { params$pd <- bayestestR::p_to_pd(params[["p"]]) } # add CI, and reorder if (!"CI" %in% colnames(params) && length(ci) == 1) { params$CI <- ci ci_pos <- grep("CI_low", colnames(params), fixed = TRUE) if (length(ci_pos)) { if (length(ci_pos) > 1) { ci_pos <- ci_pos[1] } a <- attributes(params) params <- params[c(1:(ci_pos - 1), ncol(params), ci_pos:(ncol(params) - 1))] attributes(params) <- utils::modifyList(a, attributes(params)) } } # include reference level? if (isTRUE(dot.arguments[["include_reference"]])) { a <- attributes(params) params <- .safe(.add_reference_level(params, model), params) attributes(params) <- utils::modifyList(a, attributes(params)) } # add parameters with value and variable attr(params, "pretty_labels") <- .format_value_labels(params, model) row.names(params) <- NULL params } #' Format CI method name when stored as an attribute #' #' @keywords internal #' @noRd .format_ci_method_name <- function(ci_method) { if (is.null(ci_method)) { return(NULL) } switch(tolower(ci_method), # abbreviations eti = , hdi = , si = toupper(ci_method), # named after people satterthwaite = , kenward = , wald = insight::format_capitalize(ci_method), # special cases bci = , bcai = "BCa", # no change otherwise ci_method ) } #' This function formats the column name of the printed output, to reflect #' the correct type of coefficient. #' #' @keywords internal #' @noRd .find_coefficient_type <- function(info, exponentiate, model = NULL) { # column name for coefficients coef_col <- "Coefficient" if (!is.null(model) && inherits(model, "emmGrid")) { s <- summary(model) name <- attributes(s)$estName if (!is.null(name)) { coef_col <- switch(name, prob = "Probability", odds.ratio = "Odds Ratio", emmean = "Marginal Means", rate = "Estimated Counts", ratio = "Ratio", "Coefficient" ) } } else if (!is.null(info) && info$family != "unknown") { if (isTRUE(exponentiate)) { if (info$is_exponential && identical(info$link_function, "log")) { coef_col <- "Prevalence Ratio" } else if (info$is_probit) { coef_col <- "Coefficient" } else if ((info$is_binomial && info$is_logit) || info$is_ordinal || info$is_multinomial || info$is_categorical) { coef_col <- "Odds Ratio" } else if (info$is_binomial && !info$is_logit) { if (info$link_function == "identity") { coef_col <- "Exp. 
Risk" } else { coef_col <- "Risk Ratio" } } else if (info$is_count) { coef_col <- "IRR" } } else if (info$is_exponential && identical(info$link_function, "log")) { coef_col <- "Log-Prevalence" } else if (info$is_probit) { coef_col <- "Z-Score" } else if ((info$is_binomial && info$is_logit) || info$is_ordinal || info$is_multinomial || info$is_categorical) { coef_col <- "Log-Odds" } else if (info$is_binomial && !info$is_logit) { if (info$link_function == "identity") { coef_col <- "Risk" } else { coef_col <- "Log-Risk" } } else if (info$is_count) { coef_col <- "Log-Mean" } } coef_col } .is_valid_exponentiate_argument <- function(exponentiate) { isTRUE(exponentiate) || identical(exponentiate, "nongaussian") } #' This function exponentiates coefficients, e.g. to return odds ratios instead #' of log-odds ratios. #' #' @keywords internal #' @noRd .exponentiate_parameters <- function(params, model = NULL, exponentiate = TRUE) { # "exponentiate" must be # - TRUE, will always exponentiate all coefficients # - "nongaussian", will exponentiate all coefficients for models with non-gaussian family if (!.is_valid_exponentiate_argument(exponentiate)) { return(params) } # check if non-gaussian applies if (!is.null(model) && insight::model_info(model, verbose = FALSE)$is_linear && identical(exponentiate, "nongaussian")) { return(params) } # pattern for marginaleffects objects if (is.null(attr(params, "coefficient_name"))) { pattern <- "^(Coefficient|Mean|Median|MAP|Std_Coefficient|CI_|Std_CI)" } else { pattern <- sprintf( "^(Coefficient|Mean|Median|MAP|Std_Coefficient|%s|CI_|Std_CI)", attr(params, "coefficient_name") ) } columns <- grepl(pattern = pattern, colnames(params)) if (any(columns)) { if (inherits(model, "mvord")) { rows <- params$Component != "correlation" } else if (is.null(params$Component)) { rows <- seq_len(nrow(params)) } else { # don't exponentiate dispersion rows <- !tolower(params$Component) %in% c("dispersion", "residual") } params[rows, columns] <- exp(params[rows, columns]) if (all(c("Coefficient", "SE") %in% names(params))) { params$SE[rows] <- params$Coefficient[rows] * params$SE[rows] } } params } #' this function extracts the table with cleaned parameter names, extracted #' from `insight::clean_parameters()`. it first checks whether this object #' is saved as attribute, and if not, calls `insight::clean_parameters()`. #' #' @keywords internal #' @noRd .get_cleaned_parameters <- function(params, model) { # check if we have cleaned parameters as attributes cp <- attributes(params)$clean_parameters # if not, add if (is.null(cp)) { cp <- insight::clean_parameters(model) } cp } #' this function extract "prettified" parameter names, using #' `insight::clean_parameters()`, and matches them with the parameter names. 
#' the result is a named vector, added as attributes to the output #' #' @keywords internal #' @noRd .add_pretty_names <- function(params, model) { attr(params, "model_class") <- class(model) # check if we have cleaned parameters as attributes cp <- .get_cleaned_parameters(params, model) clean_params <- cp[cp$Parameter %in% params$Parameter, ] named_clean_params <- stats::setNames( clean_params$Cleaned_Parameter[match(params$Parameter, clean_params$Parameter)], params$Parameter ) # add Group variable if (!is.null(clean_params$Group) && any(nzchar(clean_params$Group, keepNA = TRUE))) { params$Group <- .safe(gsub("(.*): (.*)", "\\2", clean_params$Group)) } attr(params, "cleaned_parameters") <- named_clean_params attr(params, "pretty_names") <- named_clean_params params } #' @keywords internal .add_anova_attributes <- function(params, model, ci, test = NULL, alternative = NULL, p_adjust = NULL, ...) { dot.arguments <- lapply(match.call(expand.dots = FALSE)$`...`, function(x) x) # nolint attr(params, "ci") <- ci attr(params, "model_class") <- class(model) attr(params, "anova_type") <- .anova_type(model) attr(params, "text_alternative") <- .anova_alternative(params, alternative) attr(params, "p_adjust") <- p_adjust if (inherits(model, "Anova.mlm") && !identical(test, "univariate")) { attr(params, "anova_test") <- model$test } # some tweaks for MANOVA, so outputs of manova(model) and car::Manova(model) # look the same, see #833 if (inherits(model, "maov") && is.null(test) && "Pillai" %in% names(params)) { attr(params, "anova_test") <- "Pillai" names(params)[names(params) == "Pillai"] <- "Statistic" } # here we add exception for objects that should not have a table headline if (inherits(model, c("aov", "anova", "lm"))) { attr(params, "title") <- "" } if ("digits" %in% names(dot.arguments)) { attr(params, "digits") <- eval(dot.arguments[["digits"]]) } else { attr(params, "digits") <- 2 } if ("ci_digits" %in% names(dot.arguments)) { attr(params, "ci_digits") <- eval(dot.arguments[["ci_digits"]]) } else { attr(params, "ci_digits") <- NULL } if ("p_digits" %in% names(dot.arguments)) { attr(params, "p_digits") <- eval(dot.arguments[["p_digits"]]) } else { attr(params, "p_digits") <- 3 } if ("s_value" %in% names(dot.arguments)) { attr(params, "s_value") <- eval(dot.arguments[["s_value"]]) } params } .additional_arguments <- function(x, value, default) { add_args <- attributes(x)$additional_arguments if (length(add_args) > 0 && value %in% names(add_args)) { out <- add_args[[value]] } else { out <- attributes(x)[[value]] } if (is.null(out)) { out <- default } out } #' checks for valid inputs in model_parameters(). E.g., some models don't support #' the "vcov" argument - this should not be silently ignored, but rather the user #' should be informed that robust SE are not available for that model. 
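#'
#' A hedged internal-usage sketch (hypothetical arguments): a call like
#' `.check_dots(list(vcov = "HC3"), not_allowed = "vcov", model_class = "rma")`
#' would message that `vcov` is ignored and return `NULL`.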
#' #' @keywords internal #' @noRd .check_dots <- function(dots, not_allowed, model_class, function_name = "model_parameters", verbose = TRUE) { # remove arguments that are NULL dots <- insight::compact_list(dots) # return if no args if (!length(dots) || is.null(dots)) { return(NULL) } not_allowed <- not_allowed[which(not_allowed %in% names(dots))] if (length(not_allowed)) { if (verbose) { not_allowed_string <- datawizard::text_concatenate(not_allowed, enclose = "\"") insight::format_alert( sprintf("Following arguments are not supported in `%s()` for models of class `%s` and will be ignored: %s", function_name, model_class, not_allowed_string), # nolint sprintf("In case you obtain expected results, please run `%s()` again without specifying the above mentioned arguments.", function_name) # nolint ) } dots[not_allowed] <- NULL if (!length(dots)) { dots <- NULL } } dots } # functions to check if necessary default argument was provided ------------ .is_model_valid <- function(model) { if (missing(model) || is.null(model)) { insight::format_error( "You must provide a model-object. Argument `model` cannot be missing or `NULL`." ) } } parameters/R/cluster_analysis.R0000644000176200001440000004406615063017406016341 0ustar liggesusers#' Cluster Analysis #' #' Compute hierarchical or kmeans cluster analysis and return the group #' assignment for each observation as vector. #' #' @references #' - Maechler M, Rousseeuw P, Struyf A, Hubert M, Hornik K (2014) cluster: Cluster #' Analysis Basics and Extensions. R package. #' #' @param x A data frame (with at least two variables), or a matrix (with at #' least two columns). #' @param n Number of clusters used for supervised cluster methods. If `NULL`, #' the number of clusters to extract is determined by calling [`n_clusters()`]. #' Note that this argument does not apply for unsupervised clustering methods #' like `dbscan`, `hdbscan`, `mixture`, `pvclust`, or `pamk`. #' @param method Method for computing the cluster analysis. Can be `"kmeans"` #' (default; k-means using `kmeans()`), `"hkmeans"` (hierarchical k-means #' using `factoextra::hkmeans()`), `pam` (K-Medoids using `cluster::pam()`), #' `pamk` (K-Medoids that finds out the number of clusters), `"hclust"` #' (hierarchical clustering using `hclust()` or `pvclust::pvclust()`), #' `dbscan` (DBSCAN using `dbscan::dbscan()`), `hdbscan` (Hierarchical DBSCAN #' using `dbscan::hdbscan()`), or `mixture` (Mixture modeling using #' `mclust::Mclust()`, which requires the user to run `library(mclust)` #' before). #' @param distance_method Distance measure to be used for methods based on #' distances (e.g., when `method = "hclust"` for hierarchical clustering. For #' other methods, such as `"kmeans"`, this argument will be ignored). Must be #' one of `"euclidean"`, `"maximum"`, `"manhattan"`, `"canberra"`, `"binary"` #' or `"minkowski"`. See [`dist()`] and `pvclust::pvclust()` for more #' information. #' @param hclust_method Agglomeration method to be used when `method = "hclust"` #' or `method = "hkmeans"` (for hierarchical clustering). This should be one #' of `"ward"`, `"ward.D2"`, `"single"`, `"complete"`, `"average"`, #' `"mcquitty"`, `"median"` or `"centroid"`. Default is `"complete"` (see #' [`hclust()`]). #' @param kmeans_method Algorithm used for calculating kmeans cluster. Only applies, #' if `method = "kmeans"`. May be one of `"Hartigan-Wong"` (default), #' `"Lloyd"` (used by SPSS), or `"MacQueen"`. See [`kmeans()`] for details on #' this argument. #' @param iterations The number of replications. 
#' @param dbscan_eps The `eps` argument for DBSCAN method. See [`n_clusters_dbscan()`]. #' #' @inheritParams equivalence_test.lm #' @inheritParams n_clusters #' #' @return The group classification for each observation as vector. The #' returned vector includes missing values, so it has the same length #' as `nrow(x)`. #' #' @note #' There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) #' implemented in the [**see**-package](https://easystats.github.io/see/). #' #' @details #' The `print()` and `plot()` methods show the (standardized) mean value for #' each variable within each cluster. Thus, a higher absolute value indicates #' that a certain variable characteristic is more pronounced within that #' specific cluster (as compared to other cluster groups with lower absolute #' mean values). #' #' Clusters classification can be obtained via `print(x, newdata = NULL, ...)`. #' #' @seealso #' - [`n_clusters()`] to determine the number of clusters to extract. #' - [`cluster_discrimination()`] to determine the accuracy of cluster group #' classification via linear discriminant analysis (LDA). #' - [`performance::check_clusterstructure()`] to check suitability of data #' for clustering. #' - https://www.datanovia.com/en/lessons/ #' #' @examples #' set.seed(33) #' # K-Means ==================================================== #' rez <- cluster_analysis(iris[1:4], n = 3, method = "kmeans") #' rez # Show results #' predict(rez) # Get clusters #' summary(rez) # Extract the centers values (can use 'plot()' on that) #' if (requireNamespace("MASS", quietly = TRUE)) { #' cluster_discrimination(rez) # Perform LDA #' } #' #' # Hierarchical k-means (more robust k-means) #' if (require("factoextra", quietly = TRUE)) { #' rez <- cluster_analysis(iris[1:4], n = 3, method = "hkmeans") #' rez # Show results #' predict(rez) # Get clusters #' } #' #' # Hierarchical Clustering (hclust) =========================== #' rez <- cluster_analysis(iris[1:4], n = 3, method = "hclust") #' rez # Show results #' predict(rez) # Get clusters #' #' # K-Medoids (pam) ============================================ #' if (require("cluster", quietly = TRUE)) { #' rez <- cluster_analysis(iris[1:4], n = 3, method = "pam") #' rez # Show results #' predict(rez) # Get clusters #' } #' #' # PAM with automated number of clusters #' if (require("fpc", quietly = TRUE)) { #' rez <- cluster_analysis(iris[1:4], method = "pamk") #' rez # Show results #' predict(rez) # Get clusters #' } #' #' # DBSCAN ==================================================== #' if (require("dbscan", quietly = TRUE)) { #' # Note that you can assimilate more outliers (cluster 0) to neighbouring #' # clusters by setting borderPoints = TRUE. #' rez <- cluster_analysis(iris[1:4], method = "dbscan", dbscan_eps = 1.45) #' rez # Show results #' predict(rez) # Get clusters #' } #' #' # Mixture ==================================================== #' if (require("mclust", quietly = TRUE)) { #' library(mclust) # Needs the package to be loaded #' rez <- cluster_analysis(iris[1:4], method = "mixture") #' rez # Show results #' predict(rez) # Get clusters #' } #' @export cluster_analysis <- function(x, n = NULL, method = "kmeans", include_factors = FALSE, standardize = TRUE, verbose = TRUE, distance_method = "euclidean", hclust_method = "complete", kmeans_method = "Hartigan-Wong", dbscan_eps = 15, iterations = 100, ...) 
{ # match arguments method <- match.arg( method, choices = c("kmeans", "hkmeans", "pam", "pamk", "hclust", "dbscan", "hdbscan", "mixture"), several.ok = TRUE ) # Preparation ------------------------------------------------------------- # coerce to data frame if input is a matrix if (is.matrix(x)) { x <- as.data.frame(x) } # validation check - needs data frame if (!is.data.frame(x)) { insight::format_error("`x` needs to be a data frame.") } # validation check - need at least two columns if (ncol(x) < 2) { insight::format_error("At least two variables required to compute a cluster analysis.") } # check if we have a correlation/covariance or distance matrix? if (nrow(x) == ncol(x) && identical(round(x[lower.tri(x)], 10), round(x[upper.tri(x)], 10))) { ## TODO: special handling insight::format_warning( "Input data seems to be a correlation, covariance or similar matrix." ) } # Preprocess data cluster_data <- .prepare_data_clustering(x, include_factors = include_factors, standardize = standardize, ...) # Get number of clusters if (is.null(n) && any(method %in% c("kmeans", "hkmeans", "pam"))) { n <- tryCatch( { nc <- n_clusters(cluster_data, standardize = FALSE, ...) n <- attributes(nc)$n if (verbose) { insight::print_color(sprintf( "Using solution with %i clusters, supported by %i out of %i methods.\n", n, max(summary(nc)$n_Methods), sum(summary(nc)$n_Methods) ), "blue") } n }, error = function(e) { if (isTRUE(verbose)) { insight::format_error( "Could not extract number of clusters. Please provide argument `n`." ) } 2 } ) } # Apply clustering -------------------------------------------------------- if (any(method == "kmeans")) { rez <- .cluster_analysis_kmeans( cluster_data, n = n, kmeans_method = kmeans_method, iterations = iterations, ... ) } else if (any(method == "hkmeans")) { rez <- .cluster_analysis_hkmeans( cluster_data, n = n, kmeans_method = kmeans_method, hclust_method = hclust_method, iterations = iterations, ... ) } else if (any(method == "pam")) { rez <- .cluster_analysis_pam( cluster_data, n = n, distance_method = distance_method, ... ) } else if (any(method == "pamk")) { rez <- .cluster_analysis_pamk( cluster_data, distance_method = distance_method, ... ) } else if (any(method == "hclust")) { rez <- .cluster_analysis_hclust( cluster_data, n = n, distance_method = distance_method, hclust_method = hclust_method, iterations = iterations, ... ) } else if (any(method == "dbscan")) { rez <- .cluster_analysis_dbscan( cluster_data, dbscan_eps = dbscan_eps, ... ) } else if (any(method == "hdbscan")) { rez <- .cluster_analysis_hdbscan( cluster_data, ... ) } else if (any(method %in% c("mixture", "mclust"))) { rez <- .cluster_analysis_mixture( cluster_data, n = n, ... ) } else { insight::format_error("Did not find `method` argument. Could be misspecified.") } # Assign clusters to observations # Create NA-vector of same length as original data frame clusters <- rep(NA, times = nrow(x)) # Create vector with cluster group classification (with missing) if (include_factors) { complete_cases <- stats::complete.cases(x) } else { complete_cases <- stats::complete.cases(x[vapply(x, is.numeric, TRUE)]) } clusters[complete_cases] <- rez$clusters # Get clustering parameters out <- model_parameters(rez$model, data = cluster_data, clusters = clusters, ...) 
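  # attach the fitted object, cluster assignments and preprocessed data, so
  # that predict(), summary() and plot() can reconstruct the solution later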
performance <- cluster_performance(out) attr(out, "model") <- rez$model attr(out, "method") <- method attr(out, "clusters") <- clusters attr(out, "data") <- cluster_data attr(out, "performance") <- performance class(out) <- c("cluster_analysis", class(out)) out } # Clustering Methods -------------------------------------------------------- #' @keywords internal .cluster_analysis_kmeans <- function(cluster_data, n = 2, kmeans_method = "Hartigan-Wong", iterations = 100, ...) { model <- stats::kmeans( cluster_data, centers = n, algorithm = kmeans_method, iter.max = iterations, ... ) list(model = model, clusters = model$cluster) } #' @keywords internal .cluster_analysis_hkmeans <- function(cluster_data, n = 2, kmeans_method = "Hartigan-Wong", hclust_method = "complete", iterations = 100, ...) { insight::check_if_installed("factoextra") model <- factoextra::hkmeans(cluster_data, k = n, km.algorithm = kmeans_method, iter.max = iterations, hc.method = hclust_method, ... ) list(model = model, clusters = model$cluster) } #' @keywords internal .cluster_analysis_pam <- function(cluster_data = NULL, n = 2, distance_method = "euclidean", ...) { insight::check_if_installed("cluster") model <- cluster::pam(cluster_data, k = n, metric = distance_method, ...) list(model = model, clusters = model$clustering) } #' @keywords internal .cluster_analysis_pamk <- function(cluster_data = NULL, distance_method = "euclidean", pamk_method = "ch", ...) { insight::check_if_installed("fpc") model <- fpc::pamk(cluster_data, metric = distance_method, criterion = pamk_method, ...) list(model = model$pamobject, clusters = model$pamobject$clustering) } #' @keywords internal .cluster_analysis_hclust <- function(cluster_data, n = 2, distance_method = "euclidean", hclust_method = "complete", iterations = 100, ...) { if (is.null(n)) { rez <- n_clusters_hclust( cluster_data, preprocess = FALSE, distance_method = distance_method, hclust_method = hclust_method, iterations = iterations, ... ) out <- list(model = attributes(rez)$model, clusters = rez$Cluster) } else { if (distance_method %in% c("correlation", "uncentered", "abscor")) { insight::format_warning( paste0( "Method `", distance_method, "` not supported by regular `hclust()`. Please specify another one or set `n = NULL` to use pvclust." ) ) } cluster_dist <- stats::dist(cluster_data, method = distance_method, ...) model <- stats::hclust(cluster_dist, method = hclust_method, ...) out <- list(model = model, clusters = stats::cutree(model, k = n)) } out } #' @keywords internal .cluster_analysis_dbscan <- function(cluster_data = NULL, dbscan_eps = 0.15, min_size = 0.05, borderPoints = FALSE, ...) { insight::check_if_installed("dbscan") if (min_size < 1) min_size <- round(min_size * nrow(cluster_data)) model <- dbscan::dbscan(cluster_data, eps = dbscan_eps, minPts = min_size, borderPoints = borderPoints, ...) list(model = model, clusters = model$cluster) } #' @keywords internal .cluster_analysis_hdbscan <- function(cluster_data = NULL, min_size = 0.05, ...) { insight::check_if_installed("dbscan") if (min_size < 1) min_size <- round(min_size * nrow(cluster_data)) model <- dbscan::hdbscan(cluster_data, minPts = min_size, ...) list(model = model, clusters = model$cluster) } #' @keywords internal .cluster_analysis_mixture <- function(cluster_data = NULL, n = NULL, ...) { insight::check_if_installed("mclust") model <- mclust::Mclust(cluster_data, G = n, verbose = FALSE, ...) 
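  # When `n` is NULL, Mclust() chooses the number of mixture components
  # itself (comparing candidate models via BIC); `model$classification`
  # then holds the MAP cluster assignment for each observation.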
list(model = model, clusters = model$classification) } # Methods ---------------------------------------------------------------- #' @export #' @inheritParams stats::predict predict.cluster_analysis <- function(object, newdata = NULL, ...) { if (is.null(newdata)) { attributes(object)$clusters } else { NextMethod() } } #' @export print.cluster_analysis <- function(x, ...) { NextMethod() cat("\n") print(attributes(x)$performance) insight::print_color("\n# You can access the predicted clusters via `predict()`.\n", "yellow") invisible(x) } #' @export summary.cluster_analysis <- function(object, ...) { obj_data <- as.data.frame(object) cols <- names(attributes(object)$data) obj_data <- obj_data[names(obj_data) %in% c(cols, "Cluster")] # Keep only data class(obj_data) <- c("cluster_analysis_summary", class(obj_data)) obj_data } # Plotting ---------------------------------------------------------------- #' @export visualisation_recipe.cluster_analysis_summary <- function(x, ...) { data_long <- datawizard::data_to_long( x, select = names(x)[-1], # skip 'Cluster' column names_to = "Group", values_to = "Center" ) layers <- list() # Layers ----------------------- layers[["l1"]] <- list( geom = "bar", data = data_long, aes = list(x = "Cluster", y = "Center", fill = "Group"), position = "dodge" ) layers[["l2"]] <- list( geom = "hline", data = data_long, aes = list(yintercept = 0), linetype = "dotted" ) layers[["l3"]] <- list( geom = "labs", x = "Cluster Group", y = "Center", fill = "Variable", title = "Cluster Centers" ) # Out class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers)) attr(layers, "data") <- data_long layers } #' @export visualisation_recipe.cluster_analysis <- function(x, show_data = "text", ...) { ori_data <- stats::na.omit(attributes(x)$data) # Check number of columns: if more than 2, display PCs, if less, fail if (ncol(ori_data) <= 2) { insight::format_error("Less than 2 variables in the dataset. Cannot compute enough principal components to represent clustering.") # nolint } # Get 2 PCA Components pca <- principal_components(ori_data, n = 2) prediction_data <- stats::predict(pca) names(prediction_data) <- c("x", "y") prediction_data$Cluster <- as.character(stats::na.omit(attributes(x)$clusters)) prediction_data$label <- row.names(ori_data) if (!is.null(show_data) && show_data %in% c("label", "text")) { label <- "label" } else { label <- NULL } # Centers data (also on the PCA scale) data_centers <- stats::predict(pca, newdata = as.data.frame(x)[names(ori_data)], names = c("x", "y")) data_centers$Cluster <- as.character(as.data.frame(x)$Cluster) # Outliers prediction_data$Cluster[prediction_data$Cluster == "0"] <- NA data_centers <- data_centers[data_centers$Cluster != "0", ] layers <- list() # Layers ----------------------- layers[["l1"]] <- list( geom = show_data, data = prediction_data, aes = list(x = "x", y = "y", label = label, color = "Cluster") ) layers[["l2"]] <- list( geom = "point", data = data_centers, aes = list(x = "x", y = "y", color = "Cluster"), shape = "+", size = 10 ) layers[["l3"]] <- list( geom = "labs", x = "PCA - 1", y = "PCA - 2", title = "Clustering Solution" ) # Out class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers)) attr(layers, "data") <- prediction_data layers } #' @export plot.cluster_analysis <- function(x, ...) 
{ plot(visualisation_recipe(x, ...)) } parameters/R/methods_ivprobit.R0000644000176200001440000000047214716604200016325 0ustar liggesusers#' @export ci.ivprobit <- ci.default #' @export standard_error.ivprobit <- function(model, ...) { .data_frame( Parameter = model$names, SE = as.vector(model$se) ) } #' @export p_value.ivprobit <- p_value.default #' @export model_parameters.ivprobit <- model_parameters.ivFixed parameters/R/methods_effect_size.R0000644000176200001440000000241714716604200016756 0ustar liggesusers#' @export ci.parameters_standardized <- function(x, ci = 0.95, verbose = TRUE, ...) { se <- attr(x, "standard_error") if (is.null(se)) { if (isTRUE(verbose)) { insight::print_color("\nCould not extract standard errors of standardized coefficients.\n", "red") } return(NULL) } # for "refit" method if (is.data.frame(se) && "SE" %in% colnames(se)) { se <- se$SE } # check if we have model. if so, use df from model model <- .get_object(x) if (!is.null(model)) { dof <- insight::get_df(model, type = "wald") if (!is.null(dof)) { if (length(dof) > 1 && length(dof) != nrow(x)) { dof <- Inf } } else { dof <- Inf } } else { dof <- Inf } out <- lapply(ci, function(i) { alpha <- (1 + i) / 2 fac <- stats::qt(alpha, df = dof) data.frame( Parameter = x$Parameter, CI = i, CI_low = x$Std_Coefficient - se * fac, CI_high = x$Std_Coefficient + se * fac, stringsAsFactors = FALSE ) }) insight::text_remove_backticks(do.call(rbind, out), verbose = FALSE) } #' @export ci.effectsize_table <- ci.parameters_standardized #' @export standard_error.effectsize_table <- standard_error.parameters_standardized parameters/R/methods_spaMM.R0000644000176200001440000000346214507235543015516 0ustar liggesusers#' @export model_parameters.HLfit <- model_parameters.default #' @export ci.HLfit <- function(x, ci = 0.95, method = "wald", iterations = 100, ...) { method <- match.arg(tolower(method), choices = c("wald", "ml1", "betwithin", "profile", "boot")) # Wald approx if (method == "wald") { out <- .ci_generic(model = x, ci = ci, dof = Inf) # ml1 approx } else if (method == "ml1") { out <- ci_ml1(x, ci) # betwithin approx } else if (method == "betwithin") { out <- ci_betwithin(x, ci) # profiled } else if (method == "profile") { nparms <- n_parameters(x) conf <- stats::confint(x, parm = 1:nparms, level = ci, verbose = FALSE, boot_args = NULL) if (nparms == 1) { out <- as.data.frame(t(conf$interval)) } else { out <- as.data.frame(do.call(rbind, lapply(conf, function(i) i$interval))) } colnames(out) <- c("CI_low", "CI_high") out$Parameter <- insight::find_parameters(x, effects = "fixed", flatten = TRUE) out$CI <- ci out <- out[c("Parameter", "CI", "CI_low", "CI_high")] } # # bootstrapping # } else if (method == "boot") { # out <- stats::confint(x, parm = n_parameters(x), level = ci, verbose = FALSE, boot_args = list(nsim = iterations, showpbar = FALSE)) # } out } #' @export standard_error.HLfit <- function(model, method = NULL, ...) 
{ if (is.null(method)) method <- "wald" utils::capture.output({ se <- summary(model)$beta_table[, 2] }) .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ), SE = as.vector(se) ) } #' @export p_value.HLfit <- p_value.cpglmm parameters/R/dof_ml1.R0000644000176200001440000000425114736731407014300 0ustar liggesusers#' @rdname p_value_ml1 #' @export dof_ml1 <- function(model) { if (!insight::is_mixed_model(model)) { insight::format_error("Model must be a mixed model.") } re_groups <- insight::get_random(model) parameters <- insight::find_parameters(model, effects = "fixed")[["conditional"]] predictors <- insight::find_predictors(model, effects = "fixed", component = "conditional", flatten = TRUE) predictors <- setdiff(predictors, names(re_groups)) model_data <- insight::get_data(model, verbose = FALSE)[predictors] has_intcp <- insight::has_intercept(model) term_assignment <- .find_term_assignment(model_data, predictors, parameters) ddf <- sapply(model_data, function(.x) { min(vapply(re_groups, .get_df_ml1_approx, numeric(1), x = .x)) }) ltab <- table(ddf) ltab <- list(m = as.integer(names(ltab)), l = as.vector(ltab)) ltab$ddf <- ltab$m - ltab$l if (has_intcp) ltab$ddf <- ltab$ddf - 1 ii <- match(ddf, ltab$m) ddf[] <- ltab$ddf[ii] out <- numeric(length = length(parameters)) ## FIXME: number of items to replace is not a multiple of replacement length suppressWarnings(out[which("(Intercept)" != parameters)] <- ddf[term_assignment]) # nolint if (has_intcp) out[which("(Intercept)" == parameters)] <- min(ddf) stats::setNames(out, parameters) } .get_df_ml1_approx <- function(x, g) { if (!is.factor(g)) { g <- as.factor(g) } m <- nlevels(g) n <- length(x) if (is.character(x)) { x <- as.numeric(as.factor(x)) } else { x <- as.numeric(x) } x.bar <- stats::ave(x, g) var.within <- stats::var(x - x.bar) var.between <- stats::var(x.bar) if (var.within >= var.between) { n } else { m } } .find_term_assignment <- function(model_data, predictors, parameters) { parms <- unlist(lapply(seq_along(predictors), function(i) { p <- predictors[i] if (is.factor(model_data[[p]])) { ps <- paste0(p, levels(model_data[[p]])) names(ps)[seq_along(ps)] <- i ps } else { names(p) <- i p } })) out <- as.numeric(names(parms)[match(insight::clean_names(parameters), parms)]) out[!is.na(out)] } parameters/R/methods_mediate.R0000644000176200001440000001165514716604200016104 0ustar liggesusers#' @export model_parameters.mediate <- function(model, ci = 0.95, exponentiate = FALSE, verbose = TRUE, ...) { # Parameters, Estimate and CI params <- insight::get_parameters(model) # CI params <- merge(params, ci(model, ci = ci), by = "Parameter", sort = FALSE) params$CI <- NULL # p-value params <- merge(params, p_value(model), by = "Parameter", sort = FALSE) # ==== Renaming if (any(endsWith(params$Parameter, "(control)"))) { params$Component <- gsub("(.*)\\((.*)\\)$", "\\2", params$Parameter) } # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) params <- .add_model_parameters_attributes(params, model, ci, exponentiate, verbose = verbose, ...) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export ci.mediate <- function(x, ci = 0.95, ...) 
{ info <- insight::model_info(x$model.y, verbose = FALSE) alpha <- (1 + ci) / 2 if (info$is_linear && !x$INT) { out <- data.frame( Parameter = c("ACME", "ADE", "Total Effect", "Prop. Mediated"), CI = ci, CI_low = c( stats::quantile(x$d0.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$z0.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$tau.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$n0.sims, probs = 1 - alpha, names = FALSE) ), CI_high = c( stats::quantile(x$d0.sims, probs = alpha, names = FALSE), stats::quantile(x$z0.sims, probs = alpha, names = FALSE), stats::quantile(x$tau.sims, probs = alpha, names = FALSE), stats::quantile(x$n0.sims, probs = alpha, names = FALSE) ), stringsAsFactors = FALSE ) } else { out <- data.frame( Parameter = c( "ACME (control)", "ACME (treated)", "ADE (control)", "ADE (treated)", "Total Effect", "Prop. Mediated (control)", "Prop. Mediated (treated)", "ACME (average)", "ADE (average)", "Prop. Mediated (average)" ), CI = ci, CI_low = c( stats::quantile(x$d0.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$d1.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$z0.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$z1.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$tau.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$n0.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$n1.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$d.avg.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$z.avg.sims, probs = 1 - alpha, names = FALSE), stats::quantile(x$n.avg.sims, probs = 1 - alpha, names = FALSE) ), CI_high = c( stats::quantile(x$d0.sims, probs = alpha, names = FALSE), stats::quantile(x$d1.sims, probs = alpha, names = FALSE), stats::quantile(x$z0.sims, probs = alpha, names = FALSE), stats::quantile(x$z1.sims, probs = alpha, names = FALSE), stats::quantile(x$tau.sims, probs = alpha, names = FALSE), stats::quantile(x$n0.sims, probs = alpha, names = FALSE), stats::quantile(x$n1.sims, probs = alpha, names = FALSE), stats::quantile(x$d.avg.sims, probs = alpha, names = FALSE), stats::quantile(x$z.avg.sims, probs = alpha, names = FALSE), stats::quantile(x$n.avg.sims, probs = alpha, names = FALSE) ), stringsAsFactors = FALSE ) } out } #' @export standard_error.mediate <- function(model, ...) { NULL } #' @export p_value.mediate <- function(model, ...) { info <- insight::model_info(model$model.y, verbose = FALSE) if (info$is_linear && !model$INT) { out <- data.frame( Parameter = c("ACME", "ADE", "Total Effect", "Prop. Mediated"), p = c(model$d0.p, model$z0.p, model$tau.p, model$n0.p), stringsAsFactors = FALSE ) } else { out <- data.frame( Parameter = c( "ACME (control)", "ACME (treated)", "ADE (control)", "ADE (treated)", "Total Effect", "Prop. Mediated (control)", "Prop. Mediated (treated)", "ACME (average)", "ADE (average)", "Prop. Mediated (average)" ), p = c( model$d0.p, model$d1.p, model$z0.p, model$z1.p, model$tau.p, model$n0.p, model$n1.p, model$d.avg.p, model$z.avg.p, model$n.avg.p ), stringsAsFactors = FALSE ) } out } #' @export format_parameters.mediate <- function(model, ...) 
{ params <- insight::find_parameters(model, flatten = TRUE) params <- insight::trim_ws(gsub("(.*)\\((.*)\\)$", "\\1", params)) names(params) <- params params[params == "ACME"] <- "Indirect Effect (ACME)" params[params == "ADE"] <- "Direct Effect (ADE)" params } parameters/R/methods_ordinal.R0000644000176200001440000000666614761570351016143 0ustar liggesusers# model parameters ------------------- #' @export model_parameters.clm2 <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, component = "all", standardize = NULL, exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { component <- insight::validate_argument(component, c("all", "conditional", "scale")) if (component == "all") { merge_by <- c("Parameter", "Component") } else { merge_by <- "Parameter" } ## TODO check merge by out <- .model_parameters_generic( model = model, ci = ci, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, include_info = include_info, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export model_parameters.clmm2 <- model_parameters.clm2 #' @export model_parameters.clmm <- model_parameters.cpglmm # CI --------------------- ## TODO residual df? #' @export ci.clm2 <- function(x, ci = 0.95, component = c("all", "conditional", "scale"), ...) { component <- match.arg(component) .ci_generic(model = x, ci = ci, dof = Inf, component = component) } #' @export ci.clmm2 <- ci.clm2 # standard errors ----------------- #' @export standard_error.clm2 <- function(model, component = "all", ...) { component <- match.arg(component, choices = c("all", "conditional", "scale")) stats <- .get_se_from_summary(model) parms <- insight::get_parameters(model, component = component) .data_frame( Parameter = parms$Parameter, SE = stats[parms$Parameter], Component = parms$Component ) } #' @export standard_error.clmm2 <- standard_error.clm2 # p values ---------------- #' @export p_value.clm2 <- function(model, component = "all", ...) { component <- insight::validate_argument( component, c("all", "conditional", "scale") ) params <- insight::get_parameters(model) cs <- stats::coef(summary(model)) p <- cs[, 4] out <- .data_frame( Parameter = params$Parameter, Component = params$Component, p = as.vector(p) ) if (component != "all") { out <- out[out$Component == component, ] } out } #' @export p_value.clmm2 <- p_value.clm2 # simulate model ------------------- #' @export simulate_model.clm2 <- function(model, iterations = 1000, component = "all", ...) { component <- insight::validate_argument( component, c("all", "conditional", "scale") ) out <- .simulate_model(model, iterations, component = component, ...) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export simulate_model.clmm2 <- simulate_model.clm2 parameters/R/select_parameters.R0000644000176200001440000000714114736731407016462 0ustar liggesusers#' Automated selection of model parameters #' #' This function performs an automated selection of the 'best' parameters, #' updating and returning the "best" model. #' #' @param model A statistical model (of class `lm`, `glm`, or `merMod`). #' @param ... Arguments passed to or from other methods. 
#'
#' @section Classical lm and glm:
#' For frequentist GLMs, `select_parameters()` performs an AIC-based stepwise
#' selection.
#'
#' @section Mixed models:
#' For mixed-effects models of class `merMod`, stepwise selection is based on
#' [`cAIC4::stepcAIC()`]. This step function only searches the "best" model
#' based on the random-effects structure, i.e. `select_parameters()` adds or
#' excludes random effects until the cAIC can't be improved further.
#'
#' @examplesIf requireNamespace("lme4")
#' model <- lm(mpg ~ ., data = mtcars)
#' select_parameters(model)
#'
#' model <- lm(mpg ~ cyl * disp * hp * wt, data = mtcars)
#' select_parameters(model)
#' \donttest{
#' # lme4 -------------------------------------------
#' model <- lme4::lmer(
#'   Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species),
#'   data = iris
#' )
#' select_parameters(model)
#' }
#'
#' @return The model refitted with the optimal number of parameters.
#' @export
select_parameters <- function(model, ...) {
  UseMethod("select_parameters")
}


#' @rdname select_parameters
#' @param k The multiple of the number of degrees of freedom used for the penalty.
#'   Only `k = 2` gives the genuine AIC: `k = log(n)` is sometimes referred to as
#'   BIC or SBC.
#' @inheritParams stats::step
#' @export
select_parameters.lm <- function(model,
                                 direction = "both",
                                 steps = 1000,
                                 k = 2,
                                 ...) {
  junk <- utils::capture.output({
    best <- stats::step(
      model,
      trace = 0,
      direction = direction,
      steps = steps,
      k = k,
      ...
    )
  })

  best
}


#' @rdname select_parameters
#' @export
select_parameters.merMod <- function(model,
                                     direction = "backward",
                                     steps = 1000,
                                     ...) {
  insight::check_if_installed("cAIC4")

  # Find slope and group candidates
  # data <- insight::get_data(model)
  # factors <- names(data[sapply(data, is.factor)])
  # if(length(factors) == 0){
  #   factors <- NULL
  # }
  # nums <- names(data[sapply(data, is.numeric)])
  # if(length(nums) == 0){
  #   nums <- NULL
  # }

  factors <- unique(c(
    insight::find_random(model, split_nested = FALSE, flatten = TRUE),
    insight::find_random(model, split_nested = TRUE, flatten = TRUE)
  ))
  factors <- gsub(":", "/", factors, fixed = TRUE)

  best <- suppressMessages(
    suppressWarnings(
      cAIC4::stepcAIC(
        model,
        # slopeCandidates = nums,
        groupCandidates = factors,
        direction = direction,
        steps = steps,
        allowUseAcross = TRUE
      )$finalModel
    )
  )

  # Using MuMIn's dredge(): works nicely BUT throws unnecessary warnings and
  # requires setting global options for na.action even though there are no NaNs.
  # The code is here: https://github.com/cran/MuMIn/blob/master/R/dredge.R Maybe it could be reimplemented?
  # insight::check_if_installed("MuMIn")
  # model <- lmer(
  #   Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species),
  #   data = iris,
  #   na.action = na.fail
  # )
  # summary(MuMIn::get.models(MuMIn::dredge(model), 1)[[1]])

  best
}
parameters/R/parameters_type.R0000644000176200001440000003406215073712214016154 0ustar liggesusers#' Type of model parameters
#'
#' In a regression model, the parameters do not all have the same meaning. For
#' instance, the intercept has to be interpreted as the theoretical outcome value
#' under some conditions (when predictors are set to 0), whereas other
#' coefficients are to be interpreted as amounts of change. Others, such as
#' interactions, represent changes in the effect of another parameter. The
#' `parameters_type` function attempts to retrieve information and meaning
#' of parameters. It outputs a data frame of information for each parameter,
#' such as the `Type` (whether the parameter corresponds to a factor or a
#' numeric predictor, or whether it is a (regular) interaction or a nested
#' one), the `Link` (whether the parameter can be interpreted as a mean
#' value, the slope of an association or a difference between two levels) and,
#' in the case of interactions, which other parameters are impacted by which
#' parameter.
#'
#' @param model A statistical model.
#' @param ... Arguments passed to or from other methods.
#'
#' @examples
#' library(parameters)
#'
#' model <- lm(Sepal.Length ~ Petal.Length + Species, data = iris)
#' parameters_type(model)
#'
#' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris)
#' parameters_type(model)
#'
#' model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris)
#' parameters_type(model)
#'
#' # Interactions
#' model <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris)
#' parameters_type(model)
#'
#' model <- lm(Sepal.Length ~ Sepal.Width * Species * Petal.Length, data = iris)
#' parameters_type(model)
#'
#' model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris)
#' parameters_type(model)
#'
#' model <- lm(Sepal.Length ~ Species / Sepal.Width, data = iris)
#' parameters_type(model)
#'
#'
#' # Complex interactions
#' data <- iris
#' data$fac2 <- ifelse(data$Sepal.Width > mean(data$Sepal.Width), "A", "B")
#' model <- lm(Sepal.Length ~ Species / fac2 / Petal.Length, data = data)
#' parameters_type(model)
#'
#' model <- lm(Sepal.Length ~ Species / fac2 * Petal.Length, data = data)
#' parameters_type(model)
#' @return A data frame.
#' @export
parameters_type <- function(model, ...) {
  # Get info
  params <- data.frame(
    Parameter = insight::find_parameters(model, effects = "fixed", flatten = TRUE),
    stringsAsFactors = FALSE
  )

  # Special case
  if (inherits(model, c("polr", "svyolr"))) {
    params$Parameter <- gsub("Intercept: ", "", params$Parameter, fixed = TRUE)
  }

  # Special case
  if (inherits(model, "bracl")) {
    params$Parameter <- gsub("(.*):(.*)", "\\2", params$Parameter)
  }

  # Special case
  if (inherits(model, "DirichletRegModel")) {
    cf <- stats::coef(model)
    if (model$parametrization == "common") {
      pattern <- paste0("(", paste(model$varnames, collapse = "|"), ")\\.(.*)")
      params$Parameter <- gsub(pattern, "\\2", names(unlist(cf)))
    } else {
      params$Parameter <- gsub("(.*)\\.(.*)\\.(.*)", "\\3", names(unlist(cf)))
    }
  }

  # Remove "as.factor()", "log()" etc. from parameter names, but save the
  # original parameters first
  original_parameter <- params$Parameter
  params$Parameter <- .clean_parameter_names(params$Parameter, full = TRUE)

  ## TODO can we get rid of the count_ / zero_ prefix here?
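  # Zero-inflated models prefix their parameter names with "count_"/"zero_";
  # strip these prefixes so the cleaned names match the variables in the
  # model frame retrieved below.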
if (inherits(model, c("zeroinfl", "hurdle", "zerocount"))) { params$Parameter <- gsub("^(count_|zero_)", "", params$Parameter) } data <- insight::get_data(model, source = "mf", verbose = FALSE) if (is.null(data) || inherits(data, "ts") || nrow(data) == 0) { return(NULL) } # convert on-the-fly-factors back from numeric to factors data[] <- lapply(data, function(i) { if (isTRUE(attributes(i)$factor)) { as.factor(i) } else { i } }) reference <- .list_factors_numerics(data, model) # Get types main <- .parameters_type_table(names = params$Parameter, data, reference) secondary <- .parameters_type_table(names = main$Secondary_Parameter, data, reference) names(secondary) <- paste0("Secondary_", names(secondary)) names(secondary)[ names(secondary) == "Secondary_Secondary_Parameter" ] <- "Tertiary_Parameter" out <- cbind(params, main, secondary) # Deal with nested interactions for (i in unique(paste0( out[out$Type == "interaction", "Variable"], out[out$Type == "interaction", "Secondary_Variable"] ))) { interac <- out[paste0(out$Variable, out$Secondary_Variable) == i, ] if (!all(interac$Term %in% out$Parameter)) { out[paste0(out$Variable, out$Secondary_Variable) == i, "Type"] <- "nested" } if (all(interac$Term %in% out$Parameter)) { interac_sec_term <- interac$Secondary_Term[!is.na(interac$Secondary_Term)] if (length(interac_sec_term) && !all(interac_sec_term %in% out$Parameter)) { out[paste0(out$Variable, out$Secondary_Variable) == i, "Type"] <- "simple" } } } for (i in unique(out$Secondary_Parameter)) { if (!is.na(i) && i %in% out$Parameter) { .param_type <- out[!is.na(out$Parameter) & out$Parameter == i, "Type"] .param_secondary_type <- out[ !is.na(out$Secondary_Parameter) & out$Secondary_Parameter == i, "Secondary_Type" ] if ( length(.param_type) == length(.param_secondary_type) || length(.param_type) == 1 ) { out[ !is.na(out$Secondary_Parameter) & out$Secondary_Parameter == i, "Secondary_Type" ] <- .param_type } } } out$Parameter <- original_parameter out } #' @keywords internal .parameters_type_table <- function(names, data, reference) { out <- lapply(names, .parameters_type, data = data, reference = reference) out <- as.data.frame(do.call(rbind, out), stringsAsFactors = FALSE) names(out) <- c("Type", "Link", "Term", "Variable", "Level", "Secondary_Parameter") out } #' @keywords internal .parameters_type <- function(name, data, reference) { if (grepl(":", name, fixed = TRUE)) { # Split var <- unlist(strsplit(name, ":", fixed = TRUE)) if (length(var) > 2) { var <- c(utils::tail(var, 1), paste0(utils::head(var, -1), collapse = ":")) } else { var <- rev(var) } # Check if any is factor types <- unlist(lapply( var, function(x, data, reference) .parameters_type_basic(x, data, reference)[1], data = data, reference = reference )) link <- ifelse(any("factor" %in% types), "Difference", "Association") # Get type main <- .parameters_type_basic(var[1], data, reference) return(c("interaction", link, main[3], main[4], main[5], var[2])) } else { .parameters_type_basic(name, data, reference) } } #' @keywords internal .parameters_type_basic <- function(name, data, reference, brackets = c("[", "]")) { if (is.na(name)) { return(c(NA, NA, NA, NA, NA, NA)) } # parameter type is determined here. for formatting / printing, # refer to ".format_parameter()". Make sure that pattern # processed here are not "cleaned" (i.e. 
removed) in # ".clean_parameter_names()" cleaned_name <- .clean_parameter_names(name, full = TRUE) cleaned_ordered_name <- gsub("(.*)((\\.|\\^).*)", "\\1", cleaned_name) # Intercept if (.in_intercepts(cleaned_name)) { return(c("intercept", "Mean", "(Intercept)", NA, NA, NA)) # Numeric } else if (cleaned_name %in% reference$numeric) { return(c("numeric", "Association", name, name, NA, NA)) # Ordered factors } else if (is.ordered(data[[cleaned_ordered_name]])) { fac <- reference$levels_parent[match(cleaned_name, reference$levels)] return(c( "ordered", "Association", name, fac, .format_ordered(gsub(fac, "", name, fixed = TRUE), brackets = brackets), NA )) # Factors } else if (cleaned_name %in% reference$levels) { fac <- reference$levels_parent[match(cleaned_name, reference$levels)] return(c("factor", "Difference", name, fac, gsub(fac, "", name, fixed = TRUE), NA)) # Polynomials } else if (grepl("poly(", name, fixed = TRUE)) { if (grepl(", raw = TRUE", name, fixed = TRUE)) { name <- gsub(", raw = TRUE", "", name, fixed = TRUE) type <- "poly_raw" } else { type <- "poly" } var <- .poly_info(name, "name") degree <- .poly_info(name, "degree") return(c(type, "Association", name, var, degree, NA)) # Splines } else if (grepl("(bs|ns|psline|lspline|rcs|mSpline)\\(", name)) { type <- "spline" var <- gsub("(bs|ns|psline|lspline|rcs|mSpline)\\((.*)\\)(\\d)", "\\2", name) if (grepl(",", var, fixed = TRUE)) { var <- substr(var, start = 0, stop = regexpr(",", var, fixed = TRUE) - 1) } degree <- gsub("(bs|ns|psline|lspline|rcs|mSpline)\\((.*)\\)(\\d)", "\\3", name) return(c(type, "Association", name, var, degree, NA)) # log-transformation } else if (grepl("(log|logb|log1p|log2|log10)\\(", name)) { type <- "logarithm" var <- gsub("(log|logb|log1p|log2|log10)\\((.*)\\)", "\\2", name) if (grepl(",", var, fixed = TRUE)) { var <- substr(var, start = 0, stop = regexpr(",", var, fixed = TRUE) - 1) } return(c(type, "Association", name, var, NA, NA)) # exp-transformation } else if (grepl("(exp|expm1)\\(", name)) { type <- "exponentiation" var <- gsub("(exp|expm1)\\((.*)\\)", "\\2", name) if (grepl(",", var, fixed = TRUE)) { var <- substr(var, start = 0, stop = regexpr(",", var, fixed = TRUE) - 1) } return(c(type, "Association", name, var, NA, NA)) # sqrt-transformation } else if (grepl("sqrt(", name, fixed = TRUE)) { type <- "squareroot" var <- gsub("sqrt\\((.*)\\)", "\\1", name) if (grepl(",", var, fixed = TRUE)) { var <- substr(var, start = 0, stop = regexpr(",", var, fixed = TRUE) - 1) } return(c(type, "Association", name, var, NA, NA)) # As Is } else if (startsWith(name, "I(")) { type <- "asis" var <- gsub("^I\\((.*)\\)", "\\1", name) return(c(type, "Association", name, var, NA, NA)) # Smooth } else if (startsWith(name, "s(")) { return(c("smooth", "Association", name, NA, NA, NA)) # Smooth } else if (startsWith(name, "smooth_")) { return(c( "smooth", "Association", gsub("^smooth_(.*)\\[(.*)\\]", "\\2", name), NA, NA, NA )) } else { return(c("unknown", NA, NA, NA, NA, NA)) } } #' @keywords internal .poly_info <- function(x, what = "degree") { if (what == "degree") { subs <- "\\4" } else { subs <- "\\2" } p <- "(.*)poly\\((.*),\\s(.*)\\)(.*)" .safe(insight::trim_ws(sub(p, replacement = subs, x)), 1) } #' @keywords internal .list_factors_numerics <- function(data, model) { out <- list() # retrieve numerics .check_for_numerics <- function(x) { is.numeric(x) && !isTRUE(attributes(x)$factor) } out$numeric <- names(data[vapply(data, .check_for_numerics, TRUE)]) # get contrast coding contrast_coding <- 
.safe(model$contrasts) # clean names from on-the-fly conversion, like "as.ordered(x)" if (!is.null(contrast_coding) && !is.null(names(contrast_coding))) { names(contrast_coding) <- gsub( "(as\\.ordered|ordered|as\\.factor|factor)\\((.*)\\)", "\\2", names(contrast_coding) ) } # if contrasts are given as matrix, find related contrast name if (!is.null(contrast_coding)) { contrast_coding <- lapply(contrast_coding, function(i) { if (is.array(i)) { cn <- colnames(i) if (is.null(cn)) { if (rowMeans(i)[1] == -1) { i <- "contr.helmert" } else { i <- "contr.sum" } } else if (cn[1] == ".L") { i <- "contr.poly" } else if (cn[1] == "2") { i <- "contr.treatment2" } else if (cn[1] == "1") { i <- "contr.SAS2" } else { i <- "contr.custom" attr(i, "column_names") <- cn } } i }) } # Ordered factors out$ordered <- names(data[vapply(data, is.ordered, TRUE)]) # Factors out$factor <- names(data[ vapply(data, is.factor, TRUE) | vapply(data, is.character, TRUE) ]) out$levels <- NA out$levels_parent <- NA # clean names from on-the-fly conversion, like "as.ordered(x)" if (!is.null(contrast_coding) && !is.null(names(contrast_coding))) { names(contrast_coding) <- gsub( "(as\\.ordered|ordered|as\\.factor|factor)\\((.*)\\)", "\\2", names(contrast_coding) ) } for (fac in out$factor) { if ( (fac %in% out$ordered && is.null(contrast_coding[[fac]])) || (!is.null(contrast_coding[[fac]]) && any(contrast_coding[[fac]] %in% "contr.poly")) ) { levels <- paste0( fac, c(".L", ".Q", ".C", paste0("^", 4:1000))[seq_along(unique(data[[fac]]))] ) } else if ( !is.null(contrast_coding[[fac]]) && any( contrast_coding[[fac]] %in% c("contr.SAS2", "contr.sum", "contr.bayes", "contr.helmert") ) ) { levels <- paste0(fac, seq_along(unique(data[[fac]]))) } else if ( !is.null(contrast_coding[[fac]]) && any(contrast_coding[[fac]] %in% "contr.treatment2") ) { levels <- paste0(fac, 2:length(unique(data[[fac]]))) } else if ( !is.null(contrast_coding[[fac]]) && any(contrast_coding[[fac]] %in% "contr.SAS") ) { levels <- paste0(fac, rev(unique(data[[fac]]))) } else if ( !is.null(contrast_coding[[fac]]) && any(contrast_coding[[fac]] %in% "contr.custom") ) { levels <- paste0(fac, attributes(contrast_coding[[fac]])$column_names) } else { levels <- paste0(fac, unique(data[[fac]])) } out$levels_parent <- c(out$levels_parent, rep(fac, length(levels))) out$levels <- c(out$levels, levels) } out$levels <- out$levels[!is.na(out$levels)] out$levels_parent <- out$levels_parent[!is.na(out$levels_parent)] out } parameters/R/methods_lme4.R0000644000176200001440000001113215057525051015330 0ustar liggesusers############# .merMod ----------------- #' @export model_parameters.merMod <- model_parameters.coxme # helper ---------------------------------------------------------------------- .add_random_effects_lme4 <- function(model, params, ci, ci_method, ci_random, effects, group_level, verbose = TRUE, ...) { params_random <- params_variance <- NULL # only proceed if random effects are requested if (effects %in% c("random", "all")) { # group level estimates (BLUPs) or random effects variances? 
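    # If `group_level` is TRUE, extract the group-level estimates (conditional
    # modes, i.e. BLUPs) for each level of the grouping factors; otherwise,
    # extract the random-effects variance components (SD and correlation
    # parameters).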
if (isTRUE(group_level)) { params_random <- .extract_random_parameters(model, ci = ci, effects = effects) } else { params_variance <- .extract_random_variances( model, ci = ci, effects = effects, ci_method = ci_method, ci_random = ci_random, verbose = verbose ) } # merge random and fixed effects, if necessary if (!is.null(params) && (!is.null(params_random) || !is.null(params_variance))) { params$Level <- NA params$Group <- "" if (is.null(params_random)) { params <- params[match(colnames(params_variance), colnames(params))] } else { params <- params[match(colnames(params_random), colnames(params))] } } } rbind(params, params_random, params_variance) } #' @export ci.merMod <- function(x, ci = 0.95, dof = NULL, method = "wald", iterations = 500, ...) { method <- tolower(method) method <- insight::validate_argument(method, c( "wald", "ml1", "betwithin", "kr", "satterthwaite", "kenward", "boot", "profile", "residual", "normal" )) # bootstrapping if (method == "boot") { out <- lapply(ci, function(ci, x) .ci_boot_merMod(x, ci, iterations, ...), x = x) out <- do.call(rbind, out) row.names(out) <- NULL # profiled CIs } else if (method == "profile") { pp <- suppressWarnings(stats::profile(x, which = "beta_")) out <- lapply(ci, function(i) .ci_profile_merMod(x, ci = i, profiled = pp, ...)) out <- do.call(rbind, out) # all others } else { out <- .ci_generic(model = x, ci = ci, dof = dof, method = method, ...) } out } #' @export standard_error.merMod <- function(model, effects = "fixed", method = NULL, vcov = NULL, vcov_args = NULL, ...) { dots <- list(...) effects <- insight::validate_argument(effects, c("fixed", "random")) if (effects == "random") { out <- .standard_errors_random(model) return(out) } if (is.null(method)) { method <- "wald" } else if ((method == "robust" && is.null(vcov)) || # deprecated argument isTRUE(list(...)[["robust"]])) { vcov <- "vcovHC" } if (!is.null(vcov) || isTRUE(dots[["robust"]])) { fun_args <- list(model, vcov = vcov, vcov_args = vcov_args ) fun_args <- c(fun_args, dots) out <- do.call("standard_error.default", fun_args) return(out) } # kenward approx if (method %in% c("kenward", "kr")) { out <- se_kenward(model) return(out) } else { # Classic and Satterthwaite SE out <- se_mixed_default(model) return(out) } } # helpers -------------- .standard_errors_random <- function(model) { insight::check_if_installed("lme4") rand.se <- lme4::ranef(model, condVar = TRUE) n.groupings <- length(rand.se) for (m in 1:n.groupings) { vars.m <- attr(rand.se[[m]], "postVar") K <- dim(vars.m)[1] J <- dim(vars.m)[3] names.full <- dimnames(rand.se[[m]]) rand.se[[m]] <- array(NA, c(J, K)) for (j in 1:J) { rand.se[[m]][j, ] <- sqrt(diag(as.matrix(vars.m[, , j]))) } dimnames(rand.se[[m]]) <- list(names.full[[1]], names.full[[2]]) } rand.se } se_mixed_default <- function(model) { params <- insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ) .data_frame(Parameter = params, SE = .get_se_from_summary(model)) } #' @export p_value.merMod <- p_value.cpglmm parameters/R/p_value_satterthwaite.R0000644000176200001440000000360314331167101017343 0ustar liggesusers#' @title Satterthwaite approximation for SEs, CIs and p-values #' @name p_value_satterthwaite #' #' @description An approximate F-test based on the Satterthwaite (1946) approach. #' #' @param model A statistical model. #' @param dof Degrees of Freedom. 
#' @inheritParams ci.default #' #' @details Inferential statistics (like p-values, confidence intervals and #' standard errors) may be biased in mixed models when the number of clusters #' is small (even if the sample size of level-1 units is high). In such cases #' it is recommended to approximate a more accurate number of degrees of freedom #' for such inferential statistics. Unlike simpler approximation heuristics #' like the "m-l-1" rule (`dof_ml1`), the Satterthwaite approximation is #' also applicable in more complex multilevel designs. However, the "m-l-1" #' heuristic also applies to generalized mixed models, while approaches like #' Kenward-Roger or Satterthwaite are limited to linear mixed models only. #' #' @seealso `dof_satterthwaite()` and `se_satterthwaite()` are small helper-functions #' to calculate approximated degrees of freedom and standard errors for model #' parameters, based on the Satterthwaite (1946) approach. #' #' [`dof_kenward()`] and [`dof_ml1()`] approximate degrees of freedom based on #' Kenward-Roger's method or the "m-l-1" rule. #' #' @examples #' \donttest{ #' if (require("lme4", quietly = TRUE)) { #' model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) #' p_value_satterthwaite(model) #' } #' } #' @return A data frame. #' @references Satterthwaite FE (1946) An approximate distribution of estimates of variance components. Biometrics Bulletin 2 (6):110–4. #' @export p_value_satterthwaite <- function(model, dof = NULL, ...) { if (is.null(dof)) { dof <- dof_satterthwaite(model) } .p_value_dof(model, dof, method = "satterthwaite", ...) } parameters/R/methods_maxLik.R0000644000176200001440000000071714507235543015726 0ustar liggesusers# .maxLik, .maxim #' @export model_parameters.maxLik <- model_parameters.default #' @export model_parameters.maxim <- model_parameters.default #' @export p_value.maxLik <- function(model, ...) { p <- summary(model)$estimate[, 4] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #' @export ci.maxLik <- ci.default #' @export standard_error.maxLik <- standard_error.default parameters/R/p_calibrate.R0000644000176200001440000000637314477616760015241 0ustar liggesusers#' @title Calculate calibrated p-values. #' @name p_calibrate #' #' @description Compute calibrated p-values that can be interpreted #' probabilistically, i.e. as posterior probability of H0 (given that H0 #' and H1 have equal prior probabilities). #' #' @param x A numeric vector of p-values, or a regression model object. #' @param type Type of calibration. Can be `"frequentist"` or `"bayesian"`. #' See 'Details'. #' @param verbose Toggle warnings. #' @param ... Currently not used. #' #' @return A data frame with p-values and calibrated p-values. #' #' @details #' The Bayesian calibration, i.e. when `type = "bayesian"`, can be interpreted #' as the lower bound of the Bayes factor for H0 to H1, based on the data. #' The full Bayes factor would then require multiplying by the prior odds of #' H0 to H1. The frequentist calibration also has a Bayesian interpretation; it #' is the posterior probability of H0, assuming that H0 and H1 have equal #' prior probabilities of 0.5 each (_Sellke et al. 2001_). #' #' The calibration only works for p-values lower than or equal to `1/e`. #' #' @references #' Thomas Sellke, M. 
J Bayarri and James O Berger (2001) Calibration of p Values #' for Testing Precise Null Hypotheses, The American Statistician, 55:1, 62-71, #' \doi{10.1198/000313001300339950} #' #' @examples #' model <- lm(mpg ~ wt + as.factor(gear) + am, data = mtcars) #' p_calibrate(model, verbose = FALSE) #' @export p_calibrate <- function(x, ...) { UseMethod("p_calibrate") } #' @export p_calibrate.numeric <- function(x, type = "frequentist", verbose = TRUE, ...) { type <- match.arg(tolower(type), choices = c("frequentist", "bayesian")) # fill p-values larger than calibration cut-off with `NA` x[x > (1 / exp(1))] <- NA if (type == "bayesian") { calibrated <- (-exp(1) * x * log(x)) } else { calibrated <- 1 / (1 + (1 / (-exp(1) * x * log(x)))) } if (verbose && anyNA(calibrated)) { insight::format_warning( "Some p-values were larger than the calibration cut-off.", "Returning `NA` for p-values that cannot be calibrated." ) } calibrated } #' @rdname p_calibrate #' @export p_calibrate.default <- function(x, type = "frequentist", verbose = TRUE, ...) { if (!insight::is_model(x)) { insight::format_error("`p_calibrate()` requires a valid model object.") } out <- p_value(x) out$p_calibrated <- p_calibrate(out$p, type = type, verbose = FALSE, ...) if (verbose && anyNA(out$p_calibrated)) { insight::format_warning( "Some p-values were larger than the calibration cut-off.", "Returning `NA` for p-values that cannot be calibrated." ) } class(out) <- c("p_calibrate", "data.frame") attr(out, "type") <- type out } # methods ----------------- #' @export format.p_calibrate <- function(x, ...) { insight::format_table(x, ...) } #' @export print.p_calibrate <- function(x, ...) { formatted <- format(x, ...) footer <- switch(attributes(x)$type, frequentist = "Calibrated p-values indicate the posterior probability of H0.\n", "Calibrated p-values indicate the Bayes Factor (evidence) in favor of H0 over H1.\n" ) cat(insight::export_table(formatted, footer = c(footer, "blue"), ...)) } parameters/R/reshape_loadings.R0000644000176200001440000000764014736731407016273 0ustar liggesusers#' Reshape loadings between wide/long formats #' #' Reshape loadings between wide/long formats. #' #' #' @examples #' if (require("psych")) { #' pca <- model_parameters(psych::fa(attitude, nfactors = 3)) #' loadings <- reshape_loadings(pca) #' #' loadings #' reshape_loadings(loadings) #' } #' @export reshape_loadings <- function(x, ...) { UseMethod("reshape_loadings") } #' @rdname reshape_loadings #' @inheritParams principal_components #' @export reshape_loadings.parameters_efa <- function(x, threshold = NULL, ...) { current_format <- attributes(x)$loadings_format if (is.null(current_format) || current_format == "wide") { .long_loadings(x, threshold = threshold) } else { .wide_loadings(x) } } #' @rdname reshape_loadings #' @param loadings_columns Vector indicating the columns corresponding to loadings. #' @export reshape_loadings.data.frame <- function(x, threshold = NULL, loadings_columns = NULL, ...) { if (is.null(loadings_columns)) loadings_columns <- seq_len(ncol(x)) if (length(loadings_columns) > 1) { .long_loadings(x, threshold = threshold, loadings_columns = loadings_columns) } } #' @keywords internal .wide_loadings <- function(loadings, loadings_columns = "Loading", component_column = "Component", variable_column = "Variable", ...) 
{ if (is.numeric(loadings[[component_column]])) { loadings[[component_column]] <- paste0("F", loadings[[component_column]]) } complexity_column <- if ("Complexity" %in% colnames(loadings)) "Complexity" else NULL uniqueness_column <- if ("Uniqueness" %in% colnames(loadings)) "Uniqueness" else NULL reshape_columns <- c(loadings_columns, component_column, variable_column, complexity_column, uniqueness_column) loadings <- stats::reshape( loadings[reshape_columns], idvar = variable_column, timevar = component_column, direction = "wide", v.names = loadings_columns, sep = "_" ) names(loadings) <- gsub(paste0(loadings_columns, "_"), "", names(loadings), fixed = TRUE) attr(loadings, "loadings_format") <- "wide" class(loadings) <- unique(c("parameters_loadings", class(loadings))) # clean-up, column-order row.names(loadings) <- NULL column_order <- c(setdiff(colnames(loadings), c("Complexity", "Uniqueness")), c("Complexity", "Uniqueness")) loadings[column_order[column_order %in% colnames(loadings)]] } #' @keywords internal .long_loadings <- function(loadings, threshold = NULL, loadings_columns = NULL) { if (is.null(loadings_columns)) { loadings_columns <- attributes(loadings)$loadings_columns } if (!is.null(threshold)) { loadings <- .filter_loadings(loadings, threshold = threshold, loadings_columns = loadings_columns) } # Reshape to long long <- stats::reshape(loadings, direction = "long", varying = list(names(loadings)[loadings_columns]), v.names = "Loading", timevar = "Component", idvar = "Variable" ) # Restore component names for (i in 1:insight::n_unique(long$Component)) { component <- unique(long$Component)[[i]] name <- names(loadings)[loadings_columns][[i]] long[long$Component == component, "Component"] <- name } # Filtering long <- long[!is.na(long$Loading), ] row.names(long) <- NULL # Reorder columns loadings <- long[, c( "Component", "Variable", "Loading", names(loadings)[-loadings_columns][!names(loadings)[-loadings_columns] %in% c("Component", "Variable", "Loading")] )] attr(loadings, "loadings_format") <- "long" class(loadings) <- unique(c("parameters_loadings", class(loadings))) loadings } #' @export print.parameters_loadings <- function(x, ...) { formatted_table <- insight::format_table(x) cat(insight::export_table(formatted_table)) invisible(x) } parameters/R/methods_gee.R0000644000176200001440000000336514716604200015233 0ustar liggesusers#' @export standard_error.geeglm <- standard_error.default #' @export standard_error.gee <- function(model, method = NULL, ...) { cs <- stats::coef(summary(model)) if (isTRUE(list(...)$robust) || "vcov" %in% names(list(...))) { se <- as.vector(cs[, "Robust S.E."]) } else { se <- as.vector(cs[, "Naive S.E."]) } .data_frame(Parameter = .remove_backticks_from_string(rownames(cs)), SE = se) } #' @export p_value.gee <- function(model, method = NULL, ...) { cs <- stats::coef(summary(model)) if (is.null(method)) { method <- "any" } if (isTRUE(list(...)$robust) || "vcov" %in% names(list(...))) { p <- 2 * stats::pt( abs(cs[, "Estimate"] / cs[, "Robust S.E."]), df = insight::get_df(x = model, type = method), lower.tail = FALSE ) } else { p <- 2 * stats::pt( abs(cs[, "Estimate"] / cs[, "Naive S.E."]), df = insight::get_df(x = model, type = method), lower.tail = FALSE ) } .data_frame( Parameter = .remove_backticks_from_string(rownames(cs)), p = as.vector(p) ) } #' @export ci.geeglm <- function(x, ci = 0.95, method = "wald", ...) { .ci_generic(x, ci = ci, method = method, ...) } #' @export p_value.geeglm <- function(model, method = "wald", ...) 
{ stat <- insight::get_statistic(model) if (!is.null(stat)) { if (identical(method, "residual")) { dof <- insight::get_df(model, type = "residual") p <- as.vector(2 * stats::pt( sqrt(abs(stat$Statistic)), df = dof, lower.tail = FALSE )) } else { p <- as.vector(1 - stats::pchisq(stat$Statistic, df = 1)) } .data_frame( Parameter = stat$Parameter, p = p ) } } parameters/R/p_value_ml1.R0000644000176200001440000000561414331167101015150 0ustar liggesusers#' @title "m-l-1" approximation for SEs, CIs and p-values #' @name p_value_ml1 #' #' @description Approximation of degrees of freedom based on a "m-l-1" heuristic #' as suggested by Elff et al. (2019). #' #' @param model A mixed model. #' @param dof Degrees of Freedom. #' @inheritParams ci.default #' #' @details #' ## Small Sample Cluster corrected Degrees of Freedom #' Inferential statistics (like p-values, confidence intervals and #' standard errors) may be biased in mixed models when the number of clusters #' is small (even if the sample size of level-1 units is high). In such cases #' it is recommended to approximate a more accurate number of degrees of freedom #' for such inferential statistics (see _Li and Redden 2015_). The #' *m-l-1* heuristic is such an approach that uses a t-distribution with #' fewer degrees of freedom (`dof_ml1()`) to calculate p-values #' (`p_value_ml1()`) and confidence intervals (`ci(method = "ml1")`). #' #' ## Degrees of Freedom for Longitudinal Designs (Repeated Measures) #' In particular for repeated measure designs (longitudinal data analysis), #' the *m-l-1* heuristic is likely to be more accurate than simply using the #' residual or infinite degrees of freedom, because `dof_ml1()` returns #' different degrees of freedom for within-cluster and between-cluster effects. #' #' ## Limitations of the "m-l-1" Heuristic #' Note that the "m-l-1" heuristic is not applicable (or at least less accurate) #' for complex multilevel designs, e.g. with cross-classified clusters. In such cases, #' more accurate approaches like the Kenward-Roger approximation (`dof_kenward()`) #' is recommended. However, the "m-l-1" heuristic also applies to generalized #' mixed models, while approaches like Kenward-Roger or Satterthwaite are limited #' to linear mixed models only. #' #' @seealso [`dof_ml1()`] is a small helper-function to calculate approximated #' degrees of freedom of model parameters, based on the "m-l-1" heuristic. #' #' @examples #' \donttest{ #' if (require("lme4")) { #' model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) #' p_value_ml1(model) #' } #' } #' @return A data frame. #' @references #' - Elff, M.; Heisig, J.P.; Schaeffer, M.; Shikano, S. (2019). Multilevel #' Analysis with Few Clusters: Improving Likelihood-based Methods to Provide #' Unbiased Estimates and Accurate Inference, British Journal of Political #' Science. #' #' - Li, P., Redden, D. T. (2015). Comparing denominator degrees of freedom #' approximations for the generalized linear mixed model in analyzing binary #' outcome in small sample cluster-randomized trials. BMC Medical Research #' Methodology, 15(1), 38. \doi{10.1186/s12874-015-0026-x} #' #' @export p_value_ml1 <- function(model, dof = NULL, ...) { if (is.null(dof)) { dof <- dof_ml1(model) } .p_value_dof(model, dof, method = "ml1", ...) 
}
parameters/R/methods_aov.R0000644000176200001440000004426215013566107015263 0ustar liggesusers# classes: .aov, .anova, aovlist, anova.rms, maov, afex_aov


# .aov ------

#' Parameters from ANOVAs
#'
#' @param model Object of class [aov()], [anova()], `aovlist`, `Gam`,
#'   [manova()], `Anova.mlm`, `afex_aov` or `maov`.
#' @param es_type The effect size of interest. Note that possibly not all
#'   effect sizes are applicable to the model object. See 'Details'. For Anova
#'   models, can also be a character vector with multiple effect size names.
#' @param df_error Denominator degrees of freedom (or degrees of freedom of the
#'   error estimate, i.e., the residuals). This is used to compute effect sizes
#'   for ANOVA-tables from mixed models. See 'Examples'. (Ignored for
#'   `afex_aov`.)
#' @param type Numeric, type of sums of squares. May be 1, 2 or 3. If 2 or 3,
#'   ANOVA-tables using `car::Anova()` will be returned. (Ignored for
#'   `afex_aov`.)
#' @param ci Confidence Interval (CI) level for effect sizes specified in
#'   `es_type`. The default, `NULL`, will compute no confidence
#'   intervals. `ci` should be a scalar between 0 and 1.
#' @param test String, indicating the type of test for `Anova.mlm` to be
#'   returned. If `"multivariate"` (or `NULL`), returns the summary of
#'   the multivariate test (that is also given by the `print`-method). If
#'   `test = "univariate"`, returns the summary of the univariate test.
#' @param power Logical, if `TRUE`, adds a column with power for each
#'   parameter.
#' @param table_wide Logical that decides whether the ANOVA table should be in
#'   wide format, i.e. should the numerator and denominator degrees of freedom
#'   be in the same row. Default: `FALSE`.
#' @param include_intercept Logical, if `TRUE`, includes the intercept
#'   (`(Intercept)`) in the anova table.
#' @param alternative A character string specifying the alternative hypothesis;
#'   Controls the type of CI returned: `"two.sided"` (default, two-sided CI),
#'   `"greater"` or `"less"` (one-sided CI). Partial matching is allowed
#'   (e.g., `"g"`, `"l"`, `"two"`...). See section *One-Sided CIs* in
#'   the [effectsize_CIs vignette](https://easystats.github.io/effectsize/).
#' @inheritParams model_parameters.default
#' @param ... Arguments passed to [`effectsize::effectsize()`]. For example,
#'   to calculate _partial_ effect size types, use `partial = TRUE`. For objects
#'   of class `htest` or `BFBayesFactor`, `adjust = TRUE` can be used to return
#'   bias-corrected effect sizes, which is advisable for small samples and large
#'   tables. See also
#'   [`?effectsize::eta_squared`](https://easystats.github.io/effectsize/reference/eta_squared.html)
#'   for arguments `partial` and `generalized`;
#'   [`?effectsize::phi`](https://easystats.github.io/effectsize/reference/phi.html)
#'   for `adjust`; and
#'   [`?effectsize::oddsratio`](https://easystats.github.io/effectsize/reference/oddsratio.html)
#'   for `log`.
#'
#' @return A data frame of indices related to the model's parameters.
#'
#' @inherit effectsize::effectsize details
#'
#' @note For ANOVA-tables from mixed models (i.e. `anova(lmer())`), only
#'   partial or adjusted effect sizes can be computed. Note that type 3 ANOVAs
#'   with interactions involved only give sensible and informative results when
#'   covariates are mean-centred and factors are coded with orthogonal contrasts
#'   (such as those produced by `contr.sum`, `contr.poly`, or
#'   `contr.helmert`, but *not* by the default `contr.treatment`).
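#'   Orthogonal contrasts can, for example, be set globally before fitting the
#'   model, via `options(contrasts = c("contr.sum", "contr.poly"))`.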
#' #' @examplesIf requireNamespace("effectsize", quietly = TRUE) #' df <- iris #' df$Sepal.Big <- ifelse(df$Sepal.Width >= 3, "Yes", "No") #' #' model <- aov(Sepal.Length ~ Sepal.Big, data = df) #' model_parameters(model) #' #' model_parameters(model, es_type = c("omega", "eta"), ci = 0.9) #' #' model <- anova(lm(Sepal.Length ~ Sepal.Big, data = df)) #' model_parameters(model) #' model_parameters( #' model, #' es_type = c("omega", "eta", "epsilon"), #' alternative = "greater" #' ) #' #' model <- aov(Sepal.Length ~ Sepal.Big + Error(Species), data = df) #' model_parameters(model) #' #' @examplesIf requireNamespace("lme4", quietly = TRUE) && requireNamespace("effectsize", quietly = TRUE) #' \donttest{ #' df <- iris #' df$Sepal.Big <- ifelse(df$Sepal.Width >= 3, "Yes", "No") #' mm <- lme4::lmer(Sepal.Length ~ Sepal.Big + Petal.Width + (1 | Species), data = df) #' model <- anova(mm) #' #' # simple parameters table #' model_parameters(model) #' #' # parameters table including effect sizes #' model_parameters( #' model, #' es_type = "eta", #' ci = 0.9, #' df_error = dof_satterthwaite(mm)[2:3] #' ) #' } #' @export model_parameters.aov <- function(model, type = NULL, df_error = NULL, ci = NULL, alternative = NULL, p_adjust = NULL, test = NULL, power = FALSE, es_type = NULL, keep = NULL, drop = NULL, include_intercept = FALSE, table_wide = FALSE, verbose = TRUE, ...) { # save model object, for later checks original_model <- model object_name <- insight::safe_deparse_symbol(substitute(model)) if (inherits(model, "aov") && !is.null(type) && type > 1) { if (requireNamespace("car", quietly = TRUE)) { model <- car::Anova(model, type = type) } else { insight::format_warning("Package {.pkg car} required for type-2 or type-3 Anova. Defaulting to type-1.") } } # try to extract type of anova table if (is.null(type)) { type <- .anova_type(model, verbose = verbose) } # exceptions if (.is_levenetest(model)) { return(model_parameters.htest(model, ...)) } # check contrasts if (verbose) { .check_anova_contrasts(original_model, type) } # extract standard parameters params <- .extract_parameters_anova( model, test, p_adjust = p_adjust, include_intercept = include_intercept, verbose = verbose ) # add effect sizes, if available params <- .effectsizes_for_aov( model, params = params, es_type = es_type, df_error = df_error, ci = ci, alternative = alternative, verbose = FALSE, # we get messages for contrasts before ... ) # add power, if possible if (isTRUE(power)) { params <- .power_for_aov(model, params) } # filter parameters if (!is.null(keep) || !is.null(drop)) { params <- .filter_parameters(params, keep = keep, drop = drop, verbose = verbose ) } # wide or long? if (table_wide) { params <- .anova_table_wide(params) } # add attributes params <- .add_anova_attributes(params, model, ci, test = test, alternative = alternative, ...) class(params) <- c("parameters_model", "see_parameters_model", class(params)) attr(params, "object_name") <- object_name params } #' @export standard_error.aov <- function(model, ...) { params <- model_parameters(model) .data_frame( Parameter = params$Parameter, SE = params$SE ) } #' @export p_value.aov <- function(model, ...) 
{ params <- model_parameters(model) if (nrow(params) == 0) { return(NA) } if ("Group" %in% names(params)) { params <- params[params$Group == "Within", ] } if ("Residuals" %in% params$Parameter) { params <- params[params$Parameter != "Residuals", ] } if (!"p" %in% names(params)) { return(NA) } .data_frame( Parameter = params$Parameter, p = params$p ) } # .anova ------ #' @export standard_error.anova <- standard_error.aov #' @export p_value.anova <- p_value.aov #' @export model_parameters.anova <- model_parameters.aov # .aov.list ------ #' @export standard_error.aovlist <- standard_error.aov #' @export p_value.aovlist <- p_value.aov #' @export model_parameters.aovlist <- model_parameters.aov # .afex_aov ------ #' @export model_parameters.afex_aov <- function(model, es_type = NULL, df_error = NULL, type = NULL, keep = NULL, drop = NULL, include_intercept = FALSE, p_adjust = NULL, verbose = TRUE, ...) { if (inherits(model$Anova, "Anova.mlm")) { params <- model$anova_table with_df_and_p <- summary(model$Anova)$univariate.tests params$`Sum Sq` <- with_df_and_p[-1, 1] params$`Error SS` <- with_df_and_p[-1, 3] out <- .extract_parameters_anova( params, test = NULL, include_intercept = include_intercept, p_adjust = NULL, verbose ) p_adjust <- .extract_p_adjust_afex(model, p_adjust) } else { p_adjust <- .extract_p_adjust_afex(model, p_adjust) out <- .extract_parameters_anova( model$Anova, test = NULL, include_intercept = include_intercept, p_adjust, verbose ) } out <- .effectsizes_for_aov( model, params = out, es_type = es_type, df_error = df_error, verbose = verbose, ... ) # add attributes; `afex_aov` has no `ci` argument, so no CI level is stored out <- .add_anova_attributes( out, model, ci = NULL, test = NULL, alternative = NULL, p_adjust = p_adjust, ... ) # filter parameters if (!is.null(keep) || !is.null(drop)) { out <- .filter_parameters(out, keep = keep, drop = drop, verbose = verbose ) } if (!"Method" %in% names(out)) { out$Method <- "ANOVA estimation for factorial designs using 'afex'" } attr(out, "title") <- unique(out$Method) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(out) <- unique(c("parameters_model", "see_parameters_model", class(out))) out } # others ------ #' @export model_parameters.anova.rms <- model_parameters.aov #' @export model_parameters.Anova.mlm <- model_parameters.aov #' @export model_parameters.maov <- model_parameters.aov #' @export model_parameters.seqanova.svyglm <- model_parameters.aov # helper ------------------------------ .anova_type <- function(model, type = NULL, verbose = TRUE) { if (is.null(type)) { type_to_numeric <- function(type) { if (is.numeric(type)) { return(type) } # nolint start switch(type, `1` = , `I` = 1, `2` = , `II` = 2, `3` = , `III` = 3, 1 ) # nolint end } # default to 1 type <- 1 if (inherits(model, "anova.rms")) { type <- 2 } else if (!is.null(attr(model, "type", exact = TRUE))) { type <- type_to_numeric(attr(model, "type", exact = TRUE)) } else if (!is.null(attr(model, "heading"))) { heading <- attr(model, "heading")[1] if (grepl("(.*)Type (.*) Wald(.*)", heading)) { type <- type_to_numeric(insight::trim_ws(gsub("(.*)Type (.*) Wald(.*)", "\\2", heading))) } else if (grepl("Type (.*) Analysis(.*)", heading)) { type <- type_to_numeric(insight::trim_ws(gsub("Type (.*) Analysis(.*)", "\\1", heading))) } else if (grepl("(.*)Type (.*) tests(.*)", heading)) { type <- type_to_numeric(insight::trim_ws(gsub("(.*)Type (.*) tests(.*)", "\\2", heading))) } } else if ("type" %in% names(model) && !is.null(model$type)) { type <- type_to_numeric(model$type) } } type } .anova_alternative 
<- function(params, alternative) { alternative_footer <- NULL if (!is.null(alternative)) { alternative <- insight::validate_argument( tolower(alternative), c("two.sided", "greater", "less") ) if (alternative != "two.sided") { ci_low <- which(endsWith(colnames(params), "CI_low")) ci_high <- which(endsWith(colnames(params), "CI_high")) if (length(ci_low) && length(ci_high)) { bound <- if (alternative == "less") params[[ci_low[1]]][1] else params[[ci_high[1]]][1] bound <- insight::format_value(bound, digits = 2) side <- if (alternative == "less") "lower" else "upper" alternative_footer <- sprintf( "One-sided CIs: %s bound fixed at [%s].", side, bound ) } } } alternative_footer } .check_anova_contrasts <- function(model, type) { # check only valid for anova tables of type III if (!is.null(type) && type == 3) { # check for interaction terms interaction_terms <- tryCatch( { insight::find_interactions(model, flatten = TRUE) }, error = function(e) { if (is.data.frame(model)) { if (any(grepl(":", row.names(model), fixed = TRUE))) { TRUE } else { NULL } } } ) # try to access data of model predictors predictors <- .safe(insight::get_predictors(model)) # if data available, check contrasts and mean centering if (is.null(predictors)) { treatment_contrasts_or_not_centered <- FALSE } else { treatment_contrasts_or_not_centered <- vapply(predictors, function(i) { if (is.factor(i)) { cn <- stats::contrasts(i) if (is.null(cn) || (all(cn %in% c(0, 1)))) { return(TRUE) } } else if (abs(mean(i, na.rm = TRUE)) > 1e-2) { return(TRUE) } FALSE }, TRUE) } # successfully checked predictors, or if not possible, at least found interactions? if (!is.null(interaction_terms) && (any(treatment_contrasts_or_not_centered) || is.null(predictors))) { insight::format_alert( "Type 3 ANOVAs only give sensible and informative results when covariates are mean-centered and factors are coded with orthogonal contrasts (such as those produced by `contr.sum`, `contr.poly`, or `contr.helmert`, but *not* by the default `contr.treatment`)." # nolint ) } } } .effectsizes_for_aov <- function(model, params, es_type = NULL, df_error = NULL, ci = NULL, alternative = NULL, verbose = TRUE, ...) { # user actually does not want to compute effect sizes if (is.null(es_type)) { return(params) } # is valid effect size? if (!all(es_type %in% c("eta", "omega", "epsilon", "f", "f2"))) { return(params) } insight::check_if_installed("effectsize") # set error-df, when provided. if (!is.null(df_error) && is.data.frame(model) && !any(c("DenDF", "den Df", "denDF", "df_error") %in% colnames(model))) { if (length(df_error) > nrow(model)) { insight::format_error( "Number of degrees of freedom in argument `df_error` is larger than number of parameters." ) } model$df_error <- df_error } # multiple effect sizes possible for (es in es_type) { fx <- effectsize::effectsize( model, type = es, ci = ci, alternative = alternative, verbose = verbose, ... 
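# dots are forwarded to effectsize, e.g. `partial = TRUE` or `generalized`
# for (partial/generalized) eta-squared variants (see documentation above)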
) params <- .add_effectsize_to_parameters(fx, params) # warn only once verbose <- FALSE } params } # internals -------------------------- # add effect size column and related CI to the parameters # data frame, automatically detecting the effect size name .add_effectsize_to_parameters <- function(fx, params) { if (!is.null(fx$CI_low)) { # find name of current effect size es <- effectsize::get_effectsize_name(colnames(fx)) # and add CI-name to effect size, to have specific # CI columns for this particular effect size ci_low <- paste0(gsub("_partial$", "", es), "_CI_low") ci_high <- paste0(gsub("_partial$", "", es), "_CI_high") # rename columns fx[[ci_low]] <- fx$CI_low fx[[ci_high]] <- fx$CI_high # delete old or duplicated columns fx$CI_low <- NULL fx$CI_high <- NULL fx$CI <- NULL } params$.id <- seq_len(nrow(params)) params <- merge( params, fx, all.x = TRUE, sort = FALSE, by = intersect(c("Response", "Group", "Parameter"), intersect(colnames(params), colnames(fx))) ) params <- params[order(params$.id), ] params$.id <- NULL params } .is_levenetest <- function(x) { inherits(x, "anova") && !is.null(attributes(x)$heading) && all(isTRUE(grepl("Levene's Test", attributes(x)$heading, fixed = TRUE))) } # data: A dataframe from `model_parameters` # ... Currently ignored .anova_table_wide <- function(data, ...) { wide_anova <- function(x) { # creating numerator and denominator degrees of freedom idxResid <- which(x$Parameter == "Residuals") if (length(idxResid)) { x$df_error <- x$df[idxResid] x$Sum_Squares_Error <- x$Sum_Squares[idxResid] x$Mean_Square_Error <- x$Mean_Square[idxResid] x <- x[-idxResid, ] } x } if ("Group" %in% colnames(data)) { data <- split(data, data$Group) data <- lapply(data, wide_anova) data <- Filter(function(x) nrow(x) >= 1L, data) cols <- unique(unlist(lapply(data, colnames))) data <- lapply(data, function(x) { x[, setdiff(cols, colnames(x))] <- NA x }) data <- do.call(rbind, data) } else { data <- wide_anova(data) } # reorder columns col_order <- union(c("Parameter", "F", "df", "df_error", "p"), names(data)) data[, col_order] } #' @keywords internal .extract_p_adjust_afex <- function(model, p_adjust) { if (is.null(p_adjust) && inherits(model, "afex_aov")) { p_adjust <- attr(model$anova_table, "p_adjust_method") if (p_adjust == "none") { p_adjust <- NULL } } p_adjust } parameters/R/methods_mgcv.R0000644000176200001440000000150214717111737015427 0ustar liggesusers#' @export model_parameters.gamm <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, verbose = TRUE, ...) { model <- model$gam class(model) <- c("gam", "lm", "glm") model_parameters( model, ci = ci, bootstrap = bootstrap, iterations = iterations, ... ) } #' @export ci.gamm <- ci.gamm4 #' @export standard_error.gamm <- standard_error.gamm4 #' @export p_value.gamm <- p_value.gamm4 #' @export simulate_model.gamm <- function(model, iterations = 1000, ...) { model <- model$gam class(model) <- c("gam", "lm", "glm") simulate_model(model, iterations = iterations, ...) } parameters/R/methods_scam.R0000644000176200001440000000030714717111737015420 0ustar liggesusers#' @export ci.scam <- ci.gam #' @export standard_error.scam <- standard_error.gam #' @export p_value.scam <- p_value.gam #' @export model_parameters.scam <- model_parameters.cgam parameters/R/print.parameters_model.R0000644000176200001440000005331315053035103017417 0ustar liggesusers#' @title Print model parameters #' @name print.parameters_model #' #' @description A `print()`-method for objects from [`model_parameters()`][model_parameters]. 
#' #' @param x,object An object returned by [`model_parameters()`][model_parameters]. #' @param split_components Logical, if `TRUE` (default), for models with #' multiple components (zero-inflation, smooth terms, ...), each component is #' printed in a separate table. If `FALSE`, model parameters are printed #' in a single table and a `Component` column is added to the output. #' @param select Determines which columns are printed and in which layout. #' There are three options for this argument: #' #' * **Selecting columns by name or index** #' #' `select` can be a character vector (or numeric index) of column names that #' should be printed, where columns are extracted from the data frame returned #' by `model_parameters()` and related functions. #' #' There are two pre-defined options for selecting columns: #' `select = "minimal"` prints coefficients, confidence intervals and #' p-values, while `select = "short"` prints coefficients, standard errors and #' p-values. #' #' * **A string expression with layout pattern** #' #' `select` is a string with "tokens" enclosed in braces. These tokens will be #' replaced by their associated columns, where the selected columns will be #' collapsed into one column. The following tokens are replaced by the related #' coefficients or statistics: `{estimate}`, `{se}`, `{ci}` (or `{ci_low}` and #' `{ci_high}`), `{p}` and `{stars}`. The token `{ci}` will be replaced by #' `{ci_low}, {ci_high}`. Example: `select = "{estimate}{stars} ({ci})"` #' #' It is possible to create multiple columns as well. A `|` separates values #' into new cells/columns. Example: `select = "{estimate} ({ci})|{p}"`. #' #' If `format = "html"`, a `<br>`
inserts a line break inside a cell. See #' 'Examples'. #' #' * **A string indicating a pre-defined layout** #' #' `select` can be one of the following string values, to create one of the #' following pre-defined column layouts: #' #' - `"ci"`: Estimates and confidence intervals, no asterisks for p-values. #' This is equivalent to `select = "{estimate} ({ci})"`. #' - `"se"`: Estimates and standard errors, no asterisks for p-values. This is #' equivalent to `select = "{estimate} ({se})"`. #' - `"ci_p"`: Estimates, confidence intervals and asterisks for p-values. This #' is equivalent to `select = "{estimate}{stars} ({ci})"`. #' - `"se_p"`: Estimates, standard errors and asterisks for p-values. This is #' equivalent to `select = "{estimate}{stars} ({se})"`. #' - `"ci_p2"`: Estimates, confidence intervals and numeric p-values, in two #' columns. This is equivalent to `select = "{estimate} ({ci})|{p}"`. #' - `"se_p2"`: Estimates, standard errors and numeric p-values, in two columns. #' This is equivalent to `select = "{estimate} ({se})|{p}"`. #' #' For `model_parameters()`, glue-like syntax is still experimental in the #' case of more complex models (like mixed models) and may not return expected #' results. #' @param show_sigma Logical, if `TRUE`, adds information about the residual #' standard deviation. #' @param show_formula Logical, if `TRUE`, adds the model formula to the output. #' @param caption Table caption as string. If `NULL`, depending on the model, #' either a default caption or no table caption is printed. Use `caption = ""` #' to suppress the table caption. #' @param footer Can either be `FALSE` or an empty string (i.e. `""`) to #' suppress the footer, `NULL` to print the default footer, or a string. The #' latter will combine the string value with the default footer. #' @param footer_digits Number of decimal places for values in the footer summary. #' @param groups Named list, can be used to group parameters in the printed output. #' List elements may either be character vectors that match the name of those #' parameters that belong to one group, or list elements can be row numbers #' of those parameter rows that should belong to one group. The names of the #' list elements will be used as group names, which will be inserted as "header #' row". A possible use case might be to emphasize focal predictors and control #' variables, see 'Examples'. Parameters will be re-ordered according to the #' order used in `groups`, while all non-matching parameters will be added #' to the end. #' @param column_width Width of table columns. Can be either `NULL`, a named #' numeric vector, or `"fixed"`. If `NULL`, the width for each table column is #' adjusted to the minimum required width. If a named numeric vector, value #' names are matched against column names, and for each match, the specified #' width is used. If `"fixed"`, and the table is split into multiple components, #' columns across all table components are adjusted to have the same width. #' @param digits,ci_digits,p_digits Number of digits for rounding or #' significant figures. May also be `"signif"` to return significant #' figures or `"scientific"` to return scientific notation. Control the #' number of digits by adding the value as suffix, e.g. `digits = "scientific4"` #' to have scientific notation with 4 decimal places, or `digits = "signif5"` #' for 5 significant figures (see also [signif()]). #' @param pretty_names Can be `TRUE`, which will return "pretty" (i.e. more #' human-readable) parameter names. 
Or `"labels"`, in which case value and variable #' labels will be used as parameters names. The latter only works for "labelled" #' data, i.e. if the data used to fit the model had `"label"` and `"labels"` #' attributes. See also section _Global Options to Customize Messages when Printing_. #' @param include_reference Logical, if `TRUE`, the reference level of factors will #' be added to the parameters table. This is only relevant for models with #' categorical predictors. The coefficient for the reference level is always #' `0` (except when `exponentiate = TRUE`, then the coefficient will be `1`), #' so this is just for completeness. #' @param align Only applies to HTML tables. May be one of `"left"`, #' `"right"` or `"center"`. #' @param subtitle Table title (same as caption) and subtitle, as strings. If `NULL`, #' no title or subtitle is printed, unless it is stored as attributes (`table_title`, #' or its alias `table_caption`, and `table_subtitle`). If `x` is a list of #' data frames, `caption` may be a list of table captions, one for each table. #' @param font_size For HTML tables, the font size. #' @param line_padding For HTML tables, the distance (in pixel) between lines. #' @param column_labels Labels of columns for HTML tables. If `NULL`, automatic #' column names are generated. See 'Examples'. #' @param ... Arguments passed down to [`format.parameters_model()`], #' [`insight::format_table()`] and [`insight::export_table()`] #' #' @inheritParams insight::format_table #' @inheritParams compare_parameters #' @inheritParams insight::export_table #' @inheritParams display.parameters_model #' #' @inheritSection format_parameters Interpretation of Interaction Terms #' @inheritSection model_parameters Labeling the Degrees of Freedom #' #' @section Global Options to Customize Messages and Tables when Printing: #' The `verbose` argument can be used to display or silence messages and #' warnings for the different functions in the **parameters** package. However, #' some messages providing additional information can be displayed or suppressed #' using `options()`: #' #' - `parameters_info`: `options(parameters_info = TRUE)` will override the #' `include_info` argument in `model_parameters()` and always show the model #' summary for non-mixed models. #' #' - `parameters_mixed_info`: `options(parameters_mixed_info = TRUE)` will #' override the `include_info` argument in `model_parameters()` for mixed #' models, and will then always show the model summary. #' #' - `parameters_cimethod`: `options(parameters_cimethod = TRUE)` will show the #' additional information about the approximation method used to calculate #' confidence intervals and p-values. Set to `FALSE` to hide this message when #' printing `model_parameters()` objects. #' #' - `parameters_exponentiate`: `options(parameters_exponentiate = TRUE)` will #' show the additional information on how to interpret coefficients of models #' with log-transformed response variables or with log-/logit-links when the #' `exponentiate` argument in `model_parameters()` is not `TRUE`. Set this option #' to `FALSE` to hide this message when printing `model_parameters()` objects. #' #' There are further options that can be used to modify the default behaviour #' for printed outputs: #' #' - `parameters_labels`: `options(parameters_labels = TRUE)` will use variable #' and value labels for pretty names, if data is labelled. If no labels #' available, default pretty names are used. 
#' #' - `parameters_interaction`: `options(parameters_interaction = <character>)` #' will replace the interaction mark (by default, `*`) with the related character. #' #' - `parameters_select`: `options(parameters_select = <value>)` will set the #' default for the `select` argument. See the argument's documentation for available #' options. #' #' - `easystats_table_width`: `options(easystats_table_width = <number>)` will #' set the default width for tables in text-format, i.e. for most of the outputs #' printed to console. If not specified, tables will be adjusted to the current #' available width, e.g. of the console (or any other source for textual #' output, like markdown files). The argument `table_width` can also be used in #' most `print()` methods to specify the table width as desired. #' #' - `insight_use_symbols`: `options(insight_use_symbols = TRUE)` will try to #' print unicode-chars for symbols as column names, wherever possible (e.g., #' \ifelse{html}{\out{&omega;}}{\eqn{\omega}} instead of `Omega`). #' #' @details `summary()` is a convenient shortcut for #' `print(object, select = "minimal", show_sigma = TRUE, show_formula = TRUE)`. #' #' @return Invisibly returns the original input object. #' #' @seealso See also [`display()`][display.parameters_model]. #' #' @examplesIf require("gt", quietly = TRUE) && require("glmmTMB", quietly = TRUE) #' \donttest{ #' library(parameters) #' model <- glmmTMB::glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' mp <- model_parameters(model) #' #' print(mp, pretty_names = FALSE) #' #' print(mp, split_components = FALSE) #' #' print(mp, select = c("Parameter", "Coefficient", "SE")) #' #' print(mp, select = "minimal") #' #' #' # group parameters ------ #' #' data(iris) #' model <- lm( #' Sepal.Width ~ Sepal.Length + Species + Petal.Length, #' data = iris #' ) #' # don't select "Intercept" parameter #' mp <- model_parameters(model, parameters = "^(?!\\(Intercept)") #' groups <- list(`Focal Predictors` = c(1, 4), Controls = c(2, 3)) #' print(mp, groups = groups) #' #' # only show coefficients, CI and p, #' # put non-matched parameters to the end #' #' data(mtcars) #' mtcars$cyl <- as.factor(mtcars$cyl) #' mtcars$gear <- as.factor(mtcars$gear) #' model <- lm(mpg ~ hp + gear * vs + cyl + drat, data = mtcars) #' #' # don't select "Intercept" parameter #' mp <- model_parameters(model, parameters = "^(?!\\(Intercept)") #' print(mp, groups = list( #' Engine = c(5, 6, 4, 1), #' Interactions = c(8, 9) #' )) #' } #' #' #' # custom column layouts ------ #' #' data(iris) #' lm1 <- lm(Sepal.Length ~ Species, data = iris) #' lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) #' #' # custom style #' result <- compare_parameters(lm1, lm2, select = "{estimate}{stars} ({se})") #' print(result) #' #' \donttest{ #' # custom style, in HTML #' result <- compare_parameters(lm1, lm2, select = "{estimate}<br>
({se})|{p}") #' print_html(result) #' } #' @export print.parameters_model <- function(x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, footer = NULL, digits = 2, ci_digits = digits, p_digits = 3, footer_digits = 3, show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, groups = NULL, column_width = NULL, ci_brackets = c("[", "]"), include_reference = FALSE, ...) { # save original input orig_x <- x # check options --------------- # check if pretty names should be replaced by value labels # (if we have labelled data) if (isTRUE(getOption("parameters_labels", FALSE)) || identical(pretty_names, "labels")) { attr(x, "pretty_names") <- attr(x, "pretty_labels", exact = TRUE) pretty_names <- TRUE } # select which columns to print if (is.null(select)) { select <- getOption("parameters_select") } # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", digits) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", p_digits) } if (missing(footer_digits)) { footer_digits <- .additional_arguments(x, "footer_digits", footer_digits) } # table caption table_caption <- .print_caption(x, caption, format = "text") # main table formatted_table <- format( x = x, pretty_names = pretty_names, split_components = split_components, select = select, digits = digits, ci_digits = ci_digits, p_digits = p_digits, zap_small = zap_small, ci_width = "auto", ci_brackets = ci_brackets, format = "text", groups = groups, include_reference = include_reference, ... ) # if we have multiple components, we can align colum width across components here if (!is.null(column_width) && all(column_width == "fixed") && is.list(formatted_table)) { column_width <- .find_min_colwidth(formatted_table) } # footer footer_stats <- .print_footer( x, digits = footer_digits, show_sigma = show_sigma, show_formula = show_formula ) # check if footer should be printed at all. can be FALSE, or "" to suppress footer if (isFALSE(footer)) { footer <- "" } if (!identical(footer, "")) { if (is.null(footer)) { footer <- footer_stats } else { footer <- paste0("\n", footer, "\n", footer_stats) } } # get attributes verbose <- .additional_arguments(x, "verbose", TRUE) # print main table cat(insight::export_table( formatted_table, format = "text", caption = table_caption, footer = footer, width = column_width, ... )) # inform about CI and df approx. if (isTRUE(verbose)) { .print_footer_cimethod(x) .print_footer_exp(x) } invisible(orig_x) } #' @rdname print.parameters_model #' @export summary.parameters_model <- function(object, ...) { print( x = object, select = "minimal", show_sigma = TRUE, show_formula = TRUE, ... ) } #' @export print.parameters_simulate <- print.parameters_model #' @export print.parameters_brms_meta <- print.parameters_model # Random effects ------------------ #' @export print.parameters_coef <- function(x, ...) { cat(insight::export_table(format(x, ...), ...)) } #' @export print.parameters_random <- function(x, digits = 2, ...) { .print_random_parameters(x, digits = digits) invisible(x) } # helper -------------------- .print_footer <- function(x, digits = 3, show_sigma = FALSE, show_formula = FALSE, format = "text") { # get attributes model_sigma <- attributes(x)$sigma show_summary <- isTRUE(attributes(x)$show_summary) verbose <- .additional_arguments(x, "verbose", TRUE) # override defaults. 
if argument "summary" is called in "model_parameters()", # this overrides the defaults... show_sigma <- ifelse(show_summary, TRUE, show_sigma) show_formula <- ifelse(show_summary, TRUE, show_formula) show_r2 <- .additional_arguments(x, "show_summary", FALSE) show_rmse <- .additional_arguments(x, "show_summary", FALSE) # set defaults, if necessary if (is.null(model_sigma)) { show_sigma <- FALSE } .format_footer( x, digits = digits, verbose = verbose, show_sigma = show_sigma, show_formula = show_formula, show_r2 = show_r2, show_rmse = show_rmse, format = format ) } .print_caption <- function(x, caption = NULL, format = "text") { no_caption <- attributes(x)$no_caption # no table-title for certain model tables, indicated by the no_caption attribute if (isTRUE(no_caption)) { return(NULL) } title_attribute <- attributes(x)$title[1] # check effects and component parts if (!is.null(x$Effects) && all(x$Effects == "random")) { eff_name <- "Random" } else { eff_name <- "Fixed" } if (!is.null(x$Component) && all(x$Component == "zero_inflated")) { zero_inflated <- " (Zero-Inflation Component)" } else { zero_inflated <- "" } # caption = NULL, set default for HTML tables if (identical(format, "html") && is.null(caption)) { table_caption <- "Model Summary" } else if (isTRUE(attributes(x)$ordinal_model)) { table_caption <- "" # caption is NULL, set default title, using title-attribute } else if (!is.null(title_attribute) && is.null(caption)) { if (length(title_attribute) == 1 && title_attribute == "") { table_caption <- NULL } else { table_caption <- title_attribute } # if caption is not empty, use it as title } else if (!is.null(caption) && caption != "") { table_caption <- caption # no table-title if caption is empty string } else if (!is.null(caption) && caption == "") { table_caption <- NULL # default title for sub-components of models } else if (identical(format, "text")) { table_caption <- c(paste0("# ", eff_name, " Effects", zero_inflated), "blue") } else { table_caption <- paste0(eff_name, " Effects", zero_inflated) } table_caption } #' @keywords internal .print_random_parameters <- function(random_params, digits = 2) { insight::print_color("# Random Effects\n\n", "blue") # create SD random_params$SD <- NA var_components <- random_params$Description %in% c("Within-Group Variance", "Between-Group Variance") random_params$SD[var_components] <- sqrt(random_params$Value[var_components]) # format values random_params$Value <- format(sprintf("%g", round(random_params$Value, digits = digits)), justify = "right") random_params$SD[var_components] <- format( sprintf("(%g)", round(random_params$SD[var_components], digits = digits)), justify = "right" ) # create summary-information for each component random_params$Line <- "" random_params$Term[is.na(random_params$Term)] <- "" random_params$SD[is.na(random_params$SD)] <- "" non_empty <- random_params$Term != "" & random_params$Type != "" # nolint random_params$Line[non_empty] <- sprintf("%s (%s)", random_params$Type[non_empty], random_params$Term[non_empty]) non_empty <- random_params$Term != "" & random_params$Type == "" # nolint random_params$Line[non_empty] <- sprintf("%s", random_params$Term[non_empty]) # final fix, indentions random_params$Line <- sprintf(" %s", format(random_params$Line)) max_len <- max(nchar(random_params$Line)) + 2 out <- split(random_params, factor(random_params$Description, levels = unique(random_params$Description))) for (i in out) { if ("Within-Group Variance" %in% i$Description) { insight::print_color(format("Within-Group 
Variance", width = max_len), color = "blue") cat(sprintf("%s %s\n", i$Value, i$SD)) } else if ("Between-Group Variance" %in% i$Description) { insight::print_color("Between-Group Variance\n", "blue") for (j in seq_len(nrow(i))) { cat(sprintf("%s %s %s\n", i$Line[j], i$Value[j], i$SD[j])) } } else if ("Correlations" %in% i$Description) { insight::print_color("Correlations\n", "blue") for (j in seq_len(nrow(i))) { cat(sprintf("%s %s\n", i$Line[j], i$Value[j])) } } else if ("N" %in% i$Description) { insight::print_color("N (groups per factor)\n", "blue") for (j in seq_len(nrow(i))) { cat(sprintf(" %s%s\n", format(i$Term[j], width = max_len - 2), i$Value[j])) } } else if ("Observations" %in% i$Description) { insight::print_color(format("Observations", width = max_len), color = "blue") cat(sprintf("%s\n", i$Value)) } } } .find_min_colwidth <- function(formatted_table) { shared_cols <- unique(unlist(lapply(formatted_table, colnames))) col_width <- rep(NA, length(shared_cols)) for (i in seq_along(shared_cols)) { col_width[i] <- max(unlist(lapply(formatted_table, function(j) { column <- j[[shared_cols[i]]] if (is.null(column)) { NA } else { max(nchar(column)) } }))) } stats::na.omit(stats::setNames(col_width, shared_cols)) } parameters/R/methods_htest.R0000644000176200001440000005425315001670564015630 0ustar liggesusers#' Parameters from hypothesis tests #' #' Parameters of h-tests (correlations, t-tests, chi-squared, ...). #' #' @param model Object of class `htest` or `pairwise.htest`. #' @param bootstrap Should estimates be bootstrapped? #' @param ci Level of confidence intervals for effect size statistic. Currently #' only applies to objects from `chisq.test()` or `oneway.test()`. #' @inheritParams model_parameters.default #' @inheritParams model_parameters.aov #' @inherit effectsize::effectsize details #' #' @examples #' #' model <- cor.test(mtcars$mpg, mtcars$cyl, method = "pearson") #' model_parameters(model) #' #' model <- t.test(iris$Sepal.Width, iris$Sepal.Length) #' model_parameters(model, es_type = "hedges_g") #' #' model <- t.test(mtcars$mpg ~ mtcars$vs) #' model_parameters(model, es_type = "hedges_g") #' #' model <- t.test(iris$Sepal.Width, mu = 1) #' model_parameters(model, es_type = "cohens_d") #' #' data(airquality) #' airquality$Month <- factor(airquality$Month, labels = month.abb[5:9]) #' model <- pairwise.t.test(airquality$Ozone, airquality$Month) #' model_parameters(model) #' #' smokers <- c(83, 90, 129, 70) #' patients <- c(86, 93, 136, 82) #' model <- suppressWarnings(pairwise.prop.test(smokers, patients)) #' model_parameters(model) #' #' model <- suppressWarnings(chisq.test(table(mtcars$am, mtcars$cyl))) #' model_parameters(model, es_type = "cramers_v") #' #' @return A data frame of indices related to the model's parameters. #' #' @export model_parameters.htest <- function(model, ci = 0.95, alternative = NULL, bootstrap = FALSE, es_type = NULL, verbose = TRUE, ...) { if (bootstrap) { insight::format_error("Bootstrapped h-tests are not yet implemented.") } else { parameters <- .extract_parameters_htest( model, es_type = es_type, ci = ci, alternative = alternative, verbose = verbose, ... ) } if (!is.null(parameters$Method)) { parameters$Method <- insight::trim_ws(gsub("with continuity correction", "", parameters$Method, fixed = TRUE)) } # save alternative parameters$Alternative <- model$alternative parameters <- .add_htest_parameters_attributes(parameters, model, ci, ...) 
class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } #' @export standard_error.htest <- function(model, ...) { NULL } #' @export p_value.htest <- function(model, ...) { model$p.value } # .pairwise.htest -------------------- #' @export model_parameters.pairwise.htest <- function(model, verbose = TRUE, ...) { m <- model$p.value parameters <- data.frame( Group1 = rep(rownames(m), each = ncol(m)), Group2 = rep(colnames(m), times = nrow(m)), p = as.numeric(t(m)), stringsAsFactors = FALSE ) parameters <- stats::na.omit(parameters) parameters <- .add_htest_attributes(parameters, model, p_adjust = model$p.adjust.method) class(parameters) <- c("parameters_model", "see_parameters_model", class(parameters)) parameters } # survey-table -------------------- #' @export model_parameters.svytable <- function(model, verbose = TRUE, ...) { model_parameters(summary(model)$statistic, verbose = verbose, ...) } # ==== extract parameters ==== #' @keywords internal .extract_parameters_htest <- function(model, es_type = NULL, ci = 0.95, alternative = NULL, verbose = TRUE, ...) { m_info <- insight::model_info(model, verbose = FALSE) if (!is.null(model$method) && startsWith(model$method, "Box-")) { # Box-Pierce --------- out <- .extract_htest_boxpierce(model) } else if (m_info$is_correlation) { # correlation --------- out <- .extract_htest_correlation(model) } else if (.is_levenetest(model)) { # levene's test --------- out <- .extract_htest_levenetest(model) } else if (m_info$is_ttest) { # t-test ----------- out <- .extract_htest_ttest(model) } else if (m_info$is_ranktest) { # rank-test (kruskal / wilcox / friedman) ----------- out <- .extract_htest_ranktest(model) } else if (m_info$is_onewaytest) { # one-way test ----------- out <- .extract_htest_oneway(model) } else if (m_info$is_chi2test) { # chi2- and mcnemar-test ----------- out <- .extract_htest_chi2(model) } else if (m_info$is_proptest) { # test of proportion -------------- out <- .extract_htest_prop(model) } else if (m_info$is_binomtest) { # exact binomial test -------------- out <- .extract_htest_binom(model) } else if (m_info$is_ftest) { # F test for equal variances -------------- out <- .extract_htest_vartest(model) } else { insight::format_error("`model_parameters()` not implemented for such h-tests yet.") } out <- .add_effectsize_htest(model, out, es_type = es_type, ci = ci, alternative = alternative, verbose = verbose, ... 
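# dots may carry further effectsize options, e.g. `adjust = TRUE` for
# bias-corrected effect sizes (see documentation of the `...` argument)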
) row.names(out) <- NULL out } # extract htest Box-Pierce ---------------------- #' @keywords internal .extract_htest_boxpierce <- function(model) { out <- data.frame( Parameter = model$data.name, Chi2 = model$statistic, df_error = model$parameter, p = model$p.value, Method = model$method, stringsAsFactors = FALSE ) attr(out, "htest_type") <- "boxpiercetest" out } # extract htest correlation ---------------------- #' @keywords internal .extract_htest_correlation <- function(model) { data_names <- unlist(strsplit(model$data.name, " (and|by) ")) out <- data.frame( Parameter1 = data_names[1], Parameter2 = data_names[2], stringsAsFactors = FALSE ) if (model$method == "Pearson's Chi-squared test") { out$Chi2 <- model$statistic out$df_error <- model$parameter out$p <- model$p.value } else if (grepl("Pearson", model$method, fixed = TRUE)) { out$r <- model$estimate out$t <- model$statistic out$df_error <- model$parameter out$p <- model$p.value out$CI_low <- model$conf.int[1] out$CI_high <- model$conf.int[2] } else if (grepl("Spearman", model$method, fixed = TRUE)) { out$rho <- model$estimate out$S <- model$statistic out$df_error <- model$parameter out$p <- model$p.value } else { out$tau <- model$estimate out$z <- model$statistic out$df_error <- model$parameter out$p <- model$p.value } out$Method <- model$method # reorder col_order <- c( "Parameter1", "Parameter2", "Parameter", "r", "rho", "tau", "CI_low", "CI_high", "t", "z", "S", "df_error", "p", "Method", "method" ) out <- out[col_order[col_order %in% names(out)]] attr(out, "htest_type") <- "cortest" out } # extract htest ranktest ---------------------- #' @keywords internal .extract_htest_ranktest <- function(model) { # survey if (grepl("design-based", tolower(model$method), fixed = TRUE)) { data_names <- gsub("~", "", unlist(strsplit(model$data.name, " + ", fixed = TRUE)), fixed = TRUE) out <- data.frame( Parameter1 = data_names[1], Parameter2 = data_names[2], Statistic = model$statistic[[1]], df_error = model$parameter[[1]], Method = model$method, p = model$p.value[[1]], stringsAsFactors = FALSE ) out$Method <- gsub("KruskalWallis", "Kruskal-Wallis", out$Method, fixed = TRUE) colnames(out)[colnames(out) == "Statistic"] <- names(model$statistic)[1] } else { if (grepl(" (and|by) ", model$data.name)) { data_names <- unlist(strsplit(model$data.name, " (and|by) ")) out <- data.frame( Parameter1 = data_names[1], Parameter2 = data_names[2], stringsAsFactors = FALSE ) } else { out <- data.frame( Parameter = model$data.name, stringsAsFactors = FALSE ) } if (grepl("Wilcoxon", model$method, fixed = TRUE)) { out$W <- model$statistic[[1]] out$df_error <- model$parameter[[1]] out$p <- model$p.value[[1]] } else if (grepl("Kruskal-Wallis", model$method, fixed = TRUE) || grepl("Friedman", model$method, fixed = TRUE)) { out$Chi2 <- model$statistic[[1]] out$df_error <- model$parameter[[1]] out$p <- model$p.value[[1]] } out$Method <- model$method } attr(out, "htest_type") <- "ranktest" out } # extract htest leveneTest ---------------------- #' @keywords internal .extract_htest_levenetest <- function(model) { out <- data.frame( df = model$Df[1], df_error = model$Df[2], `F` = model$`F value`[1], # nolint p = model$`Pr(>F)`[1], Method = "Levene's Test for Homogeneity of Variance", stringsAsFactors = FALSE ) attr(out, "htest_type") <- "levenetest" out } # extract htest var.test ---------------------- #' @keywords internal .extract_htest_vartest <- function(model) { out <- data.frame( Parameter = model$data.name, Estimate = model$estimate, df = model$parameter[1], 
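# for var.test(), "parameter" holds both degrees of freedom of the
# F-distribution: the numerator df first, then the denominator df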
df_error = model$parameter[2], `F` = model$statistic, # nolint CI_low = model$conf.int[1], CI_high = model$conf.int[2], p = model$p.value, Method = "F test to compare two variances", stringsAsFactors = FALSE ) attr(out, "htest_type") <- "vartest" out } # extract htest ttest ---------------------- #' @keywords internal .extract_htest_ttest <- function(model, standardized_d = NULL, hedges_g = NULL) { # survey if (grepl("design-based", tolower(model$method), fixed = TRUE)) { data_names <- unlist(strsplit(model$data.name, " ~ ", fixed = TRUE)) out <- data.frame( Parameter1 = data_names[1], Parameter2 = data_names[2], Difference = model$estimate[[1]], t = model$statistic[[1]], df_error = model$parameter[[1]], Method = model$method, p = model$p.value[[1]], stringsAsFactors = FALSE ) out$Method <- gsub("KruskalWallis", "Kruskal-Wallis", out$Method, fixed = TRUE) colnames(out)[colnames(out) == "Statistic"] <- names(model$statistic)[1] } else { paired_test <- startsWith(model$method, "Paired") && length(model$estimate) == 1 if (grepl(" and ", model$data.name, fixed = TRUE) && isFALSE(paired_test)) { data_names <- unlist(strsplit(model$data.name, " and ", fixed = TRUE)) out <- data.frame( Parameter1 = data_names[1], Parameter2 = data_names[2], Mean_Parameter1 = model$estimate[1], Mean_Parameter2 = model$estimate[2], Difference = model$estimate[1] - model$estimate[2], CI_low = model$conf.int[1], CI_high = model$conf.int[2], t = model$statistic, df_error = model$parameter, p = model$p.value, Method = model$method, stringsAsFactors = FALSE ) attr(out, "mean_group_values") <- gsub("mean in group ", "", names(model$estimate), fixed = TRUE) } else if (isTRUE(paired_test)) { data_names <- unlist(strsplit(model$data.name, " (and|by) ")) out <- data.frame( Parameter = data_names[1], Group = data_names[2], Difference = model$estimate, t = model$statistic, df_error = model$parameter, p = model$p.value, CI_low = model$conf.int[1], CI_high = model$conf.int[2], Method = model$method, stringsAsFactors = FALSE ) } else if (grepl(" by ", model$data.name, fixed = TRUE)) { if (length(model$estimate) == 1) { data_names <- unlist(strsplit(model$data.name, " by ", fixed = TRUE)) out <- data.frame( Parameter = data_names[1], Group = data_names[2], Difference = model$estimate, CI = 0.95, CI_low = as.vector(model$conf.int[, 1]), CI_high = as.vector(model$conf.int[, 2]), t = model$statistic, df_error = model$parameter, p = model$p.value, Method = model$method, stringsAsFactors = FALSE ) } else { data_names <- unlist(strsplit(model$data.name, " by ", fixed = TRUE)) out <- data.frame( Parameter = data_names[1], Group = data_names[2], Mean_Group1 = model$estimate[1], Mean_Group2 = model$estimate[2], Difference = model$estimate[1] - model$estimate[2], CI_low = model$conf.int[1], CI_high = model$conf.int[2], t = model$statistic, df_error = model$parameter, p = model$p.value, Method = model$method, stringsAsFactors = FALSE ) attr(out, "mean_group_values") <- gsub("mean in group ", "", names(model$estimate), fixed = TRUE) } } else { out <- data.frame( Parameter = model$data.name, Mean = model$estimate, mu = model$null.value, Difference = model$estimate - model$null.value, CI_low = model$conf.int[1], CI_high = model$conf.int[2], t = model$statistic, df_error = model$parameter, p = model$p.value, Method = model$method, stringsAsFactors = FALSE ) } } attr(out, "htest_type") <- "ttest" out } # extract htest oneway ---------------------- #' @keywords internal .extract_htest_oneway <- function(model) { out <- data.frame( `F` = 
model$statistic, # nolint df = model$parameter[1], df_error = model$parameter[2], p = model$p.value, Method = model$method, stringsAsFactors = FALSE ) attr(out, "htest_type") <- "onewaytest" out } # extract htest chi2 ---------------------- #' @keywords internal .extract_htest_chi2 <- function(model) { # survey-chisq-test if ((any("observed" %in% names(model)) && inherits(model$observed, "svytable")) || any(startsWith(model$data.name, "svychisq"))) { if (grepl("Pearson's X", model$method, fixed = TRUE)) { model$method <- gsub("(Pearson's X\\^2: )(.*)", "Pearson's Chi2 \\(\\2\\)", model$method) } if (names(model$statistic) == "F") { out <- data.frame( `F` = model$statistic, # nolint df = model$parameter[1], df_error = model$parameter[2], p = model$p.value, Method = model$method, stringsAsFactors = FALSE ) } else { out <- data.frame( Chi2 = model$statistic, df = model$parameter, p = model$p.value, Method = model$method, stringsAsFactors = FALSE ) } } else if (!is.null(model$estimate) && identical(names(model$estimate), "odds ratio")) { out <- data.frame( `Odds Ratio` = model$estimate, # CI = attributes(model$conf.int)$conf.level, CI_low = model$conf.int[1], CI_high = model$conf.int[2], p = model$p.value, Method = model$method, stringsAsFactors = FALSE, check.names = FALSE ) } else { out <- data.frame( Chi2 = model$statistic, df = model$parameter, p = model$p.value, Method = model$method, stringsAsFactors = FALSE ) } attr(out, "htest_type") <- "chi2test" out } # extract htest prop ---------------------- #' @keywords internal .extract_htest_prop <- function(model) { out <- data.frame( Proportion = paste(insight::format_value(model$estimate, as_percent = TRUE), collapse = " / "), stringsAsFactors = FALSE ) if (length(model$estimate) == 2) { out$Difference <- insight::format_value( abs(model$estimate[1] - model$estimate[2]), as_percent = TRUE ) } if (!is.null(model$conf.int)) { out$CI_low <- model$conf.int[1] out$CI_high <- model$conf.int[2] } out$Chi2 <- model$statistic out$df <- model$parameter[1] out$Null_value <- model$null.value out$p <- model$p.value out$Method <- model$method attr(out, "htest_type") <- "proptest" out } # extract htest binom ---------------------- #' @keywords internal .extract_htest_binom <- function(model) { out <- data.frame( Probability = model$estimate, CI_low = model$conf.int[1], CI_high = model$conf.int[2], Success = model$statistic, Trials = model$parameter, stringsAsFactors = FALSE ) out$Null_value <- model$null.value out$p <- model$p.value out$Method <- model$method attr(out, "htest_type") <- "binomtest" out } # ==== effectsizes ===== .add_effectsize_htest <- function(model, out, es_type = NULL, ci = 0.95, alternative = NULL, verbose = TRUE, ...) { # check if effect sizes are requested if (!requireNamespace("effectsize", quietly = TRUE) || is.null(es_type)) { return(out) } # return on invalid options. We may have partial matching with argument # `effects` for `es_type`, and thus all "effects" options should be # ignored. if (es_type %in% c("fixed", "random", "all")) { return(out) } # try to extract effectsize es <- tryCatch( { effectsize::effectsize( model, type = es_type, ci = ci, alternative = alternative, verbose = verbose, ... 
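# remaining arguments are passed through; if the effect size cannot be
# computed, the error is caught below and turned into an informative alert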
) }, error = function(e) { if (verbose) { msg <- c( paste0("Could not compute effectsize ", effectsize::get_effectsize_label(es_type), "."), paste0("Possible reason: ", e$message) ) insight::format_alert(msg) } NULL } ) # return if not successful if (is.null(es)) { return(out) } ## TODO: check if effectsize prefixes are correct @mattansb # Find prefix for CI-columns prefix <- switch(es_type, cohens_g = "Cohens_", cramers_v = "Cramers_", phi = "phi_", cohens_d = "d_", hedges_g = "g_", rank_biserial = "rank_biserial_", rank_epsilon_squared = "rank_epsilon_squared_", kendalls_w = "W_", omega = "Omega2_", eta = "Eta2_", epsilon = "Epsilon2_" ) es$CI <- NULL ci_cols <- startsWith(names(es), "CI") es_ci_cols <- paste0(prefix, names(es)[ci_cols]) names(es)[ci_cols] <- es_ci_cols out <- cbind(out, es) # compose effect size columns es_columns <- unique(c(effectsize::get_effectsize_name(colnames(es)), es_ci_cols)) # reorder col_order <- c( "Parameter1", "Parameter2", "Parameter", "F", "Chi2", "Group", "Mean_Parameter1", "Mean_Parameter2", "Mean_Group1", "Mean_Group2", "mu", "Difference", "W", "CI_low", "CI_high", es_columns, "t", "df", "df_error", "p", "Method", "method" ) out <- out[col_order[col_order %in% names(out)]] out } # ==== add attributes ==== #' @keywords internal .add_htest_parameters_attributes <- function(params, model, ci = 0.95, ...) { attr(params, "title") <- unique(params$Method) attr(params, "model_class") <- class(model) attr(params, "alternative") <- model$alternative if (!is.null(model$alternative)) { h1_text <- "Alternative hypothesis: " if (is.null(model$null.value)) { h1_text <- paste0(h1_text, model$alternative) } else if (length(model$null.value) == 1L) { alt.char <- switch(model$alternative, two.sided = "not equal to", less = "less than", greater = "greater than" ) h1_text <- paste0(h1_text, "true ", names(model$null.value), " is ", alt.char, " ", model$null.value) } else { h1_text <- paste0(h1_text, model$alternative) } attr(params, "text_alternative") <- h1_text } dot.arguments <- lapply(match.call(expand.dots = FALSE)[["..."]], function(x) x) if ("digits" %in% names(dot.arguments)) { attr(params, "digits") <- eval(dot.arguments[["digits"]]) } else { attr(params, "digits") <- 2 } if ("ci_digits" %in% names(dot.arguments)) { attr(params, "ci_digits") <- eval(dot.arguments[["ci_digits"]]) } else { attr(params, "ci_digits") <- NULL } if ("p_digits" %in% names(dot.arguments)) { attr(params, "p_digits") <- eval(dot.arguments[["p_digits"]]) } else { attr(params, "p_digits") <- 3 } attr(params, "ci") <- ci attr(params, "ci_test") <- attributes(model$conf.int)$conf.level # add CI, and reorder if (!"CI" %in% colnames(params) && length(ci) == 1) { ci_pos <- grep("CI_low", colnames(params), fixed = TRUE) if (length(ci_pos)) { if (length(ci_pos) > 1) { ci_pos <- ci_pos[1] } params$CI <- ci a <- attributes(params) params <- params[c(1:(ci_pos - 1), ncol(params), ci_pos:(ncol(params) - 1))] attributes(params) <- utils::modifyList(a, attributes(params)) } } params } #' @keywords internal .add_htest_attributes <- function(params, model, p_adjust = NULL, verbose = TRUE, ...) 
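# stores the p-adjustment method and user-specified formatting options
# (digits, ci_digits, p_digits, s_value) as attributes, which the print-
# and format-methods pick up later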
{ dot.arguments <- lapply(match.call(expand.dots = FALSE)[["..."]], function(x) x) attr(params, "p_adjust") <- p_adjust attr(params, "model_class") <- class(model) attr(params, "title") <- params$Method if ("digits" %in% names(dot.arguments)) { attr(params, "digits") <- eval(dot.arguments[["digits"]]) } else { attr(params, "digits") <- 2 } if ("ci_digits" %in% names(dot.arguments)) { attr(params, "ci_digits") <- eval(dot.arguments[["ci_digits"]]) } else { attr(params, "ci_digits") <- NULL } if ("p_digits" %in% names(dot.arguments)) { attr(params, "p_digits") <- eval(dot.arguments[["p_digits"]]) } else { attr(params, "p_digits") <- 3 } if ("s_value" %in% names(dot.arguments)) { attr(params, "s_value") <- eval(dot.arguments[["s_value"]]) } params } parameters/R/utils.R0000644000176200001440000001477015004374217014115 0ustar liggesusers# small wrapper around this commonly used try-catch .safe <- function(code, on_error = NULL) { if (isTRUE(getOption("easystats_errors", FALSE)) && is.null(on_error)) { code } else { tryCatch(code, error = function(e) on_error) } } #' help-functions #' @keywords internal .data_frame <- function(...) { x <- data.frame(..., stringsAsFactors = FALSE) rownames(x) <- NULL x } #' Safe transformation from factor/character to numeric #' #' @keywords internal .factor_to_dummy <- function(x) { if (is.numeric(x)) { return(x) } # get unique levels / values values <- if (is.factor(x)) { levels(x) } else { stats::na.omit(unique(x)) } dummy <- as.data.frame(do.call(cbind, lapply(values, function(i) { out <- rep(0, length(x)) out[is.na(x)] <- NA out[x == i] <- 1 out }))) colnames(dummy) <- values dummy } #' @keywords internal .rename_values <- function(x, old, new) { x[x %in% old] <- new x } #' for models with zero-inflation component, return required component of model-summary #' @keywords internal .filter_component <- function(dat, component) { switch(component, conditional = dat[dat$Component == "conditional", ], zi = , zero_inflated = dat[dat$Component == "zero_inflated", ], dat ) } # Find log-terms inside model formula, and return "clean" term names .log_terms <- function(model) { x <- insight::find_terms(model, flatten = TRUE, verbose = FALSE) gsub("^log\\((.*)\\)", "\\1", grep("^log\\((.*)\\)", x, value = TRUE)) } # Execute a function but store warnings (https://stackoverflow.com/a/4947528/4198688) #' @keywords internal .catch_warnings <- function(expr) { myWarnings <- NULL wHandler <- function(w) { myWarnings <<- c(myWarnings, list(w)) invokeRestart("muffleWarning") } val <- withCallingHandlers(expr, warning = wHandler) list(out = val, warnings = myWarnings) } #' @keywords internal .get_object <- function(x, attribute_name = "object_name") { obj_name <- attr(x, attribute_name, exact = TRUE) model <- NULL if (!is.null(obj_name)) { model <- .safe(get(obj_name, envir = parent.frame())) # prevent self reference if (is.null(model) || inherits(model, "parameters_model")) { model <- .safe(get(obj_name, envir = globalenv())) } # prevent self reference if (is.null(model) || inherits(model, "parameters_model")) { model <- .safe(.dynGet(obj_name)) } } model } .is_semLme <- function(x) { all(inherits(x, c("sem", "lme"))) } .insert_row_at <- function(data, row, index, default_value = NA) { # add missing columns new_columns <- setdiff(colnames(data), colnames(row)) if (length(new_columns) > 0) { row[new_columns] <- default_value } # match column order row <- row[match(colnames(data), colnames(row))] # modify effects and component column - required for printing if ("Effects" %in% 
colnames(row)) { row$Effects <- data$Effects[1] } if ("Component" %in% colnames(row)) { row$Component <- data$Component[1] } # insert row if (index == 1) { rbind(row, data) } else if (index == (nrow(data) + 1)) { rbind(data, row) } else { rbind(data[1:(index - 1), ], row, data[index:nrow(data), ]) } } .insert_element_at <- function(data, element, index) { if (index == 1) { c(element, data) } else if (index == (length(data) + 1)) { c(data, element) } else { c(data[1:(index - 1)], element, data[index:length(data)]) } } .find_factor_levels <- function(model_data, model = NULL, model_call = NULL) { # check whether we have on-the-fly conversion of factors if (!is.null(model)) { model_terms <- insight::find_terms(model, verbose = FALSE) } else if (!is.null(model_call)) { # nolint model_terms <- insight::find_terms(model_call, verbose = FALSE) } else { model_terms <- NULL } # extract all model terms, we now have "as.factor(term)" etc., if any if (!is.null(model_terms$conditional)) { # extract variable names from "as.factor(term)" etc. factor_terms <- grep("(as\\.factor|factor|as\\.character)", model_terms$conditional, value = TRUE) cleaned <- gsub("(as\\.factor|factor|as\\.character)\\((.*)\\)", "\\2", factor_terms) # convert on-the-fly factors into real factors if (length(cleaned)) { for (i in seq_along(cleaned)) { model_data[[factor_terms[i]]] <- as.factor(model_data[[cleaned[i]]]) } } } # extract levels from factors, so we know the reference level out <- lapply(colnames(model_data), function(i) { v <- model_data[[i]] if (is.factor(v)) { paste0(i, levels(v)) } else if (is.character(v)) { paste0(i, unique(v)) } else { NULL } }) names(out) <- names(model_data) insight::compact_list(out) } # This function finds contrasts for those factors in a model, where including # a reference level makes sense. This is the case when there are contrasts # that are all zeros, which means that the reference level is not included in # the model matrix. .remove_reference_contrasts <- function(model) { cons <- .safe(model$contrasts) if (is.null(cons)) { return(NULL) } out <- vapply(cons, function(mat) { if (is.matrix(mat) && nrow(mat) > 0) { any(rowSums(mat) == 0) } else if (is.character(mat)) { mat %in% c("contr.treatment", "contr.SAS") } else { FALSE } }, logical(1)) # only return those factors that need to be removed names(out)[!out] } # Almost identical to dynGet(). The difference is that we deparse the expression # because get0() accepts a symbol only since R 4.1.0 .dynGet <- function(x, ifnotfound = stop(gettextf("%s not found", sQuote(x)), domain = NA, call. = FALSE), minframe = 1L, inherits = FALSE) { x <- insight::safe_deparse(x) n <- sys.nframe() myObj <- structure(list(.b = as.raw(7)), foo = 47L) while (n > minframe) { n <- n - 1L env <- sys.frame(n) r <- get0(x, envir = env, inherits = inherits, ifnotfound = myObj) if (!identical(r, myObj)) { return(r) } } ifnotfound } .deprecated_warning <- function(old, new, verbose = TRUE) { if (verbose) { insight::format_warning(paste0( "Argument `", old, "` is deprecated and will be removed in the future. Please use `", new, "` instead." )) } } parameters/R/methods_merTools.R0000644000176200001440000000256514716604200016300 0ustar liggesusers#' @export model_parameters.merModList <- function(model, ci = 0.95, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) 
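# objects of class "merModList" (lists of mixed models, e.g. from {merTools})
# are handled by the generic workhorse; no degrees of freedom are available,
# hence ci() below uses dof = NULL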
{ out <- .model_parameters_generic( model = model, ci = ci, bootstrap = FALSE, iterations = 10, merge_by = "Parameter", standardize = NULL, exponentiate = exponentiate, keep_parameters = keep, drop_parameters = drop, p_adjust = p_adjust, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export ci.merModList <- function(x, ci = 0.95, ...) { .ci_generic(model = x, ci = ci, dof = NULL, component = "conditional", ...) } #' @export standard_error.merModList <- function(model, ...) { s <- suppressWarnings(summary(model)) out <- .data_frame( Parameter = s$fe$term, SE = s$fe$std.error ) insight::text_remove_backticks(out, verbose = FALSE) } #' @export format_parameters.merModList <- function(model, brackets = c("[", "]"), ...) { .format_parameter_default(model[[1]], brackets = brackets) } parameters/R/methods_lavaan.R0000644000176200001440000001170114736731407015742 0ustar liggesusers# Packages lavaan, blavaan # model parameters --------------------------- #' @rdname model_parameters.principal #' @export model_parameters.lavaan <- function(model, ci = 0.95, standardize = FALSE, component = c("regression", "correlation", "loading", "defined"), keep = NULL, drop = NULL, verbose = TRUE, ...) { params <- .extract_parameters_lavaan(model, ci = ci, standardize = standardize, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) # Filter if (all(component == "all")) { component <- c("regression", "correlation", "loading", "variance", "defined", "mean") } params <- params[tolower(params$Component) %in% component, ] # add class-attribute for printing class(params) <- c("parameters_sem", "see_parameters_sem", class(params)) attr(params, "ci") <- ci attr(params, "model") <- model params } #' @export model_parameters.blavaan <- function(model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = "pd", rope_range = "default", rope_ci = 0.95, diagnostic = c("ESS", "Rhat"), component = "all", standardize = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, diagnostic = diagnostic, effects = "all", standardize = standardize, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) # Filter if (!all(component == "all")) { params <- params[tolower(params$Component) %in% component, ] } params <- .add_model_parameters_attributes( params, model, ci, exponentiate = FALSE, ci_method = ci_method, verbose = verbose, ... ) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_sem", "see_parameters_sem", class(params)) params } # ci --------------------------- #' @export ci.lavaan <- function(x, ci = 0.95, ...) { out <- .extract_parameters_lavaan(model = x, ci = ci, ...) out$CI <- ci out[out$Operator != "~1", c("To", "Operator", "From", "CI", "CI_low", "CI_high")] } # SE --------------------------- #' @export standard_error.lavaan <- function(model, ...) { out <- .extract_parameters_lavaan(model, ...) out[out$Operator != "~1", c("To", "Operator", "From", "SE")] } #' @export standard_error.blavaan <- function(model, ...) { params <- insight::get_parameters(model, ...) .data_frame( Parameter = colnames(params), SE = unname(sapply(params, stats::sd, na.rm = TRUE)) ) } # p-value --------------------------- #' @export p_value.lavaan <- function(model, ...) 
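# p-values are taken from the extracted lavaan parameter table; intercept
# rows (operator "~1") are excluded, mirroring ci() and standard_error()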
{ out <- .extract_parameters_lavaan(model, ...) out[out$Operator != "~1", c("To", "Operator", "From", "p")] } #' @export p_value.blavaan <- p_value.BFBayesFactor # print --------------------------- #' @export print.parameters_sem <- function(x, digits = 2, ci_digits = digits, p_digits = 3, ...) { # check if user supplied digits attributes if (missing(digits)) { digits <- .additional_arguments(x, "digits", 2) } if (missing(ci_digits)) { ci_digits <- .additional_arguments(x, "ci_digits", digits) } if (missing(p_digits)) { p_digits <- .additional_arguments(x, "p_digits", 3) } verbose <- .additional_arguments(x, "verbose", TRUE) formatted_table <- format( x = x, digits = digits, ci_digits, p_digits = p_digits, format = "text", ci_brackets = TRUE, ci_width = "auto", ... ) cat(insight::export_table(formatted_table, format = "text", ...)) if (isTRUE(verbose)) { .print_footer_cimethod(x) } invisible(x) } #' @export #' @inheritParams stats::predict predict.parameters_sem <- function(object, newdata = NULL, ...) { insight::check_if_installed("lavaan") as.data.frame(lavaan::lavPredict( attributes(object)$model, newdata = newdata, method = "EBM", ... )) } parameters/R/methods_survival.R0000644000176200001440000001066714737245125016363 0ustar liggesusers# classes: .coxph, .aareg, .survreg, .riskRegression, .survfit #################### .survfit ------ #' @export model_parameters.survfit <- function(model, keep = NULL, drop = NULL, verbose = TRUE, ...) { s <- summary(model) # extract all elements with same length, which occur most in that list # that is the data we need uniqv <- unique(lengths(s)) tab <- tabulate(match(lengths(s), uniqv)) idx <- which.max(tab) most_len <- uniqv[idx] # convert list into data frame, only for elements of same length params <- as.data.frame(s[lengths(s) == most_len]) # keep specific columns keep_columns <- intersect( c("time", "n.risk", "n.event", "surv", "std.err", "strata", "lower", "upper"), colnames(params) ) params <- params[keep_columns] # rename params <- datawizard::data_rename( params, select = c( Time = "time", `N Risk` = "n.risk", `N Event` = "n.event", Survival = "surv", SE = "std.err", Group = "strata", CI_low = "lower", CI_high = "upper" ) ) # fix labels params$Group <- gsub("x=", "", params$Group, fixed = TRUE) # These are integers, need to be character to display without decimals params$Time <- as.character(params$Time) params[["N Risk"]] <- as.character(params[["N Risk"]]) params[["N Event"]] <- as.character(params[["N Event"]]) attr(params, "ci") <- s$conf.int class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #################### .coxph ------ #' @export standard_error.coxph <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (isTRUE(robust)) { return(standard_error(model, ...)) } params <- insight::get_parameters(model) cs <- stats::coef(summary(model)) se <- cs[, 3] # check if (length(se) > nrow(params)) { se <- se[match(params$Parameter, .remove_backticks_from_string(rownames(cs)))] } .data_frame( Parameter = params$Parameter, SE = as.vector(se) ) } #' @export p_value.coxph <- function(model, ...) 
{ params <- insight::get_parameters(model) stats <- insight::get_statistic(model) params <- merge(params, stats, sort = FALSE) statistic <- attributes(stats)$statistic # convert in case of z if (identical(statistic, "z-statistic")) { params$Statistic <- params$Statistic^2 } .data_frame( Parameter = params$Parameter, p = as.vector(1 - stats::pchisq(params$Statistic, df = 1)) ) } #################### .aareg ------ #' @export standard_error.aareg <- function(model, ...) { s <- summary(model) se <- s$table[, "se(coef)"] .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @export p_value.aareg <- function(model, ...) { s <- summary(model) p <- s$table[, "p"] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #################### .survreg ------ #' @export standard_error.survreg <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (.check_vcov_args(robust, ...)) { return(standard_error.default(model, ...)) } s <- summary(model) se <- s$table[, 2] .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } #' @export p_value.survreg <- function(model, method = NULL, ...) { robust <- !is.null(method) && method == "robust" if (.check_vcov_args(robust, ...)) { return(p_value.default(model, ...)) } s <- summary(model) p <- s$table[, "p"] .data_frame( Parameter = .remove_backticks_from_string(names(p)), p = as.vector(p) ) } #################### .riskRegression ------ #' @export standard_error.riskRegression <- function(model, ...) { junk <- utils::capture.output(cs <- stats::coef(model)) .data_frame( Parameter = .remove_backticks_from_string(as.vector(cs[, 1])), SE = as.numeric(cs[, "StandardError"]) ) } #' @export p_value.riskRegression <- function(model, ...) { junk <- utils::capture.output(cs <- stats::coef(model)) .data_frame( Parameter = .remove_backticks_from_string(as.vector(cs[, 1])), p = as.numeric(cs[, "Pvalue"]) ) } parameters/R/reduce_parameters.R0000644000176200001440000001770414736731407016460 0ustar liggesusers#' Dimensionality reduction (DR) / Features Reduction #' #' This function performs a reduction in the parameter space (the number of #' variables). It starts by creating a new set of variables, based on the given #' method (the default method is "PCA", but other are available via the #' `method` argument, such as "cMDS", "DRR" or "ICA"). Then, it names this #' new dimensions using the original variables that correlates the most with it. #' For instance, a variable named `'V1_0.97/V4_-0.88'` means that the V1 and the #' V4 variables correlate maximally (with respective coefficients of .97 and #' -.88) with this dimension. Although this function can be useful in #' exploratory data analysis, it's best to perform the dimension reduction step #' in a separate and dedicated stage, as this is a very important process in the #' data analysis workflow. `reduce_data()` is an alias for #' `reduce_parameters.data.frame()`. #' #' @inheritParams principal_components #' @param method The feature reduction method. Can be one of `"PCA"`, `"cMDS"`, #' `"DRR"`, `"ICA"` (see the 'Details' section). #' @param distance The distance measure to be used. Only applies when #' `method = "cMDS"`. This must be one of `"euclidean"`, `"maximum"`, #' `"manhattan"`, `"canberra"`, `"binary"` or `"minkowski"`. Any unambiguous #' substring can be given. 
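#'   For instance, a minimal, purely illustrative sketch using the built-in
#'   `iris` data (any of the distance measures above could be plugged in):
#'
#'   ```r
#'   out <- reduce_data(iris[1:4], method = "cMDS", distance = "manhattan")
#'   head(out)
#'   ```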
#'
#' @details
#' The different methods available are described below:
#'
#' ## Unsupervised Methods
#' - **PCA**: See [`principal_components()`].
#'
#' - **cMDS / PCoA**: Classical Multidimensional Scaling (cMDS) takes a
#' set of dissimilarities (i.e., a distance matrix) and returns a set of points
#' such that the distances between the points are approximately equal to the
#' dissimilarities.
#'
#' - **DRR**: Dimensionality Reduction via Regression (DRR) is a very
#' recent technique extending PCA (*Laparra et al., 2015*). Starting from a
#' rotated PCA, it predicts redundant information from the remaining components
#' using non-linear regression. Some of the most notable advantages of
#' performing DRR are avoidance of multicollinearity between predictors and
#' overfitting mitigation. DRR tends to perform well when the first principal
#' component is enough to explain most of the variation in the predictors.
#' Requires the **DRR** package to be installed.
#'
#' - **ICA**: Performs an Independent Component Analysis using the
#' FastICA algorithm. Contrary to PCA, which attempts to find uncorrelated
#' sources (through least squares minimization), ICA attempts to find
#' independent sources, i.e., the source space that maximizes the
#' "non-gaussianity" of all sources. Contrary to PCA, ICA does not rank each
#' source, which makes it a poor tool for dimensionality reduction. Requires the
#' **fastICA** package to be installed.
#'
#' See also [package vignette](https://easystats.github.io/parameters/articles/parameters_reduction.html).
#'
#' @references
#' - Nguyen, L. H., and Holmes, S. (2019). Ten quick tips for effective
#' dimensionality reduction. PLOS Computational Biology, 15(6).
#'
#' - Laparra, V., Malo, J., and Camps-Valls, G. (2015). Dimensionality
#' reduction via regression in hyperspectral imagery. IEEE Journal of Selected
#' Topics in Signal Processing, 9(6), 1026-1036.
#'
#' @examples
#' data(iris)
#' model <- lm(Sepal.Width ~ Species * Sepal.Length + Petal.Width, data = iris)
#' model
#' reduce_parameters(model)
#'
#' out <- reduce_data(iris, method = "PCA", n = "max")
#' head(out)
#' @export
reduce_parameters <- function(x, method = "PCA", n = "max", distance = "euclidean", ...) {
  UseMethod("reduce_parameters")
}

#' @rdname reduce_parameters
#' @export
reduce_data <- function(x, method = "PCA", n = "max", distance = "euclidean", ...) {
  if (!is.data.frame(x)) {
    insight::format_error("Only works on data frames.")
  }
  reduce_parameters(x, method = method, n = n, distance = distance, ...)
}

#' @export
reduce_parameters.data.frame <- function(x, method = "PCA", n = "max", distance = "euclidean", ...) {
  x <- datawizard::to_numeric(x, dummy_factors = TRUE)

  # N factors
  if (n == "max") {
    nfac <- ncol(x) - 1
  } else {
    nfac <- .get_n_factors(x, n = n, type = "PCA", rotation = "none")
  }

  # compute new features
  if (tolower(method) %in% c("pca", "principal")) {
    features <- principal_components(x, n = nfac, ...)
    features <- as.data.frame(attributes(features)$scores)
  } else if (tolower(method) %in% c("cmds", "pcoa")) {
    features <- .cmds(x, n = nfac, distance = distance, ...)
  } else if (tolower(method) == "drr") {
    features <- .drr(x, n = nfac, ...)
  } else if (tolower(method) == "ica") {
    features <- .ica(x, n = nfac, ...)
} else { insight::format_error("`method` must be one of \"PCA\", \"cMDS\", \"DRR\" or \"ICA\".") } # Get weights / pseudo-loadings (correlations) cormat <- as.data.frame(stats::cor(x = x, y = features)) cormat <- cbind(data.frame(Variable = row.names(cormat)), cormat) pca_weights <- as.data.frame(.sort_loadings(cormat, cols = 2:ncol(cormat))) if (n == "max") { pca_weights <- .filter_loadings(pca_weights, threshold = "max", 2:ncol(pca_weights)) non_empty <- vapply(pca_weights[2:ncol(pca_weights)], function(x) !all(is.na(x)), TRUE) pca_weights <- pca_weights[c(TRUE, non_empty)] features <- features[, non_empty] pca_weights[is.na(pca_weights)] <- 0 pca_weights <- .filter_loadings(.sort_loadings(pca_weights, cols = 2:ncol(pca_weights)), threshold = "max", 2:ncol(pca_weights)) } # Create varnames varnames <- vapply(pca_weights[2:ncol(pca_weights)], function(x) { name <- pca_weights$Variable[!is.na(x)] weight <- insight::format_value(x[!is.na(x)]) paste0(paste(name, weight, sep = "_"), collapse = "/") }, character(1)) names(features) <- as.character(varnames) # Attributes attr(features, "loadings") <- pca_weights class(features) <- c("parameters_reduction", class(features)) # Out features } #' @export reduce_parameters.lm <- function(x, method = "PCA", n = "max", distance = "euclidean", ...) { model_data <- reduce_parameters( datawizard::to_numeric(insight::get_predictors(x, ...), ..., dummy_factors = TRUE), method = method, n = n, distance = distance ) y <- data.frame(.row = seq_along(insight::get_response(x))) y[insight::find_response(x)] <- insight::get_response(x) y$.row <- NULL new_formula <- paste(insight::find_response(x), "~", paste(paste0("`", names(model_data), "`"), collapse = " + ")) stats::update(x, formula = new_formula, data = cbind(model_data, y)) } #' @export reduce_parameters.merMod <- reduce_parameters.lm #' @export principal_components.lm <- function(x, ...) { reduce_parameters(x, method = "PCA", ...) } #' @export principal_components.merMod <- principal_components.lm #' @keywords internal .cmds <- function(x, n = "all", distance = "euclidean", ...) { n <- .get_n_factors(x, n = n, type = "PCA", rotation = "none") d <- stats::dist(x, method = distance) cmd <- stats::cmdscale(d, k = n, eig = TRUE) features <- as.data.frame(cmd$points) names(features) <- paste0("CMDS", seq_len(ncol(features))) features } #' @keywords internal .drr <- function(x, n = "all", ...) { n <- .get_n_factors(x, n = n, type = "PCA", rotation = "none") insight::check_if_installed("DRR") junk <- utils::capture.output(suppressMessages({ rez <- DRR::drr(x, n) })) features <- as.data.frame(rez$fitted.data) names(features) <- paste0("DRR", seq_len(ncol(features))) features } #' @keywords internal .ica <- function(x, n = "all", ...) { n <- .get_n_factors(x, n = n, type = "PCA", rotation = "none") insight::check_if_installed("fastICA") rez <- fastICA::fastICA(x, n.comp = ncol(x) - 1) features <- as.data.frame(rez$S) names(features) <- paste0("ICA", seq_len(ncol(features))) features } parameters/R/methods_gam.R0000644000176200001440000000530614736731407015250 0ustar liggesusers# classes: .gam, .list #################### .gam ------ #' @export model_parameters.gam <- model_parameters.cgam #' @export ci.gam <- function(x, ci = 0.95, method = NULL, ...) { .ci_generic(model = x, ci = ci, method = "wald", ...) } #' @export standard_error.gam <- function(model, ...) 
{ p.table <- summary(model)$p.table s.table <- summary(model)$s.table d1 <- d2 <- NULL if (!is.null(p.table)) { d1 <- .data_frame( Parameter = rownames(p.table), SE = as.vector(p.table[, 2]), Component = "conditional" ) } if (!is.null(s.table)) { d2 <- .data_frame( Parameter = rownames(s.table), SE = NA, Component = "smooth_terms" ) } insight::text_remove_backticks(rbind(d1, d2), verbose = FALSE) } #' @export p_value.gam <- function(model, ...) { p.table <- summary(model)$p.table s.table <- summary(model)$s.table d1 <- d2 <- NULL if (!is.null(p.table)) { d1 <- .data_frame( Parameter = rownames(p.table), p = as.vector(p.table[, 4]), Component = "conditional" ) } if (!is.null(s.table)) { d2 <- .data_frame( Parameter = rownames(s.table), p = as.vector(s.table[, 4]), Component = "smooth_terms" ) } insight::text_remove_backticks(rbind(d1, d2), verbose = FALSE) } #' @export simulate_model.gam <- function(model, iterations = 1000, ...) { if (is.null(iterations)) iterations <- 1000 beta <- stats::coef(model) varcov <- insight::get_varcov(model, component = "all", ...) out <- as.data.frame(.mvrnorm(n = iterations, mu = beta, Sigma = varcov)) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #################### .list ------ #' @export model_parameters.list <- function(model, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") model_parameters(model, ...) } else if ("pamobject" %in% names(model)) { model <- model$pamobject model_parameters(model, ...) } else { insight::format_error("We don't recognize this object of class `list`. Please raise an issue.") } } #' @export ci.list <- function(x, ci = 0.95, ...) { if ("gam" %in% names(x)) { x <- x$gam class(x) <- c("gam", "lm", "glm") ci(x, ci = ci, ...) } else { return(NULL) } } #' @export simulate_model.list <- function(model, iterations = 1000, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") simulate_model(model, iterations = iterations, ...) } } parameters/R/pool_parameters.R0000644000176200001440000003267615073712214016155 0ustar liggesusers#' Pool Model Parameters #' #' This function "pools" (i.e. combines) model parameters in a similar fashion #' as `mice::pool()`. However, this function pools parameters from #' `parameters_model` objects, as returned by #' [model_parameters()]. #' #' @param x A list of `parameters_model` objects, as returned by #' [model_parameters()], or a list of model-objects that is supported by #' `model_parameters()`. #' @param ... Arguments passed down to `model_parameters()`, if `x` is a list #' of model-objects. Can be used, for instance, to specify arguments like #' `ci` or `ci_method` etc. #' @inheritParams model_parameters.default #' @inheritParams bootstrap_model #' @inheritParams model_parameters.glmmTMB #' #' @note #' Models with multiple components, (for instance, models with zero-inflation, #' where predictors appear in the count and zero-inflation part, or models with #' dispersion component) may fail in rare situations. In this case, compute #' the pooled parameters for components separately, using the `component` #' argument. #' #' Some model objects do not return standard errors (e.g. objects of class #' `htest`). For these models, no pooled confidence intervals nor p-values #' are returned. #' #' @details Averaging of parameters follows Rubin's rules (_Rubin, 1987, p. 76_). 
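#' For instance, a minimal sketch of Rubin's pooling rules, with made-up
#' estimates, that mirrors the internal computation:
#'
#' ```r
#' est <- c(29.8, 30.4, 30.1) # estimates from m analyses of imputed data
#' se <- c(0.90, 0.95, 0.88) # corresponding standard errors
#' m <- length(est)
#' pooled <- mean(est) # pooled point estimate
#' ubar <- mean(se^2) # average within-imputation variance
#' b <- var(est) # between-imputation variance
#' pooled_se <- sqrt(ubar + (1 + 1 / m) * b)
#' ```
#'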
#' The pooled degrees of freedom is based on the Barnard-Rubin adjustment for #' small samples (_Barnard and Rubin, 1999_). #' #' @references #' Barnard, J. and Rubin, D.B. (1999). Small sample degrees of freedom with #' multiple imputation. Biometrika, 86, 948-955. Rubin, D.B. (1987). Multiple #' Imputation for Nonresponse in Surveys. New York: John Wiley and Sons. #' #' @examplesIf require("mice") && require("datawizard") #' # example for multiple imputed datasets #' data("nhanes2", package = "mice") #' imp <- mice::mice(nhanes2, printFlag = FALSE) #' models <- lapply(1:5, function(i) { #' lm(bmi ~ age + hyp + chl, data = mice::complete(imp, action = i)) #' }) #' pool_parameters(models) #' #' # should be identical to: #' m <- with(data = imp, exp = lm(bmi ~ age + hyp + chl)) #' summary(mice::pool(m)) #' #' # For glm, mice used residual df, while `pool_parameters()` uses `Inf` #' nhanes2$hyp <- datawizard::slide(as.numeric(nhanes2$hyp)) #' imp <- mice::mice(nhanes2, printFlag = FALSE) #' models <- lapply(1:5, function(i) { #' glm(hyp ~ age + chl, family = binomial, data = mice::complete(imp, action = i)) #' }) #' m <- with(data = imp, exp = glm(hyp ~ age + chl, family = binomial)) #' # residual df #' summary(mice::pool(m))$df #' # df = Inf #' pool_parameters(models)$df_error #' # use residual df instead #' pool_parameters(models, ci_method = "residual")$df_error #' @return A data frame of indices related to the model's parameters. #' @export pool_parameters <- function( x, exponentiate = FALSE, effects = "fixed", component = "all", verbose = TRUE, ... ) { # check input, save original model ----- original_model <- random_params <- NULL obj_name <- insight::safe_deparse_symbol(substitute(x)) if ( all(vapply(x, insight::is_model, TRUE)) && all(vapply(x, insight::is_model_supported, TRUE)) ) { original_model <- x[[1]] # Add exceptions for models with uncommon components here --------------- exception_model_class <- c("polr", "svyolr") # exceptions for "component" argument. Eg, MASS::polr has components # "alpha" and "beta", and "component" needs to be set to all by default if ( identical(component, "conditional") && inherits(original_model, exception_model_class) ) { component <- "all" } x <- lapply(x, model_parameters, effects = effects, component = component, ...) } if (!all(vapply(x, inherits, TRUE, "parameters_model"))) { insight::format_error( "First argument `x` must be a list of `parameters_model` objects, as returned by the `model_parameters()` function." ) } if (is.null(original_model)) { original_model <- .get_object(x[[1]]) } if (isTRUE(attributes(x[[1]])$exponentiate) && verbose) { insight::format_alert( "Pooling on exponentiated parameters is not recommended. Please call `model_parameters()` with 'exponentiate = FALSE', and then call `pool_parameters(..., exponentiate = TRUE)`." ) } # only pool for specific component ----- original_x <- x if ( "Component" %in% colnames(x[[1]]) && !insight::is_empty_object(component) && component != "all" ) { x <- lapply(x, function(i) { i <- i[i$Component == component, ] i$Component <- NULL i }) if (verbose) { insight::format_alert(paste0( "Pooling applied to the ", component, " model component." 
)) } } # preparation ---- params <- do.call(rbind, x) len <- length(x) ci <- attributes(original_x[[1]])$ci if (is.null(ci)) { ci <- 0.95 } parameter_values <- x[[1]]$Parameter # exceptions ---- # check for special models, like "htest", which have no "Parameter" columns if (!"Parameter" %in% colnames(params)) { # check for possible column names if (all(c("Parameter1", "Parameter2") %in% colnames(params))) { # create combined Parameter column params$Parameter <- paste0(params$Parameter1, " and ", params$Parameter2) # remove old columns params$Parameter1 <- NULL params$Parameter2 <- NULL # update values parameter_values <- paste0(x[[1]]$Parameter1, " and ", x[[1]]$Parameter2) # } # fix coefficient column colnames(params)[colnames(params) == "r"] <- "Coefficient" colnames(params)[colnames(params) == "rho"] <- "Coefficient" colnames(params)[colnames(params) == "tau"] <- "Coefficient" colnames(params)[colnames(params) == "Estimate"] <- "Coefficient" colnames(params)[colnames(params) == "Difference"] <- "Coefficient" } # split multiply (imputed) datasets by parameters, # but only for fixed effects. Filter random effects, # and save parameter names from fixed effects for later use... if ( effects == "all" && "Effects" %in% colnames(params) && "random" %in% params$Effects ) { random_params <- params[params$Effects == "random", ] params <- params[params$Effects != "random", ] parameter_values <- x[[1]]$Parameter[x[[1]]$Effects != "random"] } # split by component if (!is.null(params$Component) && insight::n_unique(params$Component) > 1) { component_values <- x[[1]]$Component estimates <- split( params, list( factor(params$Parameter, levels = unique(parameter_values)), factor(params$Component, levels = unique(component_values)) ) ) } else { component_values <- NULL estimates <- split( params, factor(params$Parameter, levels = unique(parameter_values)) ) } # pool estimates etc. ----- pooled_params <- do.call( rbind, lapply(estimates, function(i) { # if we split by "component", some of the data frames might be empty # in this case, just skip... if (nrow(i) > 0) { # pooled estimate pooled_estimate <- mean(i$Coefficient) # special models that have no standard errors (like "htest" objects) if (is.null(i$SE) || all(is.na(i$SE))) { out <- data.frame( Coefficient = pooled_estimate, SE = NA, CI_low = NA, CI_high = NA, Statistic = NA, df_error = NA, p = NA, stringsAsFactors = FALSE ) if (verbose) { insight::format_alert( "Model objects had no standard errors. Cannot compute pooled confidence intervals and p-values." 
) } # regular models that have coefficients and standard errors } else { # pooled standard error ubar <- mean(i$SE^2) tmp <- ubar + (1 + 1 / len) * stats::var(i$Coefficient) pooled_se <- sqrt(tmp) # pooled degrees of freedom, Barnard-Rubin adjustment for small samples df_column <- grep("(\\bdf\\b|\\bdf_error\\b)", colnames(i), value = TRUE)[1] if (length(df_column)) { pooled_df <- .barnad_rubin( m = nrow(i), b = stats::var(i$Coefficient), t = tmp, dfcom = unique(i[[df_column]]) ) # validation check length if (length(pooled_df) > 1 && length(pooled_se) == 1) { pooled_df <- round(mean(pooled_df, na.rm = TRUE)) } } else { pooled_df <- Inf } # pooled statistic pooled_statistic <- pooled_estimate / pooled_se # confidence intervals alpha <- (1 + ci) / 2 fac <- suppressWarnings(stats::qt(alpha, df = pooled_df)) out <- data.frame( Coefficient = pooled_estimate, SE = pooled_se, CI_low = pooled_estimate - pooled_se * fac, CI_high = pooled_estimate + pooled_se * fac, Statistic = pooled_statistic, df_error = pooled_df, p = 2 * stats::pt(abs(pooled_statistic), df = pooled_df, lower.tail = FALSE), stringsAsFactors = FALSE ) } out } else { NULL } }) ) # pool random effect variances ----- pooled_random <- NULL if (!is.null(random_params)) { estimates <- split( random_params, factor(random_params$Parameter, levels = unique(random_params$Parameter)) ) pooled_random <- do.call( rbind, lapply(estimates, function(i) { pooled_estimate <- mean(i$Coefficient, na.rm = TRUE) data.frame( Parameter = unique(i$Parameter), Coefficient = pooled_estimate, Effects = "random", stringsAsFactors = FALSE ) }) ) pooled_params$Effects <- "fixed" } # reorder ------ pooled_params$Parameter <- parameter_values columns <- c( "Parameter", "Coefficient", "SE", "CI_low", "CI_high", "Statistic", "df_error", "p", "Effects", "Component" ) pooled_params <- pooled_params[intersect(columns, colnames(pooled_params))] # final attributes ----- # exponentiate coefficients and SE/CI, if requested pooled_params <- .exponentiate_parameters(pooled_params, NULL, exponentiate) if (!is.null(pooled_random)) { pooled_params <- merge(pooled_params, pooled_random, all = TRUE, sort = FALSE) } # add back component column if (!is.null(component_values)) { pooled_params$Component <- component_values } # this needs to be done extra here, cannot call ".add_model_parameters_attributes()" pooled_params <- .add_pooled_params_attributes( pooled_params, model_params = original_x[[1]], model = original_model, ci, exponentiate, verbose = verbose ) attr(pooled_params, "object_name") <- obj_name # pool sigma ---- sig <- unlist(insight::compact_list(lapply(original_x, function(i) { attributes(i)$sigma }))) if (!insight::is_empty_object(sig)) { attr(pooled_params, "sigma") <- mean(sig, na.rm = TRUE) } class(pooled_params) <- c( "parameters_model", "see_parameters_model", class(pooled_params) ) pooled_params } # helper ------ .barnad_rubin <- function(m, b, t, dfcom = 999999) { # fix for z-statistic if (is.null(dfcom) || all(is.na(dfcom)) || all(is.infinite(dfcom))) { return(Inf) } lambda <- (1 + 1 / m) * b / t lambda[lambda < 1e-04] <- 1e-04 dfold <- (m - 1) / lambda^2 dfobs <- (dfcom + 1) / (dfcom + 3) * dfcom * (1 - lambda) dfold * dfobs / (dfold + dfobs) } .add_pooled_params_attributes <- function( pooled_params, model_params, model, ci, exponentiate, verbose = TRUE ) { info <- insight::model_info(model, verbose = FALSE) pretty_names <- attributes(model_params)$pretty_names if (length(pretty_names) < nrow(model_params)) { pretty_names <- c( pretty_names, 
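      # fill up with plain parameter names for rows that lack a pretty name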
model_params$Parameter[(length(pretty_names) + 1):nrow(model_params)] ) } attr(pooled_params, "ci") <- ci attr(pooled_params, "exponentiate") <- exponentiate attr(pooled_params, "pretty_names") <- pretty_names attr(pooled_params, "verbose") <- verbose attr(pooled_params, "ordinal_model") <- attributes(model_params)$ordinal_model attr(pooled_params, "model_class") <- attributes(model_params)$model_class attr(pooled_params, "bootstrap") <- attributes(model_params)$bootstrap attr(pooled_params, "iterations") <- attributes(model_params)$iterations attr(pooled_params, "ci_method") <- attributes(model_params)$ci_method attr(pooled_params, "digits") <- attributes(model_params)$digits attr(pooled_params, "ci_digits") <- attributes(model_params)$ci_digits attr(pooled_params, "p_digits") <- attributes(model_params)$p_digits # column name for coefficients coef_col <- .find_coefficient_type(info, exponentiate) attr(pooled_params, "coefficient_name") <- coef_col attr(pooled_params, "zi_coefficient_name") <- if (isTRUE(exponentiate)) { "Odds Ratio" } else { "Log-Odds" } # formula attr(pooled_params, "model_formula") <- insight::find_formula(model, verbose = FALSE) pooled_params } parameters/R/methods_metafor.R0000644000176200001440000001545714717111737016146 0ustar liggesusers# package metafor ####### .rma ----------------- #' Parameters from Meta-Analysis #' #' Extract and compute indices and measures to describe parameters of meta-analysis models. #' #' @param include_studies Logical, if `TRUE` (default), includes parameters for #' all studies. Else, only parameters for overall-effects are shown. #' @inheritParams model_parameters.default #' @inheritParams model_parameters.glimML #' #' @examples #' library(parameters) #' mydat <<- data.frame( #' effectsize = c(-0.393, 0.675, 0.282, -1.398), #' stderr = c(0.317, 0.317, 0.13, 0.36) #' ) #' if (require("metafor", quietly = TRUE)) { #' model <- rma(yi = effectsize, sei = stderr, method = "REML", data = mydat) #' model_parameters(model) #' } #' \donttest{ #' # with subgroups #' if (require("metafor", quietly = TRUE)) { #' data(dat.bcg) #' dat <- escalc( #' measure = "RR", #' ai = tpos, #' bi = tneg, #' ci = cpos, #' di = cneg, #' data = dat.bcg #' ) #' dat$alloc <- ifelse(dat$alloc == "random", "random", "other") #' d <<- dat #' model <- rma(yi, vi, mods = ~alloc, data = d, digits = 3, slab = author) #' model_parameters(model) #' } #' #' if (require("metaBMA", quietly = TRUE)) { #' data(towels) #' m <- suppressWarnings(meta_random(logOR, SE, study, data = towels)) #' model_parameters(m) #' } #' } #' #' @return A data frame of indices related to the model's parameters. #' @export model_parameters.rma <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, include_studies = TRUE, keep = NULL, drop = NULL, verbose = TRUE, ...) { # handle ci-level that was defined in function call... ci_level <- parse(text = insight::safe_deparse(model$call))[[1]]$level if (!is.null(ci_level) && missing(ci)) { ci <- ci_level / 100 } # validation check, warn if unsupported argument is used. .check_dots( dots = list(...), not_allowed = c("vcov", "vcov_args"), class(model)[1], verbose = verbose ) meta_analysis_overall <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... 
  )

  subgroups <- NULL
  group_variable <- NULL

  # subgroup analyses?
  if (!is.null(model$formula.mods)) {
    group_variable <- deparse(model$formula.mods[[2]])[1]
    model_data <- insight::get_data(model, verbose = FALSE)
    if (group_variable %in% colnames(model_data)) {
      subgroups <- sort(unique(model_data[[group_variable]]))
    }
  }

  if (nrow(meta_analysis_overall) > 1 && !is.null(subgroups)) {
    meta_analysis_overall$Subgroup <- subgroups
    meta_analysis_overall$Parameter <- "(Intercept)"
  }

  alpha <- (1 + ci) / 2

  rma_parameters <- if (!is.null(model$slab) && !is.numeric(model$slab)) {
    sprintf("%s", model$slab)
  } else {
    sprintf("Study %i", 1:model[["k"]])
  }

  # find missing
  if (!is.null(model$yi.f) && anyNA(model$yi.f)) {
    rma_parameters <- rma_parameters[match(model$yi, model$yi.f)]
  }

  rma_coefficients <- as.vector(model$yi)
  rma_se <- as.vector(sqrt(model$vi))
  rma_ci_low <- rma_coefficients - rma_se * stats::qt(alpha, df = Inf)
  rma_ci_high <- rma_coefficients + rma_se * stats::qt(alpha, df = Inf)
  rma_statistic <- rma_coefficients / rma_se
  rma_ci_p <- 2 * stats::pt(abs(rma_statistic), df = Inf, lower.tail = FALSE)

  meta_analysis_studies <- data.frame(
    Parameter = rma_parameters,
    Coefficient = rma_coefficients,
    SE = rma_se,
    CI = ci,
    CI_low = rma_ci_low,
    CI_high = rma_ci_high,
    z = rma_statistic,
    df_error = NA,
    p = rma_ci_p,
    Weight = 1 / as.vector(model$vi),
    stringsAsFactors = FALSE
  )

  # subgroup analyses?
  if (!is.null(subgroups)) {
    meta_analysis_studies$Subgroup <- insight::get_data(model, verbose = FALSE)[[group_variable]]
  }

  original_attributes <- attributes(meta_analysis_overall)
  out <- merge(meta_analysis_studies, meta_analysis_overall, all = TRUE, sort = FALSE)

  # fix intercept name
  out$Parameter[out$Parameter == "(Intercept)"] <- "Overall"

  # filter studies?
  if (isFALSE(include_studies)) {
    out <- out[out$Parameter == "Overall", ]
  }

  original_attributes$names <- names(out)
  original_attributes$row.names <- seq_len(nrow(out))
  original_attributes$pretty_names <- stats::setNames(out$Parameter, out$Parameter)
  attributes(out) <- original_attributes

  # no df
  out$df_error <- NULL

  attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model))
  attr(out, "measure") <- model$measure

  if (!"Method" %in% names(out)) {
    out$Method <- "Meta-analysis using 'metafor'"
  }
  attr(out, "title") <- unique(out$Method)

  out
}


#' @export
p_value.rma <- function(model, ...) {
  params <- insight::get_parameters(model)
  .data_frame(
    Parameter = .remove_backticks_from_string(params$Parameter),
    p = model$pval
  )
}


#' @export
ci.rma <- function(x, ci = 0.95, ...) {
  params <- insight::get_parameters(x)
  out <- tryCatch(
    {
      tmp <- lapply(ci, function(i) {
        model <- stats::update(x, level = i)
        .data_frame(
          Parameter = params$Parameter,
          CI = i,
          CI_low = as.vector(model$ci.lb),
          CI_high = as.vector(model$ci.ub)
        )
      })
      insight::text_remove_backticks(do.call(rbind, tmp), verbose = FALSE)
    },
    error = function(e) {
      NULL
    }
  )
  if (is.null(out)) {
    se <- standard_error(x)
    out <- lapply(ci, function(i) {
      alpha <- (1 + i) / 2
      fac <- stats::qnorm(alpha)
      .data_frame(
        Parameter = params$Parameter,
        CI = i,
        CI_low = params$Estimate - as.vector(se$SE) * fac,
        CI_high = params$Estimate + as.vector(se$SE) * fac
      )
    })
    out <- insight::text_remove_backticks(do.call(rbind, out), verbose = FALSE)
  }
  out
}


#' @export
standard_error.rma <- function(model, ...) {
  params <- insight::get_parameters(model)
  .data_frame(
    Parameter = .remove_backticks_from_string(params$Parameter),
    SE = model[["se"]]
  )
}


#' @export
format_parameters.rma <- function(model, ...)
{ params <- insight::find_parameters(model, flatten = TRUE) names(params) <- params params } parameters/R/methods_BayesFactor.R0000644000176200001440000002203015003670523016664 0ustar liggesusers# classes: .BFBayesFactor #' Parameters from BayesFactor objects #' #' Parameters from `BFBayesFactor` objects from `{BayesFactor}` package. #' #' @param model Object of class `BFBayesFactor`. #' @param include_proportions Logical that decides whether to include posterior #' cell proportions/counts for Bayesian contingency table analysis (from #' `BayesFactor::contingencyTableBF()`). Defaults to `FALSE`, as this #' information is often redundant. #' @inheritParams bayestestR::describe_posterior #' @inheritParams p_value #' @inheritParams model_parameters.htest #' #' @details #' The meaning of the extracted parameters: #' #' - For [BayesFactor::ttestBF()]: `Difference` is the raw difference between #' the means. #' - For [BayesFactor::correlationBF()]: `rho` is the linear correlation #' estimate (equivalent to Pearson's *r*). #' - For [BayesFactor::lmBF()] / [BayesFactor::generalTestBF()] #' / [BayesFactor::regressionBF()] / [BayesFactor::anovaBF()]: in addition to #' parameters of the fixed and random effects, there are: `mu` is the #' (mean-centered) intercept; `sig2` is the model's sigma; `g` / `g_*` are #' the *g* parameters; See the *Bayes Factors for ANOVAs* paper #' (\doi{10.1016/j.jmp.2012.08.001}). #' #' @examplesIf require("BayesFactor") #' \donttest{ #' # Bayesian t-test #' model <- BayesFactor::ttestBF(x = rnorm(100, 1, 1)) #' model_parameters(model) #' model_parameters(model, es_type = "cohens_d", ci = 0.9) #' #' # Bayesian contingency table analysis #' data(raceDolls) #' bf <- BayesFactor::contingencyTableBF( #' raceDolls, #' sampleType = "indepMulti", #' fixedMargin = "cols" #' ) #' model_parameters(bf, #' centrality = "mean", #' dispersion = TRUE, #' verbose = FALSE, #' es_type = "cramers_v" #' ) #' } #' @return A data frame of indices related to the model's parameters. #' @export model_parameters.BFBayesFactor <- function(model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = "pd", rope_range = "default", rope_ci = 0.95, priors = TRUE, es_type = NULL, include_proportions = FALSE, verbose = TRUE, ...) { insight::check_if_installed("BayesFactor") if (any(startsWith(names(model@numerator), "Null"))) { if (isTRUE(verbose)) { insight::format_alert( "Nothing to compute for point-null models.", "See github.com/easystats/parameters/issues/226" ) } return(NULL) } if (is.null(insight::get_parameters(model, verbose = FALSE))) { if (isTRUE(verbose)) { insight::format_warning("Can't extract model parameters.") } return(NULL) } out <- bayestestR::describe_posterior( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, priors = priors, verbose = verbose, ... 
) bf_type <- .classify_BFBayesFactor(model) # Add components and effects columns cleaned_params <- NULL out <- tryCatch( { cleaned_params <- insight::clean_parameters(model) merge(out, cleaned_params[, c("Parameter", "Effects", "Component")], sort = FALSE) }, error = function(e) { out } ) # Extract BF tryCatch( { bfm <- as.data.frame(bayestestR::bayesfactor_models(model)[-1, ]) if (is.null(bfm$log_BF)) { out$BF <- bfm$BF } else { out$BF <- exp(bfm$log_BF) } }, error = function(e) { NULL } ) # leave out redundant posterior cell proportions/counts if (bf_type == "xtable" && isFALSE(include_proportions)) { out <- out[which(!startsWith(out$Parameter, "cell[")), , drop = FALSE] } # Effect size? if (!is.null(es_type)) { # needs {effectsize} to be installed insight::check_if_installed("effectsize") tryCatch( { effsize <- effectsize::effectsize(model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, rope_ci = rope_ci, type = es_type, ... ) if (bf_type == "xtable" && isTRUE(include_proportions)) { out <- merge(out, effsize, sort = FALSE, all = TRUE) } else { if (bf_type == "xtable") { prefix <- "Cramers_" } else { prefix <- "d_" } ci_cols <- startsWith(colnames(effsize), "CI_") colnames(effsize)[ci_cols] <- paste0(prefix, colnames(effsize)[ci_cols]) out$CI <- NULL out <- cbind(out, effsize) } }, error = function(e) { NULL } ) } # # Remove unnecessary columns # if ("CI" %in% names(out) && length(stats::na.omit(unique(out$CI))) == 1) { # out$CI <- NULL # } if ("ROPE_CI" %in% names(out) && length(stats::na.omit(unique(out$ROPE_CI))) == 1) { out$ROPE_CI <- NULL } if ("ROPE_low" %in% names(out)) { out$ROPE_low <- NULL out$ROPE_high <- NULL } # ==== remove Component column if not needed if (!is.null(out$Component) && insight::has_single_value(out$Component, remove_na = TRUE)) out$Component <- NULL if (!is.null(out$Effects) && insight::has_single_value(out$Effects, remove_na = TRUE)) out$Effects <- NULL # ==== remove rows and columns with complete `NA`s out <- datawizard::remove_empty(out) # validation check: make sure BF column still exists, # see https://github.com/easystats/correlation/issues/269 if (is.null(out$BF)) { out$BF <- NA } # ==== pretty parameter names cp <- out$Parameter if (!is.null(cleaned_params) && length(cleaned_params$Cleaned_Parameter) == length(cp) && bf_type == "linear") { match_params <- stats::na.omit(match(cp, cleaned_params$Parameter)) cp <- cleaned_params$Cleaned_Parameter[match_params] } pretty_names <- stats::setNames( gsub("Cohens_d", "Cohen's D", gsub("Cramers_v", "Cramer's V", cp, fixed = TRUE), fixed = TRUE), out$Parameter ) if (!"Method" %in% names(out)) { out$Method <- .method_BFBayesFactor(model) } # reorder col_order <- c( "Parameter", "Mean", "Median", "MAD", "CI", "CI_low", "CI_high", "SD", "Cohens_d", "Cramers_v", "Cramers_v_adjusted", "d_CI_low", "d_CI_high", "Cramers_CI_low", "Cramers_CI_high", "pd", "ROPE_Percentage", "Prior_Distribution", "Prior_Location", "Prior_Scale", "Effects", "Component", "BF", "Method" ) out <- out[col_order[col_order %in% names(out)]] attr(out, "title") <- unique(out$Method) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(out, "pretty_names") <- pretty_names attr(out, "ci_test") <- ci out <- .add_model_parameters_attributes( params = out, model = model, ci = ci, ci_method = ci_method, verbose = verbose ) class(out) <- c("parameters_model", "see_parameters_model", class(out)) out } #' @export p_value.BFBayesFactor <- function(model, ...) 
{ p <- bayestestR::p_direction(model, ...) .data_frame( Parameter = .remove_backticks_from_string(p$Parameter), p = sapply(p$pd, bayestestR::convert_pd_to_p, simplify = TRUE) ) } # helper ------- .classify_BFBayesFactor <- function(x) { insight::check_if_installed("BayesFactor") if (inherits(x@denominator, "BFcorrelation")) { "correlation" } else if (inherits(x@denominator, "BFoneSample")) { "ttest1" } else if (inherits(x@denominator, "BFindepSample")) { "ttest2" } else if (inherits(x@denominator, "BFmetat")) { "meta" } else if (inherits(x@denominator, "BFlinearModel")) { "linear" } else if (inherits(x@denominator, "BFcontingencyTable")) { "xtable" } else if (inherits(x@denominator, "BFproportion")) { "proptest" } else { class(x@denominator) } } .method_BFBayesFactor <- function(x) { if (inherits(x@denominator, "BFcorrelation")) { "Bayesian correlation analysis" } else if (inherits(x@denominator, c("BFoneSample", "BFindepSample"))) { "Bayesian t-test" } else if (inherits(x@denominator, "BFmetat")) { "Meta-analytic Bayes factors" } else if (inherits(x@denominator, "BFlinearModel")) { "Bayes factors for linear models" } else if (inherits(x@denominator, "BFcontingencyTable")) { "Bayesian contingency table analysis" } else if (inherits(x@denominator, "BFproportion")) { "Bayesian proportion test" } else { NA_character_ } } parameters/R/utils_clustering.R0000644000176200001440000000243214362231726016347 0ustar liggesusers# Utils ------------------------------------------------------------------- #' @keywords internal .prepare_data_clustering <- function(x, include_factors = FALSE, standardize = FALSE, preprocess = TRUE, ...) { if (isFALSE(preprocess)) { return(x) } # include factors? if (include_factors) { # ordered factors to numeric factors <- vapply(x, is.ordered, TRUE) if (any(factors)) { x[factors] <- sapply( x[factors], datawizard::to_numeric, dummy_factors = FALSE, preserve_levels = TRUE ) } # character and factors to dummies factors <- sapply(x, function(i) is.character(i) | is.factor(i)) if (any(factors)) { dummies <- lapply(x[factors], .factor_to_dummy) x <- cbind(x[!factors], dummies) } } else { # remove factors x <- x[vapply(x, is.numeric, TRUE)] } # Remove all missing values from data, only use numerics x <- stats::na.omit(x) if (isTRUE(standardize)) { x <- datawizard::standardize(x, ...) # remove "dw_transformer" attribute x[] <- lapply(x, as.numeric) } x } parameters/R/cluster_discrimination.R0000644000176200001440000000602514736731407017535 0ustar liggesusers#' Compute a linear discriminant analysis on classified cluster groups #' #' Computes linear discriminant analysis (LDA) on classified cluster groups, and #' determines the goodness of classification for each cluster group. See `MASS::lda()` #' for details. #' #' @param x A data frame #' @param cluster_groups Group classification of the cluster analysis, which can #' be retrieved from the [cluster_analysis()] function. #' @param ... Other arguments to be passed to or from. #' #' @seealso [`n_clusters()`] to determine the number of clusters to extract, #' [`cluster_analysis()`] to compute a cluster analysis and #' [`performance::check_clusterstructure()`] to check suitability of data for #' clustering. 
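#'
#' @details
#' Classification accuracy is assessed with a linear discriminant analysis,
#' using leave-one-out cross-validation (`CV = TRUE`). A minimal sketch of the
#' underlying logic, where `groups` stands for the cluster assignments:
#'
#' ```r
#' disc <- MASS::lda(groups ~ ., data = x, CV = TRUE)
#' tab <- table(groups, disc$class) # observed vs. predicted group
#' diag(prop.table(tab, 1)) # classification accuracy per group
#' sum(diag(prop.table(tab))) # overall accuracy
#' ```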
#' #' @examplesIf requireNamespace("MASS", quietly = TRUE) #' # Retrieve group classification from hierarchical cluster analysis #' clustering <- cluster_analysis(iris[, 1:4], n = 3) #' #' # Goodness of group classification #' cluster_discrimination(clustering) #' @export cluster_discrimination <- function(x, cluster_groups = NULL, ...) { UseMethod("cluster_discrimination") } #' @export cluster_discrimination.cluster_analysis <- function(x, cluster_groups = NULL, ...) { if (is.null(cluster_groups)) { cluster_groups <- stats::predict(x) } cluster_discrimination(attributes(x)$data, cluster_groups, ...) } #' @export cluster_discrimination.default <- function(x, cluster_groups = NULL, ...) { if (is.null(cluster_groups)) { insight::format_error("Please provide cluster assignments via `cluster_groups`.") } x <- stats::na.omit(x) cluster_groups <- stats::na.omit(cluster_groups) # compute discriminant analysis of groups on original data frame insight::check_if_installed("MASS") disc <- MASS::lda(cluster_groups ~ ., data = x, na.action = "na.omit", CV = TRUE) # Assess the accuracy of the prediction # percent correct for each category of groups classification_table <- table(cluster_groups, disc$class) correct <- diag(prop.table(classification_table, 1)) # total correct percentage total_correct <- sum(diag(prop.table(classification_table))) out <- data.frame( Group = unique(cluster_groups), Accuracy = correct, stringsAsFactors = FALSE ) # Sort according to accuracy out <- out[order(out$Group), ] attr(out, "Overall_Accuracy") <- total_correct class(out) <- c("cluster_discrimination", class(out)) out } # Utils ------------------------------------------------------------------- #' @export print.cluster_discrimination <- function(x, ...) { orig_x <- x insight::print_color("# Accuracy of Cluster Group Classification via Linear Discriminant Analysis (LDA)\n\n", "blue") total_accuracy <- attributes(x)$Overall_Accuracy x$Accuracy <- sprintf("%.2f%%", 100 * x$Accuracy) total <- sprintf("%.2f%%", 100 * total_accuracy) print.data.frame(x, row.names = FALSE, ...) insight::print_color(sprintf("\nOverall accuracy of classification: %s\n", total), "yellow") invisible(orig_x) } parameters/R/methods_bayesQR.R0000644000176200001440000000275014507235543016046 0ustar liggesusers#' @export model_parameters.bayesQR <- function(model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = "pd", rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, keep = NULL, drop = NULL, verbose = TRUE, ...) { # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) attr(params, "ci") <- ci attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } #' @export p_value.bayesQR <- p_value.BFBayesFactor parameters/R/methods_glmm.R0000644000176200001440000000435514507235543015437 0ustar liggesusers#' @export model_parameters.glmm <- function(model, ci = 0.95, effects = c("all", "fixed", "random"), bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, keep = NULL, drop = NULL, verbose = TRUE, ...) 
{ effects <- match.arg(effects) out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Effects"), standardize = standardize, exponentiate = exponentiate, keep_parameters = keep, drop_parameters = drop, effects = effects, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export ci.glmm <- function(x, ci = 0.95, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) .ci_generic(model = x, ci = ci, dof = Inf, effects = effects, ...) } #' @export standard_error.glmm <- function(model, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) out <- insight::get_parameters(model, effects = "all") out$SE <- sqrt(diag(insight::get_varcov(model, effects = "all"))) out <- out[, c("Parameter", "SE", "Effects")] if (effects != "all") { out <- out[out$Effects == effects, , drop = FALSE] out$Effects <- NULL } out } #' @export p_value.glmm <- function(model, effects = c("all", "fixed", "random"), ...) { effects <- match.arg(effects) s <- summary(model) out <- insight::get_parameters(model, effects = "all") out$p <- c(s$coefmat[, 4], s$nucoefmat[, 4]) out <- out[, c("Parameter", "p", "Effects")] if (effects != "all") { out <- out[out$Effects == effects, , drop = FALSE] out$Effects <- NULL } out } #' @export format_parameters.glmm <- function(model, brackets = c("[", "]"), ...) { .format_parameter_default(model, effects = "all", brackets = brackets) } parameters/R/display.R0000644000176200001440000001061215057525051014413 0ustar liggesusers#' @title Print tables in different output formats #' @name display.parameters_model #' #' @description Prints tables (i.e. data frame) in different output formats. #' `print_md()` is an alias for `display(format = "markdown")` and #' `print_html()` is an alias for `display(format = "html")`. A third option is #' `display(format = "tt")`, which returns a `tinytable` object, which is either #' printed as markdown or HTML table, depending on the environment. #' #' @param object An object returned by one of the package's function, for example #' [`model_parameters()`], [`simulate_parameters()`], [`equivalence_test()`] or #' [`principal_components()`]. #' @param format String, indicating the output format. Can be `"markdown"` #' `"html"`, or `"tt"`. `format = "tt"` creates a `tinytable` object, which is #' either printed as markdown or HTML table, depending on the environment. See #' [`insight::export_table()`] for details. #' @param ... Arguments passed to the underlying functions, such as `print_md()` #' or `print_html()`. #' #' @return If `format = "markdown"`, the return value will be a character #' vector in markdown-table format. If `format = "html"`, an object of #' class `gt_tbl`. If `format = "tt"`, an object of class `tinytable`. #' #' @details `display()` is useful when the table-output from functions, #' which is usually printed as formatted text-table to console, should #' be formatted for pretty table-rendering in markdown documents, or if #' knitted from rmarkdown to PDF or Word files. See #' [vignette](https://easystats.github.io/parameters/articles/model_parameters_formatting.html) #' for examples. 
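#'
#' The default output format can also be set globally via
#' `options(easystats_display_format = ...)`, which takes precedence over the
#' `format` argument. A minimal sketch:
#'
#' ```r
#' options(easystats_display_format = "html")
#' mp <- model_parameters(lm(mpg ~ wt + cyl, data = mtcars))
#' display(mp) # now rendered as an HTML table
#' options(easystats_display_format = NULL) # restore the package default
#' ```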
#'
#' @seealso [`print.parameters_model()`] and [`print.compare_parameters()`]
#'
#' @examplesIf require("gt", quietly = TRUE)
#' model <- lm(mpg ~ wt + cyl, data = mtcars)
#' mp <- model_parameters(model)
#' display(mp)
#'
#' \donttest{
#' data(iris)
#' lm1 <- lm(Sepal.Length ~ Species, data = iris)
#' lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris)
#' lm3 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris)
#' out <- compare_parameters(lm1, lm2, lm3)
#'
#' print_html(
#'   out,
#'   select = "{coef}{stars}|({ci})",
#'   column_labels = c("Estimate", "95% CI")
#' )
#'
#' # line break, unicode minus-sign
#' print_html(
#'   out,
#'   select = "{estimate}{stars}<br>
({ci_low} \u2212 {ci_high})", #' column_labels = c("Est. (95% CI)") #' ) #' } #' #' @examplesIf all(insight::check_if_installed(c("glmmTMB", "lme4", "tinytable"), quietly = TRUE)) #' \donttest{ #' data(iris) #' data(Salamanders, package = "glmmTMB") #' m1 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) #' m2 <- lme4::lmer( #' Sepal.Length ~ Petal.Length + Petal.Width + (1 | Species), #' data = iris #' ) #' m3 <- glmmTMB::glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' out <- compare_parameters(m1, m2, m3, effects = "all", component = "all") #' #' display(out, format = "tt") #' #' display(out, select = "{estimate}|{ci}", format = "tt") #' } #' @export display.parameters_model <- function(object, format = "markdown", ...) { format <- .display_default_format(format) if (format %in% c("html", "tt")) { print_html(x = object, backend = ifelse(format == "tt", "tt", "html"), ...) } else { print_md(x = object, ...) } } #' @export display.parameters_simulate <- display.parameters_model #' @export display.parameters_brms_meta <- display.parameters_model #' @export display.compare_parameters <- display.parameters_model #' @export display.parameters_sem <- display.parameters_model #' @export display.parameters_efa_summary <- display.parameters_model #' @export display.parameters_pca_summary <- display.parameters_model #' @export display.parameters_omega_summary <- display.parameters_model #' @export display.parameters_efa <- display.parameters_model #' @export display.parameters_pca <- display.parameters_model #' @export display.parameters_omega <- display.parameters_model #' @export display.equivalence_test_lm <- display.parameters_model #' @export display.parameters_p_function <- display.parameters_model .display_default_format <- function(format) { format <- getOption("easystats_display_format", format) insight::validate_argument(format, c("markdown", "html", "md", "tt")) } parameters/R/methods_modelbased.R0000644000176200001440000000236315066721001016565 0ustar liggesusers# model_parameters ---------------- #' @export model_parameters.estimate_means <- function(model, ...) { out <- model class(out) <- c("parameters_model", "see_parameters_model", class(out)) out } #' @export model_parameters.estimate_slopes <- model_parameters.estimate_means #' @export model_parameters.estimate_contrasts <- model_parameters.estimate_means # standard_error ---------------- #' @export standard_error.estimate_means <- function(model, ...) { params <- insight::get_parameters(model) data.frame(Parameter = params$Parameter, SE = model$SE, stringsAsFactors = FALSE) } #' @export standard_error.estimate_slopes <- standard_error.estimate_means #' @export standard_error.estimate_contrasts <- standard_error.estimate_means # ci ---------------- #' @export ci.estimate_means <- function(x, ...) { params <- insight::get_parameters(x) ci_value <- attributes(x)$ci if (is.null(ci_value)) { ci_value <- 0.95 } data.frame( Parameter = params$Parameter, CI = ci_value, CI_low = x$CI_low, CI_high = x$CI_high, stringsAsFactors = FALSE ) } #' @export ci.estimate_slopes <- ci.estimate_means #' @export ci.estimate_contrasts <- ci.estimate_means parameters/R/methods_BBMM.R0000644000176200001440000000261614716604200015206 0ustar liggesusers#' @export ci.BBmm <- ci.default #' @export ci.BBreg <- ci.default #' @export standard_error.BBmm <- function(model, ...) 
{ .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ), SE = as.data.frame(summary(model)$fixed.coefficients)$StdErr ) } #' @export standard_error.BBreg <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ), SE = as.data.frame(summary(model)$coefficients)$StdErr ) } ## TODO add ci_method later? ## TODO BBmm only has p based on normal distribution assumptions? #' @export p_value.BBmm <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ), p = as.data.frame(summary(model)$fixed.coefficients)$p.value ) } ## TODO add ci_method later? ## TODO BBreg only has p based on normal distribution assumptions? #' @export p_value.BBreg <- function(model, ...) { .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", component = "conditional", flatten = TRUE ), p = as.data.frame(summary(model)$coefficients)$p.value ) } parameters/R/methods_base.R0000644000176200001440000000767014736731407015424 0ustar liggesusers#' @rdname model_parameters.brmsfit #' @export model_parameters.data.frame <- function(model, as_draws = FALSE, exponentiate = FALSE, verbose = TRUE, ...) { # treat data frame as bootstraps/posteriors? if (isTRUE(as_draws)) { return(model_parameters.draws(model, exponentiate = exponentiate, verbose = verbose, ...)) } if (isTRUE(verbose)) { insight::format_warning( "A `data.frame` object is no valid regression model object and cannot be used with `model_parameters()`." ) } NULL } # Standard Errors from standard classes --------------------------------------------- #' @rdname standard_error #' @export standard_error.factor <- function(model, force = FALSE, verbose = TRUE, ...) { if (!force) { if (verbose) { insight::format_warning("Can't compute standard error of non-numeric variables.") } return(NA) } standard_error(as.numeric(model), ...) } #' @export standard_error.character <- standard_error.factor #' @export standard_error.numeric <- function(model, ...) { sqrt(stats::var(model, na.rm = TRUE) / length(stats::na.omit(model))) } #' @export standard_error.data.frame <- function(model, verbose = TRUE, ...) { unlist(sapply(model, standard_error, verbose = verbose)) } #' @export standard_error.list <- function(model, verbose = TRUE, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") standard_error(model) } else if (isTRUE(verbose)) { insight::print_color("\nCould not extract standard errors from model object.\n", "red") } } #' @export standard_error.table <- function(model, ...) { # compute standard error of proportions if (length(dim(model)) == 1) { total.n <- as.vector(sum(model)) rel.frq <- as.vector(model) / total.n out <- .data_frame( Value = names(model), Proportion = rel.frq, SE = suppressWarnings(sqrt(rel.frq * (1 - rel.frq) / total.n)) ) } else { out <- NA } out } #' @export standard_error.xtabs <- standard_error.table #' @export standard_error.parameters_standardized <- function(model, verbose = TRUE, ...) 
{ se <- attr(model, "standard_error") if (is.null(se)) { if (isTRUE(verbose)) { insight::print_color("\nCould not extract standard errors of standardized coefficients.\n", "red") } return(NULL) } # for "refit" method if (is.data.frame(se) && "SE" %in% colnames(se)) { se <- se$SE } out <- .data_frame( Parameter = model$Parameter, SE = as.vector(se) ) insight::text_remove_backticks(out, verbose = FALSE) } # p-Values from standard classes --------------------------------------------- #' @export p_value.numeric <- function(model, null = 0, ...) { # k_lt0 <- sum(model <= 0) # k_gt0 <- sum(model >= 0) # k <- 2 * min(k_lt0, k_gt0) # N <- length(model) # https://blogs.sas.com/content/iml/2011/11/02/how-to-compute-p-values-for-a-bootstrap-distribution.html # https://stats.stackexchange.com/a/28725/293056 x <- stats::na.omit(model) xM <- mean(x) x0 <- x - xM k <- sum(abs(x0) > abs(xM - null)) # two tailed p-value N <- length(x) (k + 1) / (N + 1) } #' @export p_value.data.frame <- function(model, ...) { model_data <- model[vapply(model, is.numeric, TRUE)] .data_frame( Parameter = names(model_data), p = vapply(model_data, p_value, 1) ) } #' @export p_value.list <- function(model, method = NULL, verbose = TRUE, ...) { if ("gam" %in% names(model)) { model <- model$gam class(model) <- c("gam", "lm", "glm") p_value(model, method = method) } else if (isTRUE(verbose)) { insight::format_warning("Could not extract p-values from model object.") } } parameters/R/datasets.R0000644000176200001440000000163114077467603014570 0ustar liggesusers#' @docType data #' @title Sample data set #' @name fish #' @keywords data #' #' @description A sample data set, used in tests and some examples. NULL #' @docType data #' @title Sample data set #' @name qol_cancer #' @keywords data #' #' @description A sample data set with longitudinal data, used in the vignette describing the `datawizard::demean()` function. Health-related quality of life from cancer-patients was measured at three time points (pre-surgery, 6 and 12 months after surgery). #' #' @format A data frame with 564 rows and 7 variables: #' \describe{ #' \item{ID}{Patient ID} #' \item{QoL}{Quality of Life Score} #' \item{time}{Timepoint of measurement} #' \item{age}{Age in years} #' \item{phq4}{Patients' Health Questionnaire, 4-item version} #' \item{hospital}{Hospital ID, where patient was treated} #' \item{education}{Patients' educational level} #' } NULL parameters/R/dof_kenward.R0000644000176200001440000000016215073732442015232 0ustar liggesusers#' @rdname p_value_kenward #' @export dof_kenward <- function(model) { insight::get_df(model, "kenward") } parameters/R/methods_mmrm.R0000644000176200001440000000530414761570351015447 0ustar liggesusers# model_parameters -------------------- #' @export model_parameters.mmrm <- function(model, ci = 0.95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { ci_method <- switch(model$method, Satterthwaite = "satterthwaite", "kenward" ) # extract model parameters table, as data frame out <- .model_parameters_generic( model = model, ci = ci, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, merge_by = "Parameter", standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, include_info = include_info, keep_parameters = keep, drop_parameters = drop, vcov = NULL, vcov_args = NULL, verbose = verbose, ... 
) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export model_parameters.mmrm_fit <- model_parameters.mmrm #' @export model_parameters.mmrm_tmb <- model_parameters.mmrm # ci -------------------- #' @export ci.mmrm <- function(x, ci = 0.95, method = "residual", ...) { .ci_generic(model = x, ci = ci, method = "residual", ...) } #' @export ci.mmrm_fit <- ci.mmrm #' @export ci.mmrm_tmb <- ci.mmrm # p -------------------- #' @export p_value.mmrm <- function(model, dof = NULL, method = NULL, component = "all", vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { p_value.default( model, dof = NULL, method = NULL, component = "all", vcov = NULL, vcov_args = NULL, verbose = verbose, ... ) } #' @export p_value.mmrm_fit <- p_value.mmrm #' @export p_value.mmrm_tmb <- p_value.mmrm # SE -------------------- #' @export standard_error.mmrm <- function(model, ...) { se <- .get_se_from_summary(model) .data_frame(Parameter = names(se), SE = as.vector(se)) } #' @export standard_error.mmrm_fit <- standard_error.mmrm #' @export standard_error.mmrm_tmb <- standard_error.mmrm parameters/R/methods_gamlss.R0000644000176200001440000000124014507235543015757 0ustar liggesusers#################### .gamlss ------ #' @export model_parameters.gamlss <- model_parameters.gam #' @export standard_error.gamlss <- function(model, ...) { parms <- insight::get_parameters(model) utils::capture.output({ cs <- summary(model) }) .data_frame( Parameter = parms$Parameter, SE = as.vector(cs[, 2]), Component = parms$Component ) } #' @export p_value.gamlss <- function(model, ...) { parms <- insight::get_parameters(model) utils::capture.output({ cs <- summary(model) }) .data_frame( Parameter = parms$Parameter, p = as.vector(cs[, 4]), Component = parms$Component ) } parameters/R/methods_phylolm.R0000644000176200001440000000162314411001335016142 0ustar liggesusers# ci ----------------- #' @export ci.phylolm <- function(x, ci = 0.95, dof = NULL, method = "wald", verbose = TRUE, ...) { method <- match.arg(method, choices = c("wald", "residual", "normal", "boot")) if (method == "boot" && (is.null(x$boot) || x$boot <= 0)) { insight::format_warning( "Bootstrapped confidence intervals are not available", "Try re-fitting your model, using `boot = `, where `n` is the number of bootstrap replicates." ) method <- "wald" } if (method == "boot") { s <- stats::coef(summary(x)) out <- .data_frame( Parameter = row.names(s), CI_low = as.vector(s[, "lowerbootCI"]), CI_high = as.vector(s[, "upperbootCI"]) ) } else { out <- ci.default(x = x, ci = ci, dof = dof, method = method, verbose = verbose, ...) } row.names(out) <- NULL out } #' @export ci.phyloglm <- ci.phylolm parameters/R/methods_coxrobust.R0000644000176200001440000000102614355245205016520 0ustar liggesusers#' @export standard_error.coxr <- function(model, ...) { params <- insight::get_parameters(model) vc <- insight::get_varcov(model) .data_frame( Parameter = params$Parameter, SE = as.vector(sqrt(diag(vc))) ) } ## TODO add ci_method later? #' @export p_value.coxr <- function(model, ...) 
{
  stat <- insight::get_statistic(model)
  if (!is.null(stat)) {
    .data_frame(
      Parameter = stat$Parameter,
      p = as.vector(2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE))
    )
  }
}
parameters/R/methods_glm.R0000644000176200001440000000006014214310473015240 0ustar liggesusers# classes: .glm


#################### .glm
parameters/R/methods_posterior.R0000644000176200001440000000561614736731407016530 0ustar liggesusers#' @export
model_parameters.draws <- function(model,
                                   centrality = "median",
                                   dispersion = FALSE,
                                   ci = 0.95,
                                   ci_method = "eti",
                                   test = "pd",
                                   rope_range = "default",
                                   rope_ci = 0.95,
                                   exponentiate = FALSE,
                                   keep = NULL,
                                   drop = NULL,
                                   verbose = TRUE,
                                   ...) {
  out <- .posterior_draws_to_df(model)

  # Processing
  params <- .extract_parameters_bayesian(
    out,
    centrality = centrality,
    dispersion = dispersion,
    ci = ci,
    ci_method = ci_method,
    test = test,
    rope_range = rope_range,
    rope_ci = rope_ci,
    bf_prior = NULL,
    diagnostic = NULL,
    priors = FALSE,
    keep_parameters = keep,
    drop_parameters = drop,
    verbose = verbose,
    ...
  )

  # exponentiate coefficients and SE/CI, if requested
  params <- .exponentiate_parameters(params, exponentiate = exponentiate)

  attr(params, "ci") <- ci
  attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model))
  class(params) <- c("parameters_model", "see_parameters_model", class(params))

  params
}


# Standard Errors ---------------------------------------------

#' @export
standard_error.draws <- function(model, verbose = TRUE, ...) {
  params <- .posterior_draws_to_df(model)
  .data_frame(
    Parameter = colnames(params),
    SE = unname(vapply(params, stats::sd, 1, na.rm = TRUE))
  )
}


# p-Values ---------------------------------------------

#' @export
p_value.draws <- function(model, ...) {
  params <- .posterior_draws_to_df(model)
  p <- bayestestR::p_direction(params)
  .data_frame(
    Parameter = .remove_backticks_from_string(p$Parameter),
    p = vapply(p$pd, bayestestR::convert_pd_to_p, 1)
  )
}


# helper ------------------------------

.posterior_draws_to_df <- function(x) {
  UseMethod(".posterior_draws_to_df")
}

.posterior_draws_to_df.default <- function(x) {
  insight::format_error(sprintf("Objects of class `%s` are not yet supported.", class(x)[1]))
}

.posterior_draws_to_df.data.frame <- function(x) {
  x
}

.posterior_draws_to_df.draws_df <- function(x) {
  insight::check_if_installed("posterior")
  datawizard::data_remove(as.data.frame(posterior::as_draws_df(x)), c(".chain", ".iteration", ".draw"))
}

.posterior_draws_to_df.draws_matrix <- .posterior_draws_to_df.draws_df

.posterior_draws_to_df.draws_array <- .posterior_draws_to_df.draws_df

.posterior_draws_to_df.draws_list <- .posterior_draws_to_df.draws_df

.posterior_draws_to_df.draws_rvars <- .posterior_draws_to_df.draws_df
parameters/R/cluster_meta.R0000644000176200001440000001124714736731407015441 0ustar liggesusers#' Metaclustering
#'
#' One of the core "issues" of statistical clustering is that, in many cases,
#' different methods will give different results. The **metaclustering** approach
#' proposed by *easystats* (that finds echoes in *consensus clustering*; see Monti
#' et al., 2003) consists of treating the unique clustering solutions as an ensemble,
#' from which we can derive a probability matrix. This matrix contains, for each
#' pair of observations, the probability of being in the same cluster. For instance,
#' if the 6th and the 9th row of a dataframe have been assigned to the same cluster
#' by 5 out of 10 clustering methods, then their probability of being grouped together
#' is 0.5.
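#'
#' As a minimal sketch of this idea (an illustrative example mirroring the
#' ones below; the observation indices are arbitrary):
#'
#' ```r
#' rez1 <- cluster_analysis(iris[1:4], n = 2, method = "kmeans")
#' rez2 <- cluster_analysis(iris[1:4], n = 3, method = "kmeans")
#' m <- cluster_meta(list(rez1, rez2))
#' m["6", "9"] # probability that observations 6 and 9 are clustered together
#' ```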
#'
#' Metaclustering is based on the hypothesis that, as each clustering algorithm
#' embodies a different prism by which it sees the data, running an infinite
#' number of algorithms would result in the emergence of the "true" clusters.
#' As the number of algorithms and parameters is finite, the probabilistic
#' perspective is a useful proxy. This method is useful when there are no
#' obvious reasons to prefer one clustering method over another, as well as for
#' investigating how robust some clusters are under different algorithms.
#'
#' This metaclustering probability matrix can be transformed into a dissimilarity
#' matrix (such as the one produced by `dist()`) and submitted for instance to
#' hierarchical clustering (`hclust()`). See the example below.
#'
#'
#' @param list_of_clusters A list of vectors with the clustering assignments from various methods.
#' @param rownames An optional vector of row.names for the matrix.
#' @param ... Currently not used.
#'
#' @return A matrix containing the pairwise probabilities (between each pair of
#'   observations) of being clustered together by the methods.
#'
#'
#' @examples
#' \donttest{
#' data <- iris[1:4]
#'
#' rez1 <- cluster_analysis(data, n = 2, method = "kmeans")
#' rez2 <- cluster_analysis(data, n = 3, method = "kmeans")
#' rez3 <- cluster_analysis(data, n = 6, method = "kmeans")
#'
#' list_of_clusters <- list(rez1, rez2, rez3)
#'
#' m <- cluster_meta(list_of_clusters)
#'
#' # Visualize matrix without reordering
#' heatmap(m, Rowv = NA, Colv = NA, scale = "none") # Without reordering
#' # Reordered heatmap
#' heatmap(m, scale = "none")
#'
#' # Extract 3 clusters
#' predict(m, n = 3)
#'
#' # Convert to dissimilarity
#' d <- as.dist(abs(m - 1))
#' model <- hclust(d)
#' plot(model, hang = -1)
#' }
#' @export
cluster_meta <- function(list_of_clusters, rownames = NULL, ...) {
  x <- list()

  # Sanitize output
  for (i in seq_along(list_of_clusters)) {
    # Get name
    name <- names(list_of_clusters[i])
    if (is.null(name)) name <- paste0("Solution", i)
    solution <- list_of_clusters[[i]]
    if (inherits(solution, "cluster_analysis")) {
      if (name == paste0("Solution", i)) {
        name <- paste0(name, "_", attributes(solution)$method)
      }
      solution <- stats::predict(solution, ...)
    }
    solution[solution == "0"] <- NA
    x[[name]] <- solution
  }

  # validation check
  if (length(unique(lengths(x))) != 1) {
    insight::format_error("The clustering solutions are not of equal lengths.")
  }

  # Convert to dataframe
  cluster_data <- as.data.frame(x)
  if (!is.null(names(solution))) row.names(cluster_data) <- names(solution)
  if (!is.null(rownames)) row.names(cluster_data) <- rownames

  # Get probability matrix
  m <- .cluster_meta_matrix(cluster_data)
  class(m) <- c("cluster_meta", class(m))
  m
}


#' @keywords internal
.cluster_meta_matrix <- function(data) {
  # Internal function
  .get_prob <- function(x) {
    if (anyNA(x)) {
      NA
    } else if (length(unique(x[!is.na(x)])) == 1) {
      0
    } else {
      1
    }
  }

  # Initialize matrix
  m <- matrix(
    data = NA, nrow = nrow(data), ncol = nrow(data),
    dimnames = list(rev(row.names(data)), row.names(data))
  )

  for (row in row.names(m)) {
    for (col in colnames(m)) {
      if (row == col) {
        m[row, col] <- 0
        next
      }
      subset_rows <- data[row.names(data) %in% c(row, col), ]
      rez <- sapply(subset_rows[2:ncol(subset_rows)], .get_prob)
      m[row, col] <- sum(rez, na.rm = TRUE) / length(stats::na.omit(rez))
    }
  }

  m
}


# Methods ----------------------------------------------------------------

#' @export
#' @inheritParams stats::predict
predict.cluster_meta <- function(object, n = NULL, ...)
{ if (is.null(n)) { insight::format_error("The number of clusters to extract `n` must be entered.") } d <- stats::as.dist(abs(object - 1)) model <- stats::hclust(d) stats::cutree(model, k = n) } parameters/R/group_level_total.R0000644000176200001440000001756315003670050016477 0ustar liggesusers.group_level_total <- function(x, ...) { UseMethod(".group_level_total") } .group_level_total.glmmTMB <- function(x, ...) { params <- suppressWarnings(insight::compact_list(stats::coef(x))) params_cond <- params$cond params_zi <- params$zi # handle random effects in conditional component if (!is.null(params_cond)) { # extract levels of group factors group_levels <- insight::compact_list(lapply( x$modelInfo$reTrms$cond$flist, levels )) # extract names of slopes slope_names <- insight::compact_list(x$modelInfo$reTrms$cond$cnms) # reshape "coef()" data params_cond <- .reshape_group_level_coefficients( x, params = params_cond, group_levels = group_levels, slope_names = slope_names ) params_cond$Component <- "conditional" } # handle random effects in zero-inflation component if (!is.null(params_zi)) { # extract levels of group factors group_levels <- insight::compact_list(lapply( x$modelInfo$reTrms$zi$flist, levels )) # extract names of slopes slope_names <- insight::compact_list(x$modelInfo$reTrms$zi$cnms) # reshape "coef()" data params_zi <- .reshape_group_level_coefficients( x, params = params_zi, group_levels = group_levels, slope_names = slope_names, component = "zero_inflated_random" ) params_zi$Component <- "zero_inflated" } # create list of data frames out <- insight::compact_list(list(params_cond, params_zi)) if (length(out) == 1) { # unlist if only one component out <- out[[1]] } else { # else, join - we can't use rbind() here, because column # names do not necessarily match out <- datawizard::data_join(out, join = "bind") } rownames(out) <- NULL out } .group_level_total.merMod <- function(x, ...) { params <- suppressWarnings(stats::coef(x)) # extract levels of group factors group_levels <- insight::compact_list(lapply(methods::slot(x, "flist"), levels)) # extract names of slopes slope_names <- insight::compact_list(methods::slot(x, "cnms")) # reshape "coef()" data params <- .reshape_group_level_coefficients( x, params = params, group_levels = group_levels, slope_names = slope_names ) params } .group_level_total.stanreg <- function(x, ...) { params <- suppressWarnings(stats::coef(x)) # extract levels of group factors group_levels <- insight::compact_list(lapply(x$glmod$reTrms$flist, levels)) # extract names of slopes slope_names <- insight::compact_list(x$glmod$reTrms$cnms) # reshape "coef()" data params <- .reshape_group_level_coefficients( x, params = params, group_levels = group_levels, slope_names = slope_names ) params } .group_level_total.brmsfit <- function(x, ...) { # extract random effects information group_factors <- insight::find_random(x, split_nested = TRUE, flatten = TRUE) random_slopes <- insight::find_random_slopes(x) params <- NULL # extract coefficients - only once, not in the loop model_coefficients <- stats::coef(x, summary = FALSE) # create full data frame of all random effects retrieved from coef() params <- do.call( rbind, lapply(group_factors, function(i) { # we want the posterior distribution from coef(), so we can # use bayestestR ranef <- model_coefficients[[i]] parameter_names <- dimnames(ranef)[[3]] out <- lapply( parameter_names, function(pn) { # summary of posterior d <- bayestestR::describe_posterior(as.data.frame(ranef[, , pn]), verbose = FALSE, ...) 
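          # `ranef[, , pn]` is a draws-by-levels matrix of posterior samples
          # for parameter `pn`, so `describe_posterior()` returns one summary
          # row per group level. Illustrative sketch (not executed; assumes a
          # fitted brms model `m` with group factor `g` -- both hypothetical):
          if (FALSE) {
            cf <- stats::coef(m, summary = FALSE)
            dim(cf[["g"]]) # draws x levels of `g` x parameters
          }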
# add information about group factor and levels d$Group <- i # Parameters in the returned data frame are actually the levels # # from the group factors d$Level <- d$Parameter # the parameter names can be taken from dimnames d$Parameter <- pn d } ) names(out) <- parameter_names do.call(rbind, out) }) ) # select parameters to keep. We want all intercepts, and all random slopes components <- insight::find_auxiliary(x, verbose = FALSE) # standard components parameters_to_keep <- params$Parameter %in% c("Intercept", random_slopes$random) parameters_to_keep <- parameters_to_keep | params$Parameter %in% c("zi_Intercept", random_slopes$zero_inflated_random) # auxiliary components for (comp in components) { parameters_to_keep <- parameters_to_keep | params$Parameter %in% c(paste0(comp, "_Intercept"), random_slopes[[paste0(comp, "_random")]]) } # furthermore, categorical random slopes have levels in their name, so we # try to find those parameters here, too if (!is.null(random_slopes$random)) { parameters_to_keep <- parameters_to_keep | startsWith(params$Parameter, random_slopes$random) } if (!is.null(random_slopes$zero_inflated_random)) { parameters_to_keep <- parameters_to_keep | startsWith(params$Parameter, paste0("zi_", random_slopes$zero_inflated_random)) } # auxiliary components for (comp in components) { rc <- paste0(comp, "_random") if (!is.null(random_slopes[[rc]])) { parameters_to_keep <- parameters_to_keep | startsWith(params$Parameter, paste0(comp, "_", random_slopes[[rc]])) } } # add Component column params$Component <- "conditional" params$Component[startsWith(params$Parameter, "zi_")] <- "zero_inflated" for (comp in components) { params$Component[startsWith(params$Parameter, paste0(comp, "_"))] <- comp } # clean names params$Parameter <- gsub("^zi_", "", params$Parameter) for (comp in components) { params$Parameter <- gsub(paste0("^", comp, "_"), "", params$Parameter) } rownames(params) <- NULL # make sure first columns are group and level datawizard::data_relocate(params[parameters_to_keep, ], c("Group", "Level")) } # helper ---------------------------------------------------------------------- .reshape_group_level_coefficients <- function(x, params, group_levels, slope_names = NULL, component = "random") { group_factors <- insight::find_random(x) random_slopes <- insight::find_random_slopes(x) # find all columns for which we can add fixed and random effects cols <- c(random_slopes[[component]], "(Intercept)") # iterate all random effects, add group name and levels for (i in group_factors[[component]]) { # overwrite cols? 
    # if random slopes are factors, the names are
    # not the variable names, but name + factor level, so we need
    # to update the columns to select here
    if (!is.null(slope_names) && length(slope_names)) {
      cols <- slope_names[[i]]
    }
    # select columns
    params[[i]] <- params[[i]][cols]
    # add information about group factor and levels
    params[[i]]$Group <- i
    params[[i]]$Level <- group_levels[[i]]
  }

  # if only one component, unlist
  if (length(params) == 1) {
    out <- params[[1]]
  } else {
    # else, join - we can't use rbind() here, because column
    # names do not necessarily match
    class(params) <- "list"
    out <- datawizard::data_join(params, join = "bind")
  }

  # reshape
  to_reshape <- setdiff(colnames(out), c("Group", "Level"))
  out <- datawizard::reshape_longer(out, select = to_reshape)

  # rename
  out <- datawizard::data_rename(
    out,
    select = c(Parameter = "name", Coefficient = "value")
  )

  # make sure first columns are group and level
  out <- datawizard::data_relocate(out, c("Group", "Level"))

  # remove those without valid values
  out[stats::complete.cases(out), ]
}
parameters/R/methods_kmeans.R0000644000176200001440000000576014736731407015760 0ustar liggesusers#' @export
model_parameters.kmeans <- function(model, ...) {
  params <- cbind(
    data.frame(
      Cluster = row.names(model$centers),
      n_Obs = model$size,
      Sum_Squares = model$withinss,
      stringsAsFactors = FALSE
    ),
    model$centers
  )

  # Long means
  means <- datawizard::reshape_longer(params,
    select = 4:ncol(params),
    values_to = "Mean",
    names_to = "Variable"
  )

  # Attributes
  attr(params, "variance") <- model$betweenss / model$totss
  attr(params, "Sum_Squares_Between") <- model$betweenss
  attr(params, "Sum_Squares_Total") <- model$totss
  attr(params, "means") <- means
  attr(params, "model") <- model
  attr(params, "iterations") <- model$iter
  attr(params, "scores") <- model$cluster
  attr(params, "type") <- "kmeans"

  class(params) <- c("parameters_clusters", class(params))
  params
}


# factoextra::hkmeans -----------------------------------------------------

#' @export
model_parameters.hkmeans <- model_parameters.kmeans


# Methods -------------------------------------------------------------------

#' @export
print.parameters_clusters <- function(x, digits = 2, ...) {
  clusterHeading <- "# Clustering Solution"
  if ("title" %in% names(attributes(x))) {
    clusterHeading <- attributes(x)$title
  }

  insight::print_color(clusterHeading, "blue")
  cat("\n\n")
  insight::print_colour(.text_components_variance(x), "yellow")
  cat("\n\n")
  cat(insight::export_table(x, digits = digits, ...))
  invisible(x)
}


# Predict -----------------------------------------------------------------

#' Predict method for parameters_clusters objects
#'
#' @export
#' @param names character vector or list
#' @param newdata data.frame
#' @inheritParams stats::predict
predict.parameters_clusters <- function(object, newdata = NULL, names = NULL, ...) {
  if (is.null(newdata)) {
    out <- attributes(object)$scores
  } else {
    out <- stats::predict(attributes(object)$model, newdata = newdata, ...)
  }

  # Add labels
  if (!is.null(names)) {
    # List
    if (is.list(names)) {
      out <- as.factor(out)
      for (i in names(names)) {
        levels(out)[levels(out) == i] <- names[[i]]
      }

      # Vector
    } else if (is.character(names)) {
      out <- names[as.numeric(out)]
    } else {
      insight::format_error("`names` must be a character vector or a list.")
    }
    out <- as.character(out)
  }
  out
}


#' @export
#' @inheritParams stats::predict
predict.kmeans <- function(object, newdata = NULL, ...)
{ if (is.null(newdata)) { return(object$cluster) } # compute squared euclidean distance from each sample to each cluster center centers <- object$centers sumsquares_by_center <- apply(centers, 1, function(x) { colSums((t(newdata) - x)^2) }) if (is.null(nrow(sumsquares_by_center))) { as.vector(which.min(sumsquares_by_center)) } else { as.vector(apply(as.data.frame(sumsquares_by_center), 1, which.min)) } } parameters/R/methods_glmgee.R0000644000176200001440000000334514716604200015731 0ustar liggesusers#' @export standard_error.glmgee <- function(model, vcov = c("robust", "df-adjusted", "model", "bias-corrected", "jackknife"), verbose = TRUE, ...) { vcov <- match.arg(vcov) se <- NULL .vcov <- insight::get_varcov( model, vcov = vcov, verbose = verbose, ... ) se <- sqrt(diag(.vcov)) .data_frame(Parameter = names(se), SE = as.vector(se)) } #' @export p_value.glmgee <- function(model, method = NULL, vcov = c("robust", "df-adjusted", "model", "bias-corrected", "jackknife"), ...) { vcov <- match.arg(vcov) est <- insight::get_parameters(model, component = "conditional") se <- standard_error(model, vcov = vcov, verbose = FALSE) if (is.null(method)) { method <- "wald" } p <- 2 * stats::pt( abs(est$Estimate / se$SE), df = insight::get_df(x = model, type = method), lower.tail = FALSE ) .data_frame( Parameter = est$Parameter, p = as.vector(p) ) } #' @export ci.glmgee <- function(x, ci = 0.95, dof = NULL, method = NULL, vcov = c("robust", "df-adjusted", "model", "bias-corrected", "jackknife"), verbose = TRUE, ...) { vcov <- match.arg(vcov) out <- .ci_generic( model = x, ci = ci, dof = dof, method = method, vcov = vcov, vcov_args = NULL, component = "conditional", verbose = verbose ) # Return the CI bounds as a data frame. row.names(out) <- NULL out } parameters/R/methods_varest.R0000644000176200001440000000636314736731407016014 0ustar liggesusers# .varest #' @export model_parameters.varest <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, verbose = TRUE, ...) { params <- lapply(names(model$varresult), function(i) { out <- model_parameters( model = model$varresult[[i]], ci = ci, bootstrap = bootstrap, iterations = iterations, standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, verbose = verbose, ... ) out$Group <- paste0("Equation ", i) out }) params <- do.call(rbind, params) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) params } #' @export ci.varest <- function(x, ci = 0.95, method = NULL, ...) { params <- lapply(names(x$varresult), function(i) { out <- ci(x = x$varresult[[i]], ci = ci, method = method, ...) out$Group <- paste0("Equation ", i) out }) do.call(rbind, params) } #' @export standard_error.varest <- function(model, method = NULL, ...) { params <- lapply(names(model$varresult), function(i) { out <- standard_error(model = model$varresult[[i]], method = method, ...) out$Group <- paste0("Equation ", i) out }) do.call(rbind, params) } #' @export p_value.varest <- function(model, ...) { params <- lapply(names(model$varresult), function(i) { out <- p_value(model = model$varresult[[i]], ...) out$Group <- paste0("Equation ", i) out }) do.call(rbind, params) } #' @export simulate_model.varest <- function(model, iterations = 1000, ...) { out <- lapply(names(model$varresult), function(i) { simulate_model(model = model$varresult[[i]], iterations = iterations, ...) 
}) names(out) <- paste0("Equation ", names(model$varresult)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export simulate_parameters.varest <- function(model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ...) { data <- simulate_model(model, iterations = iterations, ...) out <- lapply(names(data), function(i) { x <- .summary_bootstrap( data = data[[i]], test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) x$Group <- i x }) out <- do.call(rbind, out) class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(out, "iterations") <- iterations attr(out, "ci") <- ci attr(out, "ci_method") <- ci_method attr(out, "centrality") <- centrality out } parameters/R/principal_components.R0000644000176200001440000005036315030725674017207 0ustar liggesusers#' Principal Component Analysis (PCA) and Factor Analysis (FA) #' #' The functions `principal_components()` and `factor_analysis()` can be used to #' perform a principal component analysis (PCA) or a factor analysis (FA). They #' return the loadings as a data frame, and various methods and functions are #' available to access / display other information (see the 'Details' section). #' #' @param x A data frame or a statistical model. For `closest_component()`, the #' output of the `principal_components()` function. #' @param n Number of components to extract. If `n="all"`, then `n` is set as #' the number of variables minus 1 (`ncol(x)-1`). If `n="auto"` (default) or #' `n=NULL`, the number of components is selected through [`n_factors()`] #' resp. [`n_components()`]. Else, if `n` is a number, `n` components are #' extracted. If `n` exceeds number of variables in the data, it is #' automatically set to the maximum number (i.e. `ncol(x)`). In #' [`reduce_parameters()`], can also be `"max"`, in which case it will select #' all the components that are maximally pseudo-loaded (i.e., correlated) by #' at least one variable. #' @param rotation If not `"none"`, the PCA / FA will be computed using the #' **psych** package. Possible options include `"varimax"`, `"quartimax"`, #' `"promax"`, `"oblimin"`, `"simplimax"`, or `"cluster"` (and more). See #' [`psych::fa()`] for details. The default is `"none"` for PCA, and #' `"oblimin"` for FA. #' @param factor_method The factoring method to be used. Passed to the `fm` #' argument in `psych::fa()`. Defaults to `"minres"` (minimum residual). Other #' options include `"uls"`, `"ols"`, `"wls"`, `"gls"`, `"ml"`, `"minchi"`, #' `"minrank"`, `"old.min"`, and `"alpha"`. See `?psych::fa` for details. #' @param sparse Whether to compute sparse PCA (SPCA, using [`sparsepca::spca()`]). #' SPCA attempts to find sparse loadings (with few nonzero values), which improves #' interpretability and avoids overfitting. Can be `TRUE` or `"robust"` (see #' [`sparsepca::robspca()`]). #' @param sort Sort the loadings. #' @param n_obs An integer or a matrix. #' - **Integer:** Number of observations in the original data set if `x` is a #' correlation matrix. Required to compute correct fit indices. #' - **Matrix:** A matrix where each cell `[i, j]` specifies the number of #' pairwise complete observations used to compute the correlation between #' variable `i` and variable `j` in the input `x`. 
#'   It is crucial when `x` is
#'   a correlation matrix (rather than raw data), especially if that matrix
#'   was derived from a dataset containing missing values using pairwise
#'   deletion. Providing a matrix allows `psych::fa()` to accurately calculate
#'   statistical measures, such as chi-square fit statistics, by accounting
#'   for the varying sample sizes that contribute to each individual
#'   correlation coefficient.
#' @param threshold A value between 0 and 1 indicating which (absolute) values
#'   from the loadings should be removed. An integer higher than 1 indicates the
#'   n strongest loadings to retain. Can also be `"max"`, in which case it will
#'   only display the maximum loading per variable (the most simple structure).
#' @param standardize A logical value indicating whether the variables should be
#'   standardized (centered and scaled) to have unit variance before the
#'   analysis (in general, such scaling is advisable). **Note:** This defaults
#'   to `TRUE` for PCA, but to `FALSE` for FA (because `factor_analysis()`
#'   computes a correlation matrix and uses that r-matrix for the factor analysis
#'   by default - therefore, standardization of the raw variables is unnecessary,
#'   and even undesirable when using `cor = "poly"`).
#' @param object An object of class `parameters_pca`, `parameters_efa` or
#'   `psych_efa`.
#' @param newdata An optional data frame in which to look for variables with
#'   which to predict. If omitted, the fitted values are used.
#' @param names Optional character vector to name columns of the returned data
#'   frame.
#' @param keep_na Logical, if `TRUE`, predictions also return observations
#'   with missing values from the original data, hence the number of rows of
#'   predicted data and original data is equal.
#' @param ... Arguments passed to or from other methods.
#' @param digits Argument for `print()`, indicates the number of digits
#'   (rounding) to be used.
#' @param labels Argument for `print()`, character vector of same length as
#'   columns in `x`. If provided, adds an additional column with the labels.
#' @param verbose Toggle warnings.
#'
#' @details
#'
#' ## Methods and Utilities
#' - [`n_components()`] and [`n_factors()`] automatically estimate the optimal
#'   number of dimensions to retain.
#'
#' - [`performance::check_factorstructure()`] checks the suitability of the
#'   data for factor analysis using the sphericity (see
#'   [`performance::check_sphericity_bartlett()`]) and the KMO (see
#'   [`performance::check_kmo()`]) measure.
#'
#' - [`performance::check_itemscale()`] computes various measures of internal
#'   consistencies applied to the (sub)scales (i.e., components) extracted from
#'   the PCA.
#'
#' - Running `summary()` returns information related to each component/factor,
#'   such as the explained variance and the Eigenvalues.
#'
#' - Running [`get_scores()`] computes scores for each subscale.
#'
#' - [`factor_scores()`] extracts the factor scores from objects returned by
#'   [`psych::fa()`], [`factor_analysis()`], or [`psych::omega()`].
#'
#' - Running [`closest_component()`] will return a numeric vector with the
#'   assigned component index for each column from the original data frame.
#'
#' - Running [`rotated_data()`] will return the rotated data, including missing
#'   values, so it matches the original data frame.
#'
#' - `performance::item_omega()` is a convenient wrapper around `psych::omega()`,
#'   which provides some additional methods to work seamlessly within the
#'   *easystats* framework.
#'
#' - [`performance::check_normality()`] checks residuals from objects returned
#'   by [`psych::fa()`], [`factor_analysis()`], `performance::item_omega()`,
#'   or [`psych::omega()`] for normality.
#'
#' - [`performance::model_performance()`] returns fit-indices for objects returned
#'   by [`psych::fa()`], [`factor_analysis()`], or [`psych::omega()`].
#'
#' - Running
#'   [`plot()`](https://easystats.github.io/see/articles/parameters.html#principal-component-analysis)
#'   visually displays the loadings (this requires the
#'   [**see**-package](https://easystats.github.io/see/) to work).
#'
#' ## Complexity
#' Complexity represents the number of latent components needed to account
#' for the observed variables. Whereas a perfect simple structure solution
#' has a complexity of 1 in that each item would only load on one factor,
#' a solution with evenly distributed items has a complexity greater than 1
#' (_Hofmann, 1978; Pettersson and Turkheimer, 2010_).
#'
#' ## Uniqueness
#' Uniqueness represents the variance that is 'unique' to the variable and
#' not shared with other variables. It is equal to `1 - communality`
#' (variance that is shared with other variables). A uniqueness of `0.20`
#' suggests that `20%` of that variable's variance is not shared with other
#' variables in the overall factor model. The greater the 'uniqueness', the
#' lower the relevance of the variable in the factor model.
#'
#' ## MSA
#' MSA represents the Kaiser-Meyer-Olkin Measure of Sampling Adequacy
#' (_Kaiser and Rice, 1974_) for each item. It indicates whether there is
#' enough data for each factor to give reliable results for the PCA. The value
#' should be > 0.6, and desirable values are > 0.8 (_Tabachnick and Fidell, 2013_).
#'
#' ## PCA or FA?
#' There is a simplified rule of thumb that may help to decide whether to run
#' a factor analysis or a principal component analysis:
#'
#' - Run *factor analysis* if you assume or wish to test a theoretical model of
#'   *latent factors* causing observed variables.
#'
#' - Run *principal component analysis* if you want to simply *reduce* your
#'   correlated observed variables to a smaller set of important independent
#'   composite variables.
#'
#' (Source: [CrossValidated](https://stats.stackexchange.com/q/1576/54740))
#'
#' ## Computing Item Scores
#' Use [`get_scores()`] to compute scores for the "subscales" represented by the
#' extracted principal components or factors. `get_scores()` takes the results
#' from `principal_components()` or `factor_analysis()` and extracts the
#' variables for each component found by the PCA. Then, for each of these
#' "subscales", raw means are calculated (which equals adding up the single
#' items and dividing by the number of items). This results in a sum score for
#' each component from the PCA, which is on the same scale as the original,
#' single items that were used to compute the PCA. One can also use `predict()`
#' to back-predict scores for each component, to which one can provide `newdata`
#' or a vector of `names` for the components.
#'
#' ## Explained Variance and Eigenvalues
#' Use `summary()` to get the Eigenvalues and the explained variance for each
#' extracted component. The eigenvectors and eigenvalues represent the "core"
#' of a PCA: The eigenvectors (the principal components) determine the
#' directions of the new feature space, and the eigenvalues determine their
#' magnitude. In other words, the eigenvalues explain the variance of the
#' data along the new feature axes.
#'
#' @return A data frame of loadings.
For `factor_analysis()`, this data frame is #' also of class `parameters_efa()`. Objects from `principal_components()` are #' of class `parameters_pca()`. #' #' @references #' - Kaiser, H.F. and Rice. J. (1974). Little jiffy, mark iv. Educational #' and Psychological Measurement, 34(1):111–117 #' #' - Hofmann, R. (1978). Complexity and simplicity as objective indices #' descriptive of factor solutions. Multivariate Behavioral Research, 13:2, #' 247-250, \doi{10.1207/s15327906mbr1302_9} #' #' - Pettersson, E., & Turkheimer, E. (2010). Item selection, evaluation, #' and simple structure in personality data. Journal of research in #' personality, 44(4), 407-420, \doi{10.1016/j.jrp.2010.03.002} #' #' - Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate #' statistics (6th ed.). Boston: Pearson Education. #' #' @examplesIf require("nFactors", quietly = TRUE) && require("sparsepca", quietly = TRUE) && require("psych", quietly = TRUE) #' library(parameters) #' #' \donttest{ #' # Principal Component Analysis (PCA) ------------------- #' principal_components(mtcars[, 1:7], n = "all", threshold = 0.2) #' #' # Automated number of components #' principal_components(mtcars[, 1:4], n = "auto") #' #' # labels can be useful if variable names are not self-explanatory #' print( #' principal_components(mtcars[, 1:4], n = "auto"), #' labels = c( #' "Miles/(US) gallon", #' "Number of cylinders", #' "Displacement (cu.in.)", #' "Gross horsepower" #' ) #' ) #' #' # Sparse PCA #' principal_components(mtcars[, 1:7], n = 4, sparse = TRUE) #' principal_components(mtcars[, 1:7], n = 4, sparse = "robust") #' #' # Rotated PCA #' principal_components(mtcars[, 1:7], #' n = 2, rotation = "oblimin", #' threshold = "max", sort = TRUE #' ) #' principal_components(mtcars[, 1:7], n = 2, threshold = 2, sort = TRUE) #' #' pca <- principal_components(mtcars[, 1:5], n = 2, rotation = "varimax") #' pca # Print loadings #' summary(pca) # Print information about the factors #' predict(pca, names = c("Component1", "Component2")) # Back-predict scores #' #' # which variables from the original data belong to which extracted component? #' closest_component(pca) #' } #' #' # Factor Analysis (FA) ------------------------ #' #' factor_analysis(mtcars[, 1:7], n = "all", threshold = 0.2, rotation = "Promax") #' factor_analysis(mtcars[, 1:7], n = 2, threshold = "max", sort = TRUE) #' factor_analysis(mtcars[, 1:7], n = 2, rotation = "none", threshold = 2, sort = TRUE) #' #' efa <- factor_analysis(mtcars[, 1:5], n = 2) #' summary(efa) #' predict(efa, verbose = FALSE) #' #' \donttest{ #' # Automated number of components #' factor_analysis(mtcars[, 1:4], n = "auto") #' } #' #' @export principal_components <- function(x, ...) 
{ UseMethod("principal_components") } #' @rdname principal_components #' @export rotated_data <- function(x, verbose = TRUE) { original_data <- attributes(x)$dataset rotated_matrix <- insight::get_predicted(attributes(x)$model) out <- NULL if (is.null(original_data) || is.null(rotated_matrix)) { if (verbose) { insight::format_warning("Either the original or the rotated data could not be retrieved.") } return(NULL) } compl_cases <- attributes(x)$complete_cases if (is.null(compl_cases) && nrow(original_data) != nrow(rotated_matrix)) { if (verbose) { insight::format_warning("Could not retrieve information about missing data.") } return(NULL) } original_data$.parameters_merge_id <- seq_len(nrow(original_data)) rotated_matrix$.parameters_merge_id <- (seq_len(nrow(original_data)))[compl_cases] out <- merge(original_data, rotated_matrix, by = ".parameters_merge_id", all = TRUE, sort = FALSE) out$.parameters_merge_id <- NULL out } #' @rdname principal_components #' @export principal_components.data.frame <- function(x, n = "auto", rotation = "none", sparse = FALSE, sort = FALSE, threshold = NULL, standardize = TRUE, ...) { # save name of data set data_name <- insight::safe_deparse_symbol(substitute(x)) # original data original_data <- x # remove missing x <- stats::na.omit(x) # Select numeric only x <- x[vapply(x, is.numeric, TRUE)] # N factors n <- .get_n_factors(x, n = n, type = "PCA", rotation = rotation) # Catch and compute Rotated PCA if (rotation != "none") { if (sparse) { insight::format_error("Sparse PCA is currently incompatible with rotation. Use either `sparse=TRUE` or `rotation`.") } pca_loadings <- .pca_rotate( x, n, rotation = rotation, sort = sort, threshold = threshold, original_data = original_data, ... ) attr(pca_loadings, "data") <- data_name attr(pca_loadings, "dataset") <- original_data return(pca_loadings) } # Compute PCA if (is.character(sparse) && sparse == "robust") { # Robust sparse PCA insight::check_if_installed("sparsepca") model <- sparsepca::robspca( x, center = standardize, scale = standardize, verbose = FALSE, ... ) model$rotation <- model$loadings row.names(model$rotation) <- names(x) model$x <- model$scores } else if (isTRUE(sparse)) { # Sparse PCA insight::check_if_installed("sparsepca") model <- sparsepca::spca( x, center = standardize, scale = standardize, verbose = FALSE, ... ) model$rotation <- stats::setNames(model$loadings, names(x)) row.names(model$rotation) <- names(x) model$x <- model$scores } else { # Normal PCA model <- stats::prcomp(x, retx = TRUE, center = standardize, scale. = standardize, ... ) } # Re-add centers and scales # if (standardize) { # model$center <- attributes(x)$center # model$scale <- attributes(x)$scale # } # Summary (cumulative variance etc.) 
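  # `prcomp()` stores the component standard deviations in `sdev`; their
  # squares are the eigenvalues of the (scaled) correlation/covariance
  # matrix, and each eigenvalue divided by the sum of all eigenvalues gives
  # the proportion of explained variance. Equivalent hand computation
  # (illustrative sketch, not executed):
  if (FALSE) {
    ev <- eigen(stats::cor(mtcars[, 1:5]))$values
    ev / sum(ev) # proportion of variance per component
  }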
eigenvalues <- model$sdev^2 data_summary <- .data_frame( Component = sprintf("PC%i", seq_len(length(model$sdev))), Eigenvalues = eigenvalues, Variance = eigenvalues / sum(eigenvalues), Variance_Cumulative = cumsum(eigenvalues / sum(eigenvalues)) ) data_summary$Variance_Proportion <- data_summary$Variance / sum(data_summary$Variance) # Sometimes if too large n is requested the returned number is lower, so we # have to adjust n to the new number n <- pmin(sum(!is.na(model$sdev)), n) model$sdev <- model$sdev[1:n] model$rotation <- model$rotation[, 1:n, drop = FALSE] model$x <- model$x[, 1:n, drop = FALSE] data_summary <- data_summary[1:n, , drop = FALSE] # Compute loadings if (length(model$sdev) > 1) { pca_loadings <- as.data.frame(model$rotation %*% diag(model$sdev)) } else { pca_loadings <- as.data.frame(model$rotation %*% model$sdev) } names(pca_loadings) <- data_summary$Component # Format pca_loadings <- cbind(data.frame(Variable = row.names(pca_loadings)), pca_loadings) row.names(pca_loadings) <- NULL # Add information loading_cols <- 2:(n + 1) pca_loadings$Complexity <- (apply(pca_loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^2)))^2 / apply(pca_loadings[, loading_cols, drop = FALSE], 1, function(x) sum(x^4)) # Add attributes attr(pca_loadings, "summary") <- data_summary attr(pca_loadings, "model") <- model attr(pca_loadings, "rotation") <- "none" attr(pca_loadings, "scores") <- model$x attr(pca_loadings, "standardize") <- standardize attr(pca_loadings, "additional_arguments") <- list(...) attr(pca_loadings, "n") <- n attr(pca_loadings, "type") <- "prcomp" attr(pca_loadings, "loadings_columns") <- loading_cols attr(pca_loadings, "complete_cases") <- stats::complete.cases(original_data) # Sorting if (isTRUE(sort)) { pca_loadings <- .sort_loadings(pca_loadings) } # Replace by NA all cells below threshold if (!is.null(threshold)) { pca_loadings <- .filter_loadings(pca_loadings, threshold = threshold) } # Add some more attributes attr(pca_loadings, "loadings_long") <- .long_loadings(pca_loadings, threshold = threshold) # here we match the original columns in the data set with the assigned components # for each variable, so we know which column in the original data set belongs # to which extracted component... attr(pca_loadings, "closest_component") <- .closest_component( pca_loadings, loadings_columns = loading_cols, variable_names = colnames(x) ) attr(pca_loadings, "data") <- data_name attr(pca_loadings, "dataset") <- original_data # add class-attribute for printing class(pca_loadings) <- unique(c("parameters_pca", "see_parameters_pca", class(pca_loadings))) pca_loadings } #' @keywords internal .get_n_factors <- function(x, n = NULL, type = "PCA", rotation = "varimax", ...) { # N factors if (is.null(n) || n == "auto") { n <- as.numeric(n_factors(x, type = type, rotation = rotation, ...)) } else if (n == "all") { n <- ncol(x) - 1 } else if (n >= ncol(x)) { n <- ncol(x) } else if (n < 1) { n <- 1 } n } #' @keywords internal .pca_rotate <- function(x, n, rotation, sort = FALSE, threshold = NULL, original_data = NULL, ...) 
{ rotation <- insight::validate_argument( rotation, c("varimax", "quartimax", "promax", "oblimin", "simplimax", "cluster", "none") ) if (!inherits(x, c("prcomp", "data.frame"))) { insight::format_error("`x` must be of class `prcomp` or a data frame.") } if (!inherits(x, "data.frame") && rotation != "varimax") { insight::format_error(sprintf("`x` must be a data frame for `%s`-rotation.", rotation)) } # rotate loadings insight::check_if_installed("psych", reason = sprintf("`%s`-rotation.", rotation)) pca <- psych::principal(x, nfactors = n, rotate = rotation, ...) msa <- psych::KMO(x) attr(pca, "MSA") <- msa$MSAi out <- model_parameters(pca, sort = sort, threshold = threshold) attr(out, "dataset") <- original_data attr(out, "complete_cases") <- stats::complete.cases(original_data) out } parameters/R/methods_bfsl.R0000644000176200001440000000217114761570351015424 0ustar liggesusers#' @export model_parameters.bfsl <- function(model, ci = 0.95, ci_method = "residual", p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { out <- .model_parameters_generic( model = model, ci = ci, ci_method = ci_method, merge_by = "Parameter", p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, include_info = include_info, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export standard_error.bfsl <- function(model, ...) { cf <- stats::coef(model) params <- data.frame( Parameter = rownames(cf), SE = unname(cf[, "Std. Error"]), stringsAsFactors = FALSE, row.names = NULL ) insight::text_remove_backticks(params, verbose = FALSE) } parameters/R/methods_flexsurvreg.R0000644000176200001440000000124714716604200017044 0ustar liggesusers#' @export standard_error.flexsurvreg <- function(model, ...) { params <- insight::find_parameters(model, flatten = TRUE) se <- model$res[rownames(model$res) %in% params, "se"] .data_frame( Parameter = .remove_backticks_from_string(names(se)), SE = as.vector(se) ) } ## TODO add ci_method later? #' @export p_value.flexsurvreg <- function(model, ...) { params <- insight::get_parameters(model) est <- params$Estimate se <- standard_error(model)$SE p <- 2 * stats::pt(abs(est / se), df = insight::get_df(x = model, type = "wald"), lower.tail = FALSE) .data_frame( Parameter = params$Parameter, p = as.vector(p) ) } parameters/R/methods_nestedLogit.R0000644000176200001440000002105314761570351016757 0ustar liggesusers#' @export model_parameters.nestedLogit <- function(model, ci = 0.95, ci_method = NULL, component = "all", bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, vcov = NULL, vcov_args = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { dots <- list(...) # set default if (is.null(ci_method)) { if (isTRUE(bootstrap)) { ci_method <- "quantile" } else if (!is.null(vcov) || !is.null(vcov_args)) { ci_method <- "wald" } else { ci_method <- "profile" } } # "component" might be set to "conditional", when called from "compare_parameters()" # set to "all" here. if (identical(component, "conditional")) { component <- "all" } # profiled CIs may take a long time to compute, so we warn the user about it if (any(unlist(insight::n_obs(model)) > 1e4) && identical(ci_method, "profile")) { insight::format_alert( "Profiled confidence intervals may take longer time to compute.", "Use `ci_method=\"wald\"` for faster computation of CIs." 
) } # tell user that profiled CIs don't respect vcov-args if (identical(ci_method, "profile") && (!is.null(vcov) || !is.null(vcov_args)) && isTRUE(verbose)) { insight::format_alert( "When `ci_method=\"profile\"`, `vcov` only modifies standard errors, test-statistic and p-values, but not confidence intervals.", # nolint "Use `ci_method=\"wald\"` to return confidence intervals based on robust standard errors." ) } fun_args <- list( model = model, ci = ci, ci_method = ci_method, component = component, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Response", "Component"), standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, include_info = include_info, keep_parameters = keep, drop_parameters = drop, vcov = vcov, vcov_args = vcov_args ) fun_args <- c(fun_args, dots) out <- do.call(".model_parameters_generic", fun_args) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export standard_error.nestedLogit <- function(model, component = "all", vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { dots <- list(...) se <- NULL # vcov: matrix if (is.matrix(vcov)) { se <- sqrt(diag(vcov)) } # vcov: function which returns a matrix if (is.function(vcov)) { fun_args <- c(list(model), vcov_args, dots) se <- .safe(sqrt(diag(do.call("vcov", fun_args)))) } # vcov: character if (is.character(vcov)) { .vcov <- insight::get_varcov( model, component = component, vcov = vcov, vcov_args = vcov_args, verbose = verbose, ... ) se <- unlist(lapply(.vcov, function(i) sqrt(diag(i))), use.names = FALSE) } # classical se from summary() if (is.null(se)) { se <- as.vector(as.data.frame(do.call(rbind, lapply(model$models, function(i) { stats::coef(summary(i)) })))[, "Std. Error"]) } # classical se from get_varcov() if (is.null(se)) { .vcov <- insight::get_varcov( model, component = component, verbose = verbose, ... ) se <- unlist(lapply(.vcov, function(i) sqrt(diag(i))), use.names = FALSE) } params <- insight::get_parameters(model, component = component) .data_frame( Parameter = params$Parameter, SE = as.vector(se), Response = params$Response, Component = params$Component ) } #' @export p_value.nestedLogit <- function(model, dof = NULL, method = NULL, component = "all", vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { if (is.null(vcov)) { p <- as.vector(as.data.frame(do.call(rbind, lapply(model$models, function(i) { stats::coef(summary(i)) })))[, "Pr(>|z|)"]) } else { p <- p_value.default( model, dof = dof, method = method, component = component, vcov = vcov, vcov_args = vcov_args, verbose = verbose, ... )[["p"]] } params <- insight::get_parameters(model, component = component) .data_frame( Parameter = params$Parameter, p = p, Response = params$Response, Component = params$Component ) } #' @export ci.nestedLogit <- function(x, ci = 0.95, dof = NULL, method = "profile", component = "all", vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { out <- lapply( x$models, ci, dof = dof, method = method, vcov = vcov, vcov_args = vcov_args, verbose = verbose, ... ) for (i in names(out)) { out[[i]]$Component <- i } out <- do.call(rbind, out) row.names(out) <- NULL if (!is.null(component) && !identical(component, "all")) { comp <- intersect(names(x$models), component) if (!length(comp) && verbose) { insight::format_alert( paste0( "No matching model found. Possible values for `component` are ", toString(paste0("\"", names(x$models), "\"")), "." 
) ) } else { out <- out[out$Component %in% component, ] } } params <- insight::get_parameters(x, component = component) out$Response <- params$Response out[c("Parameter", "CI", "CI_low", "CI_high", "Response", "Component")] } #' @export simulate_model.nestedLogit <- function(model, iterations = 1000, ...) { if (is.null(iterations)) iterations <- 1000 params <- insight::get_parameters(model, component = "all", verbose = FALSE) varcov <- insight::get_varcov(model, component = "all", verbose = FALSE, ...) out <- lapply(unique(params$Component), function(i) { pars <- params[params$Component == i, ] betas <- stats::setNames(pars$Estimate, pars$Parameter) d <- as.data.frame(.mvrnorm(n = iterations, mu = betas, Sigma = varcov[[i]])) d$Component <- i d }) out <- do.call(rbind, out) class(out) <- c("parameters_simulate_model", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export simulate_parameters.nestedLogit <- function(model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ...) { sim_data <- simulate_model(model, iterations = iterations, ...) out <- lapply(unique(sim_data$Component), function(i) { pars <- sim_data[sim_data$Component == i, ] d <- .summary_bootstrap( data = pars, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) d$Component <- i d }) out <- do.call(rbind, out) class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(out, "iterations") <- iterations attr(out, "ci") <- ci attr(out, "ci_method") <- ci_method attr(out, "centrality") <- centrality attr(out, "simulated") <- TRUE out } parameters/R/methods_panelr.R0000644000176200001440000001452314761570351015763 0ustar liggesusers# .wbm, .wbgee # model parameters ------------------- #' @export model_parameters.wbm <- function(model, ci = 0.95, ci_random = NULL, bootstrap = FALSE, iterations = 1000, effects = "all", group_level = FALSE, exponentiate = FALSE, p_adjust = NULL, include_sigma = FALSE, keep = NULL, drop = NULL, verbose = TRUE, ...) { effects <- match.arg(effects, choices = c("fixed", "random", "all")) params <- .mixed_model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = c("Parameter", "Component"), standardize = NULL, exponentiate = exponentiate, effects = effects, p_adjust = p_adjust, group_level = group_level, ci_method = NULL, include_sigma = include_sigma, ci_random = ci_random, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", "data.frame") params } #' @export model_parameters.wbgee <- model_parameters.wbm #' @export model_parameters.asym <- function(model, ci = 0.95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { params <- model_parameters.default( model, ci = ci, ci_method = ci_method, bootstrap = bootstrap, iterations = iterations, standardize = standardize, exponentiate = exponentiate, p_adjust = p_adjust, include_info = include_info, keep = keep, drop = drop, verbose = verbose, ... 
) attr(params, "no_caption") <- TRUE params } # standard errors ------------------- #' @export standard_error.wbm <- function(model, ...) { s <- summary(model) se <- c( s$within_table[, "S.E."], s$between_table[, "S.E."], s$ints_table[, "S.E."] ) params <- insight::get_parameters(model, effects = "fixed") .data_frame( Parameter = params$Parameter, SE = as.vector(se), Component = params$Component ) } #' @export standard_error.wbgee <- standard_error.wbm # p values ------------------- #' @export p_value.wbm <- function(model, ...) { s <- summary(model) p <- c( s$within_table[, "p"], s$between_table[, "p"], s$ints_table[, "p"] ) params <- insight::get_parameters(model, effects = "fixed") .data_frame( Parameter = params$Parameter, p = as.vector(p), Component = params$Component ) } #' @export p_value.wbgee <- p_value.wbm # utils ------------------- .mixed_model_parameters_generic <- function(model, ci, ci_random = NULL, bootstrap, # nolint iterations, # nolint merge_by, # nolint standardize, # nolint exponentiate, # nolint effects, # nolint p_adjust, # nolint group_level, # nolint ci_method, # nolint include_sigma = FALSE, keep_parameters = NULL, drop_parameters = NULL, verbose = TRUE, ...) { params <- params_random <- params_variance <- att <- NULL if (effects %in% c("fixed", "all")) { params <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, merge_by = merge_by, standardize = standardize, exponentiate = exponentiate, effects = "fixed", p_adjust = p_adjust, ci_method = ci_method, include_sigma = include_sigma, keep_parameters = keep_parameters, drop_parameters = drop_parameters, verbose = verbose, ... ) params$Effects <- "fixed" att <- attributes(params) } if (effects %in% c("random", "all") && isTRUE(group_level)) { params_random <- .extract_random_parameters(model, ci = ci, effects = effects) } if (effects %in% c("random", "all") && isFALSE(group_level)) { params_variance <- .extract_random_variances(model, ci = ci, effects = effects, ci_method = ci_method, ci_random = ci_random, verbose = verbose) } # merge random and fixed effects, if necessary if (!is.null(params) && (!is.null(params_random) || !is.null(params_variance))) { params$Level <- NA params$Group <- "" # reorder if (!is.null(params_random)) { params <- params[match(colnames(params_random), colnames(params))] } else { params <- params[match(colnames(params_variance), colnames(params))] } } params <- rbind(params, params_random, params_variance) if (!is.null(att)) { attributes(params) <- utils::modifyList(att, attributes(params)) } # remove empty column if (!is.null(params$Level) && all(is.na(params$Level))) { params$Level <- NULL } params } parameters/R/p_value_betwithin.R0000644000176200001440000000524314331167101016452 0ustar liggesusers#' @title Between-within approximation for SEs, CIs and p-values #' @name p_value_betwithin #' #' @description Approximation of degrees of freedom based on a "between-within" heuristic. #' #' @param model A mixed model. #' @param dof Degrees of Freedom. #' @inheritParams ci.default #' #' @details #' ## Small Sample Cluster corrected Degrees of Freedom #' Inferential statistics (like p-values, confidence intervals and #' standard errors) may be biased in mixed models when the number of clusters #' is small (even if the sample size of level-1 units is high). In such cases #' it is recommended to approximate a more accurate number of degrees of freedom #' for such inferential statistics (see _Li and Redden 2015_). 
The #' *Between-within* denominator degrees of freedom approximation is #' recommended in particular for (generalized) linear mixed models with repeated #' measurements (longitudinal design). `dof_betwithin()` implements a heuristic #' based on the between-within approach. **Note** that this implementation #' does not return exactly the same results as shown in _Li and Redden 2015_, #' but similar. #' #' ## Degrees of Freedom for Longitudinal Designs (Repeated Measures) #' In particular for repeated measure designs (longitudinal data analysis), #' the *between-within* heuristic is likely to be more accurate than simply #' using the residual or infinite degrees of freedom, because `dof_betwithin()` #' returns different degrees of freedom for within-cluster and between-cluster #' effects. #' #' @seealso `dof_betwithin()` is a small helper-function to calculate approximated #' degrees of freedom of model parameters, based on the "between-within" heuristic. #' #' @examples #' \donttest{ #' if (require("lme4")) { #' data(sleepstudy) #' model <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) #' dof_betwithin(model) #' p_value_betwithin(model) #' } #' } #' @return A data frame. #' @references #' - Elff, M.; Heisig, J.P.; Schaeffer, M.; Shikano, S. (2019). Multilevel #' Analysis with Few Clusters: Improving Likelihood-based Methods to Provide #' Unbiased Estimates and Accurate Inference, British Journal of Political Science. #' - Li, P., Redden, D. T. (2015). Comparing denominator degrees of freedom #' approximations for the generalized linear mixed model in analyzing binary #' outcome in small sample cluster-randomized trials. BMC Medical Research #' Methodology, 15(1), 38. \doi{10.1186/s12874-015-0026-x} #' @export p_value_betwithin <- function(model, dof = NULL, ...) { if (is.null(dof)) { dof <- dof_betwithin(model) } .p_value_dof(model, dof, method = "betwithin", ...) } parameters/R/methods_mcmc.R0000644000176200001440000000010213774144072015405 0ustar liggesusers#' @export model_parameters.mcmc <- model_parameters.data.frame parameters/R/methods_skewness_kurtosis.R0000644000176200001440000000027713774144072020310 0ustar liggesusers#' @export standard_error.parameters_skewness <- function(model, ...) 
{ attributes(model)$SE } #' @export standard_error.parameters_kurtosis <- standard_error.parameters_skewness parameters/R/dof_betwithin.R0000644000176200001440000000172514407046317015601 0ustar liggesusers#' @rdname p_value_betwithin #' @export dof_betwithin <- function(model) { if (!insight::is_mixed_model(model)) { insight::format_error("Model must be a mixed model.") } ngrps <- sum(.n_randomeffects(model)) parameters <- insight::find_parameters(model, effects = "fixed")[["conditional"]] within_effects <- unlist(insight::find_random_slopes(model)) has_intcp <- insight::has_intercept(model) ddf_within <- ngrps - n_parameters(model) ddf_between <- insight::n_obs(model, disaggregate = TRUE) - ngrps - n_parameters(model) if (has_intcp) { ddf_between <- ddf_between - 1 ddf_within <- ddf_within - 1 } within_index <- match(within_effects, parameters) ddf <- stats::setNames(seq_along(parameters), parameters) if (length(within_index) > 0) { ddf[match(within_effects, parameters)] <- ddf_within ddf[-match(within_effects, parameters)] <- ddf_between } else { ddf <- ddf_between } ddf } parameters/R/methods_rstan.R0000644000176200001440000000430114716604200015611 0ustar liggesusers#' @export model_parameters.stanfit <- function(model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = "pd", rope_range = "default", rope_ci = 0.95, diagnostic = c("ESS", "Rhat"), effects = "fixed", exponentiate = FALSE, standardize = NULL, group_level = FALSE, keep = NULL, drop = NULL, verbose = TRUE, ...) { # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = NULL, diagnostic = diagnostic, priors = FALSE, effects = effects, standardize = standardize, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) if (effects != "fixed") { random_effect_levels <- which( params$Effects == "random" & !startsWith(params$Parameter, "Sigma[") ) if (length(random_effect_levels) && isFALSE(group_level)) { params <- params[-random_effect_levels, ] } } # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) params <- .add_model_parameters_attributes( params, model, ci, exponentiate, ci_method = ci_method, verbose = verbose, ... ) attr(params, "parameter_info") <- insight::clean_parameters(model) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } parameters/R/methods_robustlmm.R0000644000176200001440000000016614507235543016523 0ustar liggesusers#' @export model_parameters.rlmerMod <- model_parameters.cpglmm #' @export p_value.rlmerMod <- p_value.cpglmm parameters/R/3_p_value.R0000644000176200001440000001335714717111736014637 0ustar liggesusers#' @title p-values #' @name p_value #' #' @description This function attempts to return, or compute, p-values of a model's #' parameters. #' #' @param model A statistical model. #' @param adjust Character value naming the method used to adjust p-values or #' confidence intervals. See `?emmeans::summary.emmGrid` for details. #' @param ... 
Additional arguments #' @inheritParams ci.default #' @inheritParams standard_error.default #' #' @inheritSection model_parameters Confidence intervals and approximation of degrees of freedom #' #' @inheritSection model_parameters.zcpglm Model components #' #' @details #' For Bayesian models, the p-values corresponds to the *probability of #' direction* ([`bayestestR::p_direction()`]), which is converted to a p-value #' using `bayestestR::convert_pd_to_p()`. #' #' @return A data frame with at least two columns: the parameter names and the #' p-values. Depending on the model, may also include columns for model #' components etc. #' #' @examplesIf require("pscl", quietly = TRUE) #' data(iris) #' model <- lm(Petal.Length ~ Sepal.Length + Species, data = iris) #' p_value(model) #' #' data("bioChemists", package = "pscl") #' model <- pscl::zeroinfl( #' art ~ fem + mar + kid5 | kid5 + phd, #' data = bioChemists #' ) #' p_value(model) #' p_value(model, component = "zi") #' @export p_value <- function(model, ...) { UseMethod("p_value") } # p-Values from Standard Models ----------------------------------------------- #' @rdname p_value #' @export p_value.default <- function(model, dof = NULL, method = NULL, component = "all", vcov = NULL, vcov_args = NULL, verbose = TRUE, ...) { # check for valid input .is_model_valid(model) dots <- list(...) p <- NULL if (is.character(method)) { method <- tolower(method) } else { method <- "wald" } # robust standard errors if (!is.null(vcov)) { method <- "robust" } # default p-value method for profiled or uniroot CI if (method %in% c("uniroot", "profile", "likelihood", "boot")) { method <- "normal" } if (method == "ml1") { return(p_value_ml1(model)) } if (method == "betwithin") { return(p_value_betwithin(model)) } if (method %in% c("residual", "wald", "normal", "satterthwaite", "kenward", "kr")) { if (is.null(dof)) { dof <- insight::get_df(x = model, type = method, verbose = FALSE) } return(.p_value_dof( model, dof = dof, method = method, component = component, verbose = verbose, ... )) } if (method %in% c("hdi", "eti", "si", "bci", "bcai", "quantile")) { return(bayestestR::p_direction(model, ...)) } # robust standard errors if (method == "robust") { co <- insight::get_parameters(model) # for polr, we need to fix parameter names co$Parameter <- gsub("Intercept: ", "", co$Parameter, fixed = TRUE) # this allows us to pass the output of `standard_error()` # to the `vcov` argument in order to avoid computing the SE twice. 
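# A minimal usage sketch (assuming the `sandwich` package is installed, so
# that "HC3" is a valid robust estimator): compute the robust SEs once and
# reuse them for the p-values.
#   se <- standard_error(model, vcov = "HC3")
#   p_value(model, vcov = se) # reuses the SEs instead of recomputing them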
if (inherits(vcov, "data.frame") || "SE" %in% colnames(vcov)) { se <- vcov } else { fun_args <- list(model, vcov_args = vcov_args, vcov = vcov, verbose = verbose ) fun_args <- c(fun_args, dots) se <- do.call("standard_error", fun_args) } dof <- insight::get_df(x = model, type = "wald", verbose = FALSE) se <- merge(se, co, sort = FALSE) se$Statistic <- se$Estimate / se$SE se$p <- 2 * stats::pt(abs(se$Statistic), df = dof, lower.tail = FALSE) p <- stats::setNames(se$p, se$Parameter) } # default 1st try: summary() if (is.null(p)) { p <- .safe({ # Zelig-models are weird if (grepl("Zelig-", class(model)[1], fixed = TRUE)) { unlist(model$get_pvalue()) } else { # try to get p-value from classical summary for default models .get_pval_from_summary(model) } }) } # default 2nd try: p value from test-statistic if (is.null(p)) { p <- .safe({ stat <- insight::get_statistic(model) p_from_stat <- 2 * stats::pt(abs(stat$Statistic), df = Inf, lower.tail = FALSE) names(p_from_stat) <- stat$Parameter p_from_stat }) } # failure warning if (is.null(p)) { if (isTRUE(verbose)) { insight::format_warning("Could not extract p-values from model object.") } return(NULL) } # output params <- insight::get_parameters(model, component = component) if (length(p) == nrow(params) && "Component" %in% colnames(params)) { .data_frame(Parameter = params$Parameter, p = as.vector(p), Component = params$Component) } else { .data_frame(Parameter = names(p), p = as.vector(p)) } } # helper -------------------------------------------------------- .get_pval_from_summary <- function(model, cs = NULL) { if (is.null(cs)) cs <- suppressWarnings(stats::coef(summary(model))) p <- NULL if (ncol(cs) >= 4) { # do we have a p-value column based on t? pvcn <- which(colnames(cs) == "Pr(>|t|)") # if not, do we have a p-value column based on z? if (length(pvcn) == 0) { pvcn <- which(colnames(cs) == "Pr(>|z|)") } # if not, default to 4 if (length(pvcn) == 0) { pvcn <- 4 } p <- cs[, pvcn] if (is.null(names(p))) { coef_names <- rownames(cs) if (length(coef_names) == length(p)) { names(p) <- coef_names } } } names(p) <- .remove_backticks_from_string(names(p)) p } parameters/R/methods_lcmm.R0000644000176200001440000000615115066721001015415 0ustar liggesusers#' @export model_parameters.lcmm <- function( model, ci = 0.95, ci_method = "residual", component = "all", p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ... ) { out <- .model_parameters_generic( model = model, effects = "all", component = component, ci = ci, ci_method = ci_method, merge_by = c("Parameter", "Component", "Group"), p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, include_info = FALSE, verbose = verbose, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export model_parameters.externVar <- function( model, ci = 0.95, ci_method = "residual", component = "all", p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ... ) { out <- .model_parameters_generic( model = model, effects = "all", component = component, ci = ci, ci_method = ci_method, merge_by = c("Parameter", "Group"), p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, include_info = FALSE, verbose = verbose, ... 
) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export model_parameters.externX <- model_parameters.externVar # p-values ---------------------------------------------------------------- # Helper to format output for lcmm methods .format_lcmm_output <- function(model, value, colname, component, ...) { component <- insight::validate_argument( component, c("all", "conditional", "membership", "longitudinal", "beta", "splines", "linear") ) out <- insight::get_parameters(model, component = "all", ...) out[[colname]] <- as.vector(value) if (component != "all") { out <- out[out$Component == component, , drop = FALSE] } # clean up out$Estimate <- NULL out <- out[intersect(c("Parameter", colname, "Component", "Group"), colnames(out))] insight::text_remove_backticks(out, verbose = FALSE) } #' @export p_value.lcmm <- function(model, component = "all", ...) { id <- seq_along(model$best) indice <- rep(id * (id + 1) / 2) se <- sqrt(model$V[indice]) statistic <- model$best / se p <- 2 * stats::pt(abs(statistic), df = Inf, lower.tail = FALSE) p <- p[!startsWith(names(model$best), "cholesky ") & !startsWith(names(model$best), "varcov ")] .format_lcmm_output(model, p, "p", component, ...) } #' @export p_value.externX <- p_value.lcmm #' @export p_value.externVar <- p_value.lcmm # standard errors ------------------------------------------------------------- #' @export standard_error.lcmm <- function(model, component = "all", ...) { id <- seq_along(model$best) indice <- rep(id * (id + 1) / 2) se <- sqrt(model$V[indice]) se <- se[!startsWith(names(model$best), "cholesky ") & !startsWith(names(model$best), "varcov ")] .format_lcmm_output(model, se, "SE", component, ...) } #' @export standard_error.externX <- standard_error.lcmm #' @export standard_error.externVar <- standard_error.lcmm parameters/R/methods_other.R0000644000176200001440000000234014736731407015620 0ustar liggesusers############# .complmrob -------------- #' @export standard_error.complmrob <- function(model, ...) { stats <- summary(model)$stats params <- insight::get_parameters(model) .data_frame( Parameter = params$Parameter, SE = as.vector(stats[, "Std. Error"]) ) } #' @export p_value.complmrob <- p_value.default #' @export ci.complmrob <- ci.default ############# .Gam -------------- #' @inheritParams model_parameters.aov #' @export model_parameters.Gam <- function(model, es_type = NULL, df_error = NULL, type = NULL, table_wide = FALSE, verbose = TRUE, ...) { model_parameters( summary(model)$parametric.anova, es_type = es_type, df_error = df_error, type = type, table_wide = table_wide, verbose = verbose, ... ) } #' @export p_value.Gam <- function(model, ...) { p.aov <- stats::na.omit(summary(model)$parametric.anova) .data_frame( Parameter = .remove_backticks_from_string(rownames(p.aov)), p = as.vector(p.aov[, 5]) ) } parameters/R/n_clusters_easystats.R0000644000176200001440000003607215066721001017230 0ustar liggesusers#' @rdname n_clusters #' @examplesIf require("see", quietly = TRUE) && require("factoextra", quietly = TRUE) #' \donttest{ #' x <- n_clusters_elbow(iris[1:4]) #' x #' as.data.frame(x) #' # plotting is also possible: #' # plot(x) #' } #' @export n_clusters_elbow <- function( x, standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, ... ) { t0 <- Sys.time() out <- .n_clusters_factoextra( x, method = "wss", standardize = standardize, include_factors = include_factors, clustering_function = clustering_function, n_max = n_max, ... 
) names(out) <- c("n_Clusters", "WSS") gradient <- c(0, diff(out$WSS)) optimal <- out$n_Clusters[which.min(gradient)] attr(out, "n") <- optimal attr(out, "gradient") <- gradient attr(out, "duration") <- as.numeric(difftime(Sys.time(), t0, units = "secs")) class(out) <- c("n_clusters_elbow", class(out)) out } #' @rdname n_clusters #' @examples #' \donttest{ #' # #' # Gap method -------------------- #' if (require("see", quietly = TRUE) && #' require("cluster", quietly = TRUE) && #' require("factoextra", quietly = TRUE)) { #' x <- n_clusters_gap(iris[1:4]) #' x #' as.data.frame(x) #' plot(x) #' } #' } #' @export n_clusters_gap <- function(x, standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, gap_method = "firstSEmax", ...) { insight::check_if_installed("cluster") t0 <- Sys.time() rez <- .n_clusters_factoextra( x, method = "gap_stat", standardize = standardize, include_factors = include_factors, clustering_function = clustering_function, n_max = n_max, ... ) out <- rez[c("clusters", "gap", "SE.sim")] names(out) <- c("n_Clusters", "Gap", "SE") optimal <- cluster::maxSE(f = out$Gap, SE.f = out$SE, method = gap_method) attr(out, "n") <- optimal attr(out, "ymin") <- rez$ymin attr(out, "ymax") <- rez$ymax attr(out, "duration") <- as.numeric(difftime(Sys.time(), t0, units = "secs")) class(out) <- c("n_clusters_gap", class(out)) out } #' @rdname n_clusters #' @examples #' \donttest{ #' # #' # Silhouette method -------------------------- #' if (require("factoextra", quietly = TRUE)) { #' x <- n_clusters_silhouette(iris[1:4]) #' x #' as.data.frame(x) #' # plotting is also possible: #' # plot(x) #' } #' } #' @export n_clusters_silhouette <- function(x, standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, ...) { t0 <- Sys.time() out <- .n_clusters_factoextra( x, method = "silhouette", standardize = standardize, include_factors = include_factors, clustering_function = clustering_function, n_max = n_max, ... ) names(out) <- c("n_Clusters", "Silhouette") optimal <- which.max(out$Silhouette) attr(out, "n") <- optimal attr(out, "duration") <- as.numeric(difftime(Sys.time(), t0, units = "secs")) class(out) <- c("n_clusters_silhouette", class(out)) out } #' @rdname n_clusters #' @examples #' \donttest{ #' # #' if (require("dbscan", quietly = TRUE)) { #' # DBSCAN method ------------------------- #' # NOTE: This actually primarily estimates the 'eps' parameter, the number of #' # clusters is a side effect (it's the number of clusters corresponding to #' # this 'optimal' EPS parameter). #' x <- n_clusters_dbscan(iris[1:4], method = "kNN", min_size = 0.05) # 5 percent #' x #' head(as.data.frame(x)) #' plot(x) #' #' x <- n_clusters_dbscan(iris[1:4], method = "SS", eps_n = 100, eps_range = c(0.1, 2)) #' x #' head(as.data.frame(x)) #' plot(x) #' } #' } #' @export n_clusters_dbscan <- function(x, standardize = TRUE, include_factors = FALSE, method = c("kNN", "SS"), min_size = 0.1, eps_n = 50, eps_range = c(0.1, 3), ...) { method <- match.arg(method) t0 <- Sys.time() x <- .prepare_data_clustering(x, include_factors = include_factors, standardize = standardize, ...) 
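# Two estimation strategies follow: for method = "SS", a grid of `eps_n`
# candidate eps values over `eps_range` is scanned and the eps minimizing the
# total within-cluster sum of squares is kept; for method = "kNN", the sorted
# k-nearest-neighbour distances are inspected and the eps at their steepest
# increase (the "knee" of the curve) is chosen.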
if (method == "SS") { out <- data.frame() for (eps in seq(eps_range[1], eps_range[2], length.out = eps_n)) { rez <- .cluster_analysis_dbscan(x, dbscan_eps = eps, min_size = min_size) out <- rbind(out, data.frame( eps = eps, n_Clusters = length(unique(rez$clusters)) - 1, total_SS = sum(.cluster_centers_SS(x, rez$clusters)$WSS) )) } attr(out, "min_size") <- rez$model$MinPts attr(out, "eps") <- out$eps[which.min(out$total_SS)] attr(out, "n") <- out$n_Clusters[which.min(out$total_SS)] } else { insight::check_if_installed("dbscan") if (min_size < 1) min_size <- round(min_size * nrow(x)) out <- data.frame(n_Obs = seq_len(nrow(x)), eps = sort(dbscan::kNNdist(x, k = min_size))) row.names(out) <- NULL gradient <- c(0, diff(out$eps)) eps <- out$eps[which.max(gradient)] rez <- .cluster_analysis_dbscan(x, dbscan_eps = eps, min_size = min_size) attr(out, "gradient") <- gradient attr(out, "min_size") <- min_size attr(out, "eps") <- eps attr(out, "n") <- length(unique(rez$clusters)) - 1 } attr(out, "duration") <- as.numeric(difftime(Sys.time(), t0, units = "secs")) class(out) <- c("n_clusters_dbscan", class(out)) out } #' @rdname n_clusters #' @examples #' \donttest{ #' # #' # hclust method ------------------------------- #' if (require("pvclust", quietly = TRUE)) { #' # iterations should be higher for real analyses #' x <- n_clusters_hclust(iris[1:4], iterations = 50, ci = 0.90) #' x #' head(as.data.frame(x), n = 10) # Print 10 first rows #' plot(x) #' } #' } #' @export n_clusters_hclust <- function(x, standardize = TRUE, include_factors = FALSE, distance_method = "correlation", hclust_method = "average", ci = 0.95, iterations = 100, ...) { insight::check_if_installed("pvclust") t0 <- Sys.time() x <- .prepare_data_clustering( x, include_factors = include_factors, standardize = standardize, ... ) # pvclust works on columns, so we need to pivot the dataframe model <- suppressWarnings(pvclust::pvclust( datawizard::data_transpose(x, verbose = FALSE), method.hclust = hclust_method, method.dist = distance_method, nboot = iterations, quiet = TRUE )) out <- .model_parameters_pvclust_clusters(model, x, ci) attr(out, "model") <- model attr(out, "ci") <- ci attr(out, "n") <- length(unique(out$Cluster)[unique(out$Cluster) != 0]) attr(out, "duration") <- as.numeric(difftime(Sys.time(), t0, units = "secs")) class(out) <- c("n_clusters_hclust", class(out)) out } # Utils ------------------------------------------------------------------- #' @keywords internal .n_clusters_factoextra <- function( x, method = "wss", standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, ... ) { x <- .prepare_data_clustering( x, include_factors = include_factors, standardize = standardize, ... ) insight::check_if_installed("factoextra") suppressWarnings( factoextra::fviz_nbclust( x, clustering_function, method = method, k.max = n_max, verbose = FALSE )$data ) } # Printing ---------------------------------------------------------------- #' @export print.n_clusters_elbow <- function(x, ...) { insight::print_color(paste0("The Elbow method, that aims at minimizing the total intra-cluster variation (i.e., the total within-cluster sum of square), suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green") # nolint invisible(x) } #' @export print.n_clusters_gap <- function(x, ...) 
{ insight::print_color(paste0("The Gap method, that compares the total intracluster variation of k clusters with their expected values under null reference distribution of the data, suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green") # nolint invisible(x) } #' @export print.n_clusters_silhouette <- function(x, ...) { insight::print_color(paste0("The Silhouette method, based on the average quality of clustering, suggests that the optimal number of clusters is ", attributes(x)$n, "."), "green") # nolint invisible(x) } #' @export print.n_clusters_dbscan <- function(x, ...) { insight::print_color(paste0("The DBSCAN method, based on the total clusters sum of squares, suggests that the optimal eps = ", attributes(x)$eps, " (with min. cluster size set to ", attributes(x)$min_size, "), which corresponds to ", attributes(x)$n, " clusters."), "green") # nolint invisible(x) } #' @export print.n_clusters_hclust <- function(x, ...) { insight::print_color(paste0("The bootstrap analysis of hierarchical clustering highlighted ", attributes(x)$n, " significant clusters."), "green") # nolint invisible(x) } # Plotting ---------------------------------------------------------------- #' @export visualisation_recipe.n_clusters_elbow <- function(x, ...) { input_df <- as.data.frame(x) input_df$Gradient <- datawizard::rescale( attributes(x)$gradient, min(input_df$WSS, max(input_df$WSS)) ) layers <- list() # Layers ----------------------- layers[["l1"]] <- list( geom = "line", data = input_df, aes = list(x = "n_Clusters", y = "WSS", group = 1), size = 1 ) layers[["l2"]] <- list( geom = "point", data = input_df, aes = list(x = "n_Clusters", y = "WSS") ) layers[["l3"]] <- list( geom = "line", data = input_df, aes = list(x = "n_Clusters", y = "Gradient", group = 1), size = 0.5, color = "red", linetype = "dashed" ) layers[["l4"]] <- list( geom = "vline", data = input_df, xintercept = attributes(x)$n, linetype = "dotted" ) layers[["l5"]] <- list( geom = "labs", x = "Number of Clusters", y = "Total Within-Clusters Sum of Squares", title = "Elbow Method" ) # Out class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers)) attr(layers, "data") <- input_df layers } #' @export visualisation_recipe.n_clusters_gap <- function(x, ...) { dataset <- as.data.frame(x) dataset$ymin <- attributes(x)$ymin dataset$ymax <- attributes(x)$ymax layers <- list() # Layers ----------------------- layers[["l1"]] <- list( geom = "line", data = dataset, aes = list(x = "n_Clusters", y = "Gap", group = 1) ) layers[["l2"]] <- list( geom = "pointrange", data = dataset, aes = list(x = "n_Clusters", y = "Gap", ymin = "ymin", ymax = "ymax") ) layers[["l4"]] <- list( geom = "vline", data = dataset, xintercept = attributes(x)$n, linetype = "dotted" ) layers[["l5"]] <- list( geom = "labs", x = "Number of Clusters", y = "Gap statistic", title = "Gap Method" ) # Out class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers)) attr(layers, "data") <- dataset layers } #' @export visualisation_recipe.n_clusters_silhouette <- function(x, ...) 
{ dataset <- as.data.frame(x) layers <- list() # Layers ----------------------- layers[["l1"]] <- list( geom = "line", data = dataset, aes = list(x = "n_Clusters", y = "Silhouette", group = 1) ) layers[["l2"]] <- list( geom = "point", data = dataset, aes = list(x = "n_Clusters", y = "Silhouette") ) layers[["l4"]] <- list( geom = "vline", data = dataset, xintercept = attributes(x)$n, linetype = "dotted" ) layers[["l5"]] <- list( geom = "labs", x = "Number of Clusters", y = "Average Silhouette Width", title = "Silhouette Method" ) # Out class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers)) attr(layers, "data") <- dataset layers } #' @export visualisation_recipe.n_clusters_dbscan <- function(x, ...) { dataset <- as.data.frame(x) layers <- list() # Layers ----------------------- if ("gradient" %in% names(attributes(x))) { dataset$gradient <- datawizard::rescale( attributes(x)$gradient, c(min(dataset$eps), max(dataset$eps)) ) layers[["l1"]] <- list( geom = "line", data = dataset, aes = list(x = "n_Obs", y = "eps"), size = 1 ) layers[["l2"]] <- list( geom = "line", data = dataset, aes = list(x = "n_Obs", y = "gradient"), color = "red", linetype = "dashed" ) layers[["l3"]] <- list( geom = "hline", data = dataset, yintercept = attributes(x)$eps, linetype = "dotted" ) layers[["l4"]] <- list( geom = "labs", x = "Observations", y = paste0("EPS Value (min. size = ", attributes(x)$min_size, ")"), title = "DBSCAN Method" ) } else { dataset$y <- datawizard::rescale( dataset$total_SS, c(min(dataset$n_Clusters), max(dataset$n_Clusters)) ) layers[["l1"]] <- list( geom = "line", data = dataset, aes = list(x = "eps", y = "n_Clusters"), size = 1 ) layers[["l2"]] <- list( geom = "line", data = dataset, aes = list(x = "eps", y = "y"), color = "red", linetype = "dashed" ) layers[["l3"]] <- list( geom = "vline", data = dataset, xintercept = attributes(x)$eps, linetype = "dotted" ) layers[["l4"]] <- list( geom = "labs", x = paste0("EPS Value (min. size = ", attributes(x)$min_size, ")"), y = "Number of Clusters", title = "DBSCAN Method" ) } # Out class(layers) <- c("visualisation_recipe", "see_visualisation_recipe", class(layers)) attr(layers, "data") <- dataset layers } #' @export plot.n_clusters_elbow <- function(x, ...) { graphics::plot(visualisation_recipe(x, ...)) } #' @export plot.n_clusters_gap <- plot.n_clusters_elbow #' @export plot.n_clusters_silhouette <- plot.n_clusters_elbow #' @export plot.n_clusters_dbscan <- plot.n_clusters_elbow #' @export plot.n_clusters_hclust <- function(x, ...) { insight::check_if_installed("pvclust") graphics::plot(attributes(x)[["model"]]) pvclust::pvrect(attributes(x)[["model"]], alpha = attributes(x)$ci, pv = "si") } parameters/R/methods_dbscan.R0000644000176200001440000000123014717111737015723 0ustar liggesusers#' @export model_parameters.dbscan <- function(model, data = NULL, clusters = NULL, ...) { if (is.null(data)) { insight::format_error("This function requires the data used to compute the clustering to be provided via `data`, as it is not accessible from the clustering object itself.") } if (is.null(clusters)) { clusters <- model$cluster } params <- .cluster_centers_params(data, clusters, ...) 
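# A usage sketch for this method (assuming the `dbscan` package is
# available):
#   m <- dbscan::dbscan(iris[1:4], eps = 0.5, minPts = 5)
#   model_parameters(m, data = iris[1:4])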
attr(params, "model") <- model attr(params, "type") <- "dbscan" attr(params, "title") <- ifelse(inherits(model, "hdbscan"), "HDBSCAN", "DBSCAN") params } #' @export model_parameters.hdbscan <- model_parameters.dbscan parameters/R/methods_lmodel2.R0000644000176200001440000000352114507235543016033 0ustar liggesusers# lmodel2 #' @export model_parameters.lmodel2 <- function(model, ci = 0.95, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { if (!missing(ci)) { if (isTRUE(verbose)) { insight::format_alert("`lmodel2` models do not support other levels for confidence intervals than 0.95. Argument `ci` is ignored.") } ci <- 0.95 } out <- .model_parameters_generic( model = model, ci = ci, bootstrap = FALSE, iterations = 10, merge_by = c("Parameter", "Component"), standardize = NULL, exponentiate = exponentiate, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export standard_error.lmodel2 <- function(model, ...) { NULL } #' @export p_value.lmodel2 <- function(model, ...) { res <- model$regression.results data.frame( Parameter = rep(c("Intercept", "Slope"), each = nrow(res)), p = utils::stack(res, select = 5)[[1]], Component = rep(res$Method, 2), stringsAsFactors = FALSE, row.names = NULL ) } #' @export ci.lmodel2 <- function(x, ...) { res <- x$confidence.intervals data.frame( Parameter = rep(c("Intercept", "Slope"), each = nrow(res)), CI = 95, CI_low = utils::stack(res, select = c(2, 4))[[1]], CI_high = utils::stack(res, select = c(3, 5))[[1]], Component = rep(res$Method, 2), stringsAsFactors = FALSE, row.names = NULL ) } parameters/R/methods_glmmTMB.R0000644000176200001440000006612715073732442016006 0ustar liggesusers# Package glmmTMB # model_parameters ----- #' @title Parameters from Mixed Models #' @name model_parameters.glmmTMB #' #' @description Parameters from (linear) mixed models. #' #' @param model A mixed model. #' @param effects Should parameters for fixed effects (`"fixed"`), random effects #' (`"random"`), or both fixed and random effects (`"all"`) be returned? By #' default, the variance components for random effects are returned. If #' group-level effects are requested, `"grouplevel"` returns the group-level #' random effects (BLUPs), while `"random_total"` return the overall (sum of #' fixed and random) effects (similar to what `coef()` returns). Using #' `"grouplevel"` is equivalent to setting `group_level = TRUE`. The `effects` #' argument only applies to mixed models. If the calculation of random effects #' parameters takes too long, you may use `effects = "fixed"`. #' @param wb_component Logical, if `TRUE` and models contains within- and #' between-effects (see `datawizard::demean()`), the `Component` column #' will indicate which variables belong to the within-effects, #' between-effects, and cross-level interactions. By default, the #' `Component` column indicates, which parameters belong to the #' conditional or zero-inflation component of the model. #' @param include_sigma Logical, if `TRUE`, includes the residual standard #' deviation. For mixed models, this is defined as the sum of the distribution-specific #' variance and the variance for the additive overdispersion term (see #' [insight::get_variance()] for details). Defaults to `FALSE` for mixed models #' due to the longer computation time. #' @param ci_random Logical, if `TRUE`, includes the confidence intervals for #' random effects parameters. 
Only applies if `effects` is not `"fixed"` and #' if `ci` is not `NULL`. Set `ci_random = FALSE` if computing the model #' summary takes too long. By default, `ci_random = NULL`, which #' uses a heuristic to guess whether computation of confidence intervals for random #' effects is fast enough or not. For models with larger sample size and/or #' more complex random effects structures, confidence intervals will not be #' computed by default; for simpler models or fewer observations, confidence #' intervals will be included. Set explicitly to `TRUE` or `FALSE` to enforce #' or omit calculation of confidence intervals. #' @param group_level Logical, for multilevel models (i.e. models with random #' effects) and when `effects = "random"`, include the parameters for each #' group level from random effects. If `group_level = FALSE` (the default), #' only information on SD and COR is shown. #' @param ... Arguments passed to or from other methods. For instance, when #' `bootstrap = TRUE`, arguments like `type` or `parallel` are passed down to #' `bootstrap_model()`. #' #' Further non-documented arguments are: #' #' - `digits`, `p_digits`, `ci_digits` and `footer_digits` to set the number of #' digits for the output. `groups` can be used to group coefficients. These #' arguments will be passed to the print-method, or can directly be used in #' `print()`, see documentation in [`print.parameters_model()`]. #' - If `s_value = TRUE`, the p-value will be replaced by the S-value in the #' output (cf. _Rafi and Greenland 2020_). #' - `pd` adds an additional column with the _probability of direction_ (see #' [`bayestestR::p_direction()`] for details). Furthermore, see 'Examples' for #' this function. #' - For developers whose main interest is to get a "tidy" data frame of #' model summaries, it is recommended to set `pretty_names = FALSE` to speed #' up computation of the summary table. #' #' @inheritParams model_parameters.default #' @inheritParams model_parameters.brmsfit #' @inheritParams simulate_model #' #' @inheritSection model_parameters.zcpglm Model components #' #' @inheritSection model_parameters Confidence intervals and approximation of degrees of freedom #' #' @section Confidence intervals for random effects variances: #' For models of class `merMod` and `glmmTMB`, confidence intervals for random #' effect variances can be calculated. #' #' - For models from package **lme4**, when `ci_method` is either `"profile"` #' or `"boot"`, and `effects` is either `"random"` or `"all"`, profiled or #' bootstrapped confidence intervals, respectively, are computed for the random effects. #' #' - For all other options of `ci_method`, and only when the **merDeriv** #' package is installed, confidence intervals for random effects are based on #' normal-distribution approximation, using the delta-method to transform #' standard errors for constructing the intervals around the log-transformed #' SD parameters. These are then back-transformed, so that random effect #' variances, standard errors and confidence intervals are shown on the original #' scale. Due to the transformation, the intervals are asymmetrical; however, #' they are within the correct bounds (i.e. no negative interval for the SD, #' and the interval for the correlations is within the range from -1 to +1). #' #' - For models of class `glmmTMB`, confidence intervals for random effect #' variances always use a Wald t-distribution approximation. 
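#'
#' As a minimal sketch (assuming the **lme4** package is available), profiled
#' confidence intervals for the random effects variances can be requested
#' like this:
#'
#' ```
#' model <- lme4::lmer(mpg ~ wt + (1 | gear), data = mtcars)
#' model_parameters(model, effects = "random", ci_method = "profile")
#' ```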
#' #' @section Singular fits (random effects variances near zero): #' If a model is "singular", this means that some dimensions of the #' variance-covariance matrix have been estimated as exactly zero. This #' often occurs for mixed models with complex random effects structures. #' #' There is no gold standard for how to deal with singularity and which #' random-effects specification to choose. One way is to go fully Bayesian #' (with informative priors). Other proposals are listed in the documentation #' of [`performance::check_singularity()`]. However, since version 1.1.9, the #' **glmmTMB** package also allows the use of priors in a frequentist framework. One #' recommendation is to use a Gamma prior (_Chung et al. 2013_). The mean may #' vary from 1 to very large values (like `1e8`), and the shape parameter should #' be set to a value of 2.5. You can then `update()` your model with the specified #' prior. In **glmmTMB**, the code would look like this: #' #' ``` #' # "model" is an object of class glmmTMB #' prior <- data.frame( #'   prior = "gamma(1, 2.5)", # mean can be 1, or even 1e8 #'   class = "ranef" # for random effects #' ) #' model_with_priors <- update(model, priors = prior) #' ``` #' #' Large values for the mean parameter of the Gamma prior have little impact #' on the random effects variances in terms of bias. Thus, if `1` doesn't #' fix the singular fit, you can safely try larger values. #' #' @section Dispersion parameters in *glmmTMB*: #' For some models from package **glmmTMB**, both the dispersion parameter and #' the residual variance from the random effects parameters are shown. Usually, #' these are the same but presented on different scales, e.g. #' #' ``` #' model <- glmmTMB(Sepal.Width ~ Petal.Length + (1|Species), data = iris) #' exp(fixef(model)$disp) # 0.09902987 #' sigma(model)^2 # 0.09902987 #' ``` #' #' For models where the dispersion parameter and the residual variance are #' the same, only the residual variance is shown in the output. #' #' @seealso [insight::standardize_names()] to #' rename columns into a consistent, standardized naming scheme. #' #' @note If the calculation of random effects parameters takes too long, you may #' use `effects = "fixed"`. There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) #' implemented in the [**see**-package](https://easystats.github.io/see/). #' #' @references #' Chung Y, Rabe-Hesketh S, Dorie V, Gelman A, and Liu J. 2013. "A Nondegenerate #' Penalized Likelihood Estimator for Variance Parameters in Multilevel Models." #' Psychometrika 78 (4): 685–709. \doi{10.1007/s11336-013-9328-2} #' #' @examplesIf require("lme4") && require("glmmTMB") #' library(parameters) #' data(mtcars) #' model <- lme4::lmer(mpg ~ wt + (1 | gear), data = mtcars) #' model_parameters(model) #' #' \donttest{ #' data(Salamanders, package = "glmmTMB") #' model <- glmmTMB::glmmTMB( #' count ~ spp + mined + (1 | site), #' ziformula = ~mined, #' family = poisson(), #' data = Salamanders #' ) #' model_parameters(model, effects = "all") #' #' model <- lme4::lmer(mpg ~ wt + (1 | gear), data = mtcars) #' model_parameters(model, bootstrap = TRUE, iterations = 50, verbose = FALSE) #' } #' @return A data frame of indices related to the model's parameters. 
#' @export model_parameters.glmmTMB <- function( model, ci = 0.95, ci_method = "wald", ci_random = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, effects = "all", component = "all", group_level = FALSE, exponentiate = FALSE, p_adjust = NULL, vcov = NULL, vcov_args = NULL, wb_component = FALSE, include_info = getOption("parameters_mixed_info", FALSE), include_sigma = FALSE, keep = NULL, drop = NULL, verbose = TRUE, ... ) { insight::check_if_installed("glmmTMB") # p-values, CI and se might be based on different df-methods ci_method <- insight::validate_argument( ci_method, # fmt: skip c( "wald", "normal", "residual", "ml1", "betwithin", "satterthwaite", "kenward", "kr", "boot", "profile", "uniroot", "robust" ) ) # which components to return? effects <- insight::validate_argument( effects, c("fixed", "random", "total", "random_total", "grouplevel", "all") ) component <- insight::validate_argument( component, c("all", "conditional", "zi", "zero_inflated", "dispersion") ) modelinfo <- insight::model_info(model, verbose = FALSE) # group level estimates ================================================= # ======================================================================= # for coef(), we don't need all the attributes and just return early here if (effects %in% c("total", "random_total")) { params <- .group_level_total(model) params$Effects <- "total" class(params) <- c("parameters_coef", "see_parameters_coef", class(params)) return(params) } # group grouplevel estimates (BLUPs), handle alias if (effects == "grouplevel") { effects <- "random" group_level <- TRUE } # standardize only works for fixed effects... if (!is.null(standardize) && standardize != "refit") { if (!missing(effects) && effects != "fixed" && verbose) { insight::format_warning( "Standardizing coefficients only works for fixed effects of the mixed model." ) } effects <- "fixed" } # fix argument, if model has only conditional component cs <- stats::coef(summary(model)) has_zeroinf <- modelinfo$is_zero_inflated has_disp <- is.list(cs) && !is.null(cs$disp) if (!has_zeroinf && !has_disp && component != "conditional") { component <- "conditional" } # for ci_method kenward or satterthwaite, only conditional component if (ci_method %in% c("satterthwaite", "kenward", "kr") && component != "conditional") { component <- "conditional" } # initialize params <- att <- NULL dispersion_param <- FALSE # fixed effects ================================================= # =============================================================== if (effects %in% c("fixed", "all")) { if (bootstrap) { # bootstrapped parameters params <- bootstrap_parameters(model, iterations = iterations, ci = ci, ...) if (effects != "fixed") { effects <- "fixed" if (verbose) { insight::format_warning( "Bootstrapping only returns fixed effects of the mixed model." 
) } } } else { # regular parameters fun_args <- list( model, ci = ci, component = component, merge_by = c("Parameter", "Component"), standardize = standardize, effects = "fixed", ci_method = ci_method, p_adjust = p_adjust, keep_parameters = NULL, drop_parameters = NULL, verbose = verbose, vcov = vcov, vcov_args = vcov_args, keep_component_column = component != "conditional", include_sigma = include_sigma, wb_component = wb_component, include_info = include_info ) fun_args <- c(fun_args, list(...)) params <- do.call(".extract_parameters_generic", fun_args) } # add dispersion parameter out <- .add_dispersion_param_glmmTMB(model, params, effects, component, ci, verbose) params <- out$params dispersion_param <- out$dispersion_param # exponentiate coefficients and SE/CI, if requested params <- .exponentiate_parameters(params, model, exponentiate) params$Effects <- "fixed" att <- attributes(params) } # add random effects, either group level or re variances # ====================================================== params <- .add_random_effects_glmmTMB( model, params, ci, ci_method, ci_random, effects, component, dispersion_param, group_level, verbose ) # clean-up # ====================================================== # remove empty column if (!is.null(params$Level) && all(is.na(params$Level))) { params$Level <- NULL } # filter parameters if (!is.null(keep) || !is.null(drop)) { params <- .filter_parameters(params, keep, drop, verbose = verbose) } # due to rbind(), we lose attributes from "extract_parameters()", # so we add those attributes back here... if (!is.null(att)) { attributes(params) <- utils::modifyList(att, attributes(params)) } params <- .add_model_parameters_attributes( params, model, ci = ci, exponentiate, ci_method = ci_method, p_adjust = p_adjust, verbose = verbose, group_level = group_level, include_info = include_info, wb_component = wb_component, modelinfo = modelinfo, ... 
) attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model)) class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } # helper ----------------------------------------- # this functions adds the dispersion parameter, if it is not already # present in the output .add_dispersion_param_glmmTMB <- function( model, params, effects, component, ci, verbose ) { dispersion_param <- FALSE if ( # must be glmmTMB inherits(model, "glmmTMB") && # don't print dispersion if already present (is.null(component) || !"dispersion" %in% params$Component) && # don't print dispersion for zi-component component %in% c("conditional", "all", "dispersion") && # if effects = "fixed" and component = "conditional", don't include dispersion !(component == "conditional" && effects == "fixed") ) { dispersion_param <- insight::get_parameters(model, component = "dispersion") if (!is.null(dispersion_param)) { # add component column if (is.null(params$Component)) { params$Component <- "conditional" } params[nrow(params) + 1, ] <- NA params[nrow(params), "Parameter"] <- dispersion_param$Parameter[1] params[nrow(params), "Coefficient"] <- stats::sigma(model) params[nrow(params), "Component"] <- dispersion_param$Component[1] params[nrow(params), c("CI_low", "CI_high")] <- tryCatch( suppressWarnings(stats::confint( model, parm = "sigma", method = "wald", level = ci )[1:2]), error = function(e) { if (verbose) { insight::format_alert( "Cannot compute standard errors and confidence intervals for sigma parameter.", "Your model may suffer from singularity (see '?lme4::isSingular' and '?performance::check_singularity')." # nolint ) } c(NA, NA) } ) dispersion_param <- TRUE } } list(params = params, dispersion_param = dispersion_param) } # this functions adds random effects parameters to the output. Depending on # whether group level estimates or random effects variances are requested, # the related parameters are returned. It also correctly deals with the # dispersion parameter, if present in random effects .add_random_effects_glmmTMB <- function( model, params, ci, ci_method, ci_random, effects, component, dispersion_param, group_level, verbose = TRUE ) { params_random <- params_variance <- NULL random_effects <- insight::find_random(model, flatten = TRUE) if (!is.null(random_effects) && effects %in% c("random", "all", "grouplevel")) { # add random parameters or variances if (isTRUE(group_level)) { params_random <- .extract_random_parameters( model, ci = ci, effects = effects, component = component ) if (length(random_effects) > 1 && verbose) { insight::format_alert( "Cannot extract confidence intervals for random variance parameters from models with more than one grouping factor." 
# nolint ) } } else { params_variance <- .extract_random_variances( model, ci = ci, effects = effects, component = component, ci_method = ci_method, ci_random = ci_random, verbose = verbose ) # remove redundant dispersion parameter if (isTRUE(dispersion_param) && !is.null(params) && !is.null(params$Component)) { disp <- which(params$Component == "dispersion") res <- which(params_variance$Group == "Residual") # check if we have dispersion parameter, and either no sigma # or sigma equals dispersion if ( length(disp) > 0 && length(res) > 0 && isTRUE(all.equal( params_variance$Coefficient[res], params$Coefficient[disp], tolerance = 1e-5 )) ) { params <- params[-disp, ] } } } } # merge random and fixed effects, if necessary if (!is.null(params) && (!is.null(params_random) || !is.null(params_variance))) { params$Level <- NA params$Group <- "" # add component column if (!"Component" %in% colnames(params)) { if (component %in% c("zi", "zero_inflated")) { params$Component <- "zero_inflated" } else { params$Component <- "conditional" } } # reorder if (is.null(params_random)) { params <- params[match(colnames(params_variance), colnames(params))] } else { params <- params[match(colnames(params_random), colnames(params))] } } rbind(params, params_random, params_variance) } # ci ----- #' @export ci.glmmTMB <- function( x, ci = 0.95, dof = NULL, method = "wald", component = "all", verbose = TRUE, ... ) { method <- insight::validate_argument( tolower(method), # fmt: skip c( "wald", "normal", "residual", "ml1", "betwithin", "satterthwaite", "kenward", "kr", "boot", "profile", "uniroot", "robust" ) ) component <- insight::validate_argument( component, c("all", "conditional", "zi", "zero_inflated", "dispersion") ) if (is.null(.check_component(x, component, verbose = verbose))) { return(NULL) } # profiled CIs if (method == "profile") { if (length(ci) > 1) { pp <- stats::profile(x) } else { pp <- NULL } out <- lapply(ci, function(i) { .ci_profile_glmmTMB(x, ci = i, profiled = pp, component = component, ...) }) do.call(rbind, out) # uniroot CIs } else if (method == "uniroot") { out <- lapply(ci, function(i) { .ci_uniroot_glmmTMB(x, ci = i, component = component, ...) }) do.call(rbind, out) } else { # all other .ci_generic( model = x, ci = ci, dof = dof, method = method, component = component, ... ) } } # standard_error ----- #' @export standard_error.glmmTMB <- function( model, effects = "fixed", component = "all", method = NULL, vcov = NULL, vcov_args = NULL, verbose = TRUE, ... ) { component <- insight::validate_argument( component, c("all", "conditional", "zi", "zero_inflated", "dispersion") ) effects <- insight::validate_argument(effects, c("fixed", "random")) if (effects == "random") { .se_random_effects_glmmTMB(model) } else if (!is.null(vcov)) { .se_robust_glmmTMB(model, component, vcov, vcov_args, verbose, ...) 
} else { .se_fixed_effects_glmmTMB(model, component, method, verbose) } } # helper -------------------------------------------------------------------- # extract standard errors for fixed effects parameters .se_fixed_effects_glmmTMB <- function(model, component, method = NULL, verbose = TRUE) { if (is.null(.check_component(model, component, verbose = verbose))) { return(NULL) } # kenward approx if (!is.null(method) && method %in% c("kenward", "kr")) { return(se_kenward(model, component = "conditional")) } cs <- insight::compact_list(stats::coef(summary(model))) x <- lapply(names(cs), function(i) { .data_frame( Parameter = insight::find_parameters( model, effects = "fixed", component = i, flatten = TRUE ), SE = as.vector(cs[[i]][, 2]), Component = i ) }) se <- do.call(rbind, x) se$Component <- .rename_values(se$Component, "cond", "conditional") se$Component <- .rename_values(se$Component, "zi", "zero_inflated") se$Component <- .rename_values(se$Component, "disp", "dispersion") .filter_component(se, component) } # extract robust standard errors for fixed effects parameters .se_robust_glmmTMB <- function( model, component = "all", vcov, vcov_args = NULL, verbose = TRUE, ... ) { fun_args <- list(model, component = component, vcov = vcov, vcov_args = vcov_args) fun_args <- c(fun_args, list(...)) do.call("standard_error.default", fun_args) } # extract standard errors for random effects parameters .se_random_effects_glmmTMB <- function(model) { if (!all(insight::check_if_installed(c("TMB", "glmmTMB"), quietly = TRUE))) { return(NULL) } s1 <- TMB::sdreport(model$obj, getJointPrecision = TRUE) s2 <- sqrt(s1$diag.cov.random) rand.ef <- glmmTMB::ranef(model)[[1]] lapply(rand.ef, function(.x) { cnt <- nrow(.x) * ncol(.x) s3 <- s2[1:cnt] s2 <- s2[-(1:cnt)] d <- as.data.frame(matrix(sqrt(s3), ncol = ncol(.x), byrow = TRUE)) colnames(d) <- colnames(.x) d }) } # simulate model ----- #' @export simulate_model.glmmTMB <- function( model, iterations = 1000, component = "all", verbose = FALSE, ... ) { component <- insight::validate_argument( component, c("all", "conditional", "zi", "zero_inflated", "dispersion") ) info <- insight::model_info(model, verbose = FALSE) ## TODO remove is.list() when insight 0.8.3 on CRAN if (!is.list(info)) { info <- NULL } has_zeroinflated <- !is.null(info) && isTRUE(info$is_zero_inflated) has_dispersion <- !is.null(info) && isTRUE(info$is_dispersion) # check component-argument ---- if (component == "all") { if (!has_zeroinflated && !has_dispersion) { if (verbose) { insight::format_alert( "No zero-inflation and dispersion components. Simulating from conditional parameters." ) } component <- "conditional" } else if (!has_zeroinflated && has_dispersion) { if (verbose) { insight::format_alert( "No zero-inflation component. Simulating from conditional and dispersion parameters." ) } component <- c("conditional", "dispersion") } else if (has_zeroinflated && !has_dispersion) { if (verbose) { insight::format_alert( "No dispersion component. Simulating from conditional and zero-inflation parameters." ) } component <- c("conditional", "zero_inflated") } } else if (component %in% c("zi", "zero_inflated") && !has_zeroinflated) { insight::format_error("No zero-inflation model found.") } else if (component == "dispersion" && !has_dispersion) { insight::format_error("No dispersion model found.") } if (is.null(iterations)) { iterations <- 1000 } if (all(component == c("conditional", "zero_inflated"))) { d1 <- .simulate_model(model, iterations, component = "conditional", ...) 
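# the zero-inflation draws get a "_zi" suffix below so that both components
# can be column-bound into one data frame without name clashes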
d2 <- .simulate_model(model, iterations, component = "zero_inflated", ...) colnames(d2) <- paste0(colnames(d2), "_zi") d <- cbind(d1, d2) } else if (all(component == c("conditional", "dispersion"))) { d1 <- .simulate_model(model, iterations, component = "conditional", ...) d2 <- .simulate_model(model, iterations, component = "dispersion", ...) colnames(d2) <- paste0(colnames(d2), "_disp") d <- cbind(d1, d2) } else if (all(component == "all")) { d1 <- .simulate_model(model, iterations, component = "conditional", ...) d2 <- .simulate_model(model, iterations, component = "zero_inflated", ...) d3 <- .simulate_model(model, iterations, component = "dispersion", ...) colnames(d2) <- paste0(colnames(d2), "_zi") colnames(d3) <- paste0(colnames(d3), "_disp") d <- cbind(d1, d2, d3) } else if (all(component == "conditional")) { d <- .simulate_model(model, iterations, component = "conditional", ...) } else if (all(component %in% c("zi", "zero_inflated"))) { d <- .simulate_model(model, iterations, component = "zero_inflated", ...) } else { d <- .simulate_model(model, iterations, component = "dispersion", ...) } class(d) <- c("parameters_simulate_model", class(d)) attr(d, "object_name") <- insight::safe_deparse_symbol(substitute(model)) d } # simulate_parameters ----- #' @export simulate_parameters.glmmTMB <- function( model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ... ) { sim_data <- simulate_model(model, iterations = iterations, ...) out <- .summary_bootstrap( data = sim_data, test = test, centrality = centrality, ci = ci, ci_method = ci_method, ... ) params <- insight::get_parameters(model, ...) if ("Effects" %in% colnames(params) && insight::n_unique(params$Effects) > 1) { out$Effects <- params$Effects } if ("Component" %in% colnames(params) && insight::n_unique(params$Component) > 1) { out$Component <- params$Component } class(out) <- c("parameters_simulate", "see_parameters_simulate", class(out)) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) attr(out, "iterations") <- iterations attr(out, "ci") <- ci attr(out, "ci_method") <- ci_method attr(out, "centrality") <- centrality out } parameters/R/ci_kenward.R0000644000176200001440000000314715073732442015063 0ustar liggesusers#' @rdname p_value_kenward #' @export ci_kenward <- function(model, ci = 0.95, ...) { UseMethod("ci_kenward") } #' @export ci_kenward.default <- function(model, ci = 0.95, ...) { if (!.check_REML_fit(model)) { model <- stats::update(model, . ~ ., REML = TRUE) } df_kr <- dof_kenward(model) out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, dof = df_kr, effects = "fixed", component = "all", method = "kenward", se = attr(df_kr, "se", exact = TRUE) ) }) out <- do.call(rbind, out) row.names(out) <- NULL out } #' @export ci_kenward.glmmTMB <- function(model, ci = 0.95, ...) { if (!.check_REML_fit(model)) { model <- stats::update(model, . 
~ ., REML = TRUE) } df_kr <- dof_kenward(model) out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, dof = df_kr, effects = "fixed", component = "conditional", # for glmmTMB, only conditional method = "kenward", se = attr(df_kr, "se", exact = TRUE) ) }) out <- do.call(rbind, out) row.names(out) <- NULL out } .ci_kenward_dof <- function(model, ci = 0.95, df_kr) { if (inherits(model, "glmmTMB")) { component <- "conditional" } else { component <- "all" } out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, dof = df_kr$df_error, effects = "fixed", component = component, method = "kenward", se = df_kr$SE ) }) out <- do.call(rbind, out) row.names(out) <- NULL out } parameters/R/utils_cleaners.R0000644000176200001440000000434314736731407015775 0ustar liggesusers#' @keywords internal .clean_parameter_names <- function(x, full = FALSE) { # return if x is empty if (is.null(x) || length(x) == 0) { return("") } # here we need to capture only those patterns that we do *not* want to format # in a particular style. However, these patterns will not be shown in the output # from "model_parameters()". If certain patterns contain useful information, # remove them here and clean/prepare them in ".parameters_type_basic()". # for formatting / printing, refer to ".format_parameter()". pattern <- if (full) { c( "as.factor", "as.numeric", "as.ordered", "factor", "ordered", "offset", "lag", "diff", "catg", "matrx", "pol", "strata", "strat", "scale", "scored", "interaction", "lsp", "pb", "lo", "t2", "te", "ti", "tt", "mi", "mo", "gp" ) } else { c("as.factor", "as.numeric", "as.ordered", "factor", "ordered", "catg", "interaction") } for (j in seq_along(pattern)) { # remove possible namespace if (any(grepl("::", x, fixed = TRUE))) { x <- sub("(.*)::(.*)", "\\2", x) } if (pattern[j] == "offset" && any(grepl("offset(", x, fixed = TRUE))) { x <- insight::trim_ws(sub("offset\\(([^-+ )]*)\\)(.*)", "\\1\\2", x)) # some exceptions here... 
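# (the following branches handle these exceptions: with `full = TRUE`,
# "scale(" terms are cleaned via insight::clean_names() so the variable
# name is preserved; all remaining patterns go through the generic regex)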
} else if (full && pattern[j] == "scale" && any(grepl("scale(", x, fixed = TRUE))) { x[grepl("scale(", x, fixed = TRUE)] <- insight::clean_names(grep("scale(", x, fixed = TRUE, value = TRUE)) } else if (any(grepl(pattern[j], x, fixed = TRUE))) { p <- paste0(pattern[j], "\\(((\\w|\\.)*)\\)(.*)") x <- insight::trim_ws(sub(p, "\\1\\3", x)) } } gsub("`", "", x, fixed = TRUE) } #' @keywords internal .remove_backticks_from_string <- function(x) { if (is.character(x)) { x <- gsub("`", "", x, fixed = TRUE) } x } #' @keywords internal .intercepts <- function() { c( "(intercept)_zi", "intercept (zero-inflated)", "intercept (zero-inflation)", "intercept", "zi_intercept", "(intercept)", "b_intercept", "b_zi_intercept" ) } #' @keywords internal .in_intercepts <- function(x) { tolower(x) %in% .intercepts() | startsWith(tolower(x), "intercept") } parameters/R/reexports.R0000644000176200001440000000176114502257471015010 0ustar liggesusers# ----------------------- insight ------------------------------------- #' @importFrom insight standardize_names #' @export insight::standardize_names #' @importFrom insight supported_models #' @export insight::supported_models #' @importFrom insight print_html #' @export insight::print_html #' @importFrom insight print_md #' @export insight::print_md #' @importFrom insight display #' @export insight::display # ----------------------- datawizard ------------------------------------- #' @importFrom datawizard describe_distribution #' @export datawizard::describe_distribution #' @importFrom datawizard demean #' @export datawizard::demean #' @importFrom datawizard rescale_weights #' @export datawizard::rescale_weights #' @importFrom datawizard visualisation_recipe #' @export datawizard::visualisation_recipe #' @importFrom datawizard kurtosis #' @export datawizard::kurtosis #' @importFrom datawizard skewness #' @export datawizard::skewness parameters/R/methods_epi2x2.R0000644000176200001440000000356214716604200015603 0ustar liggesusers#' @export model_parameters.epi.2by2 <- function(model, verbose = TRUE, ...) 
{ # get parameter estimates params <- insight::get_parameters(model) colnames(params)[2] <- "Coefficient" # get coefficients including CI coef_names <- grepl("^([^NNT]*)(\\.strata\\.wald)", names(model$massoc.detail), perl = TRUE) cf <- model$massoc.detail[coef_names] names(cf) <- gsub(".strata.wald", "", names(cf), fixed = TRUE) # extract CI cis <- do.call(rbind, cf) cis$Parameter <- rownames(cis) cis$est <- NULL colnames(cis) <- c("CI_low", "CI_high", "Parameter") # merge params <- merge(params, cis, sort = FALSE) # find fraction estimates, multiply by 100 to get percentages fractions <- params$Parameter %in% c("AFRisk", "PAFRisk") params[fractions, c("Coefficient", "CI_low", "CI_high")] <- 100 * params[fractions, c("Coefficient", "CI_low", "CI_high")] # pretty names pretty_names <- params$Parameter pretty_names[pretty_names == "PR"] <- "Prevalence Ratio" pretty_names[pretty_names == "RR"] <- "Risk Ratio" pretty_names[pretty_names == "OR"] <- "Odds Ratio" pretty_names[pretty_names == "ARisk"] <- "Attributable Risk" pretty_names[pretty_names == "PARisk"] <- "Attributable Risk in Population" pretty_names[pretty_names == "AFRisk"] <- "Attributable Fraction in Exposed (%)" pretty_names[pretty_names == "PAFRisk"] <- "Attributable Fraction in Population (%)" stats <- model$massoc.detail$chi2.strata.uncor attr(params, "footer_text") <- paste0("Test that Odds Ratio = 1: Chi2(", stats[["df"]], ") = ", insight::format_value(stats[["test.statistic"]]), ", ", insight::format_p(stats[["p.value.2s"]])) attr(params, "pretty_names") <- stats::setNames(pretty_names, params$Parameter) attr(params, "no_caption") <- TRUE class(params) <- c("parameters_model", "see_parameters_model", class(params)) params } parameters/R/methods_coda.R0000644000176200001440000000010714507235543015400 0ustar liggesusers#' @export model_parameters.mcmc.list <- model_parameters.data.frame parameters/R/methods_mice.R0000644000176200001440000001173614726272305015421 0ustar liggesusers# confidence intervals -------------------------- #' @export ci.mipo <- ci.gam #' @export ci.mira <- function(x, ci = 0.95, ...) { insight::check_if_installed("mice") ci(mice::pool(x), ci = ci, ...) } # p values --------------------------------------- #' @export p_value.mipo <- function(model, ...) { s <- summary(model) out <- .data_frame( Parameter = as.vector(s$term), p = as.vector(s$p.value) ) # check for ordinal-alike models if (!is.null(model$pooled) && "y.level" %in% colnames(model$pooled)) { out$Response <- as.vector(model$pooled$y.level) } out } #' @export p_value.mira <- function(model, ...) { insight::check_if_installed("mice") p_value(mice::pool(model), ...) } # standard errors -------------------------------- #' @export standard_error.mipo <- function(model, ...) { s <- summary(model) out <- .data_frame( Parameter = as.vector(s$term), SE = as.vector(s$std.error) ) # check for ordinal-alike models if (!is.null(model$pooled) && "y.level" %in% colnames(model$pooled)) { out$Response <- as.vector(model$pooled$y.level) } out } #' @export standard_error.mira <- function(model, ...) { insight::check_if_installed("mice") standard_error(mice::pool(model), ...) } # format ------------------------------------------- #' @export format_parameters.mira <- format_parameters.rma # model_parameters --------------------------------- #' @export model_parameters.mipo <- function(model, ci = 0.95, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { # validation check, warn if unsupported argument is used. 
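# (robust `vcov`/`vcov_args` estimators are not supported here; for pooled
# results, the pooled standard errors already combine within- and
# between-imputation variance via Rubin's rules)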
dot_args <- .check_dots(
    dots = list(...),
    not_allowed = c("vcov", "vcov_args"),
    class(model)[1],
    verbose = verbose
  )

  # check if we have ordinal/categorical response
  if (!is.null(model$pooled) && "y.level" %in% colnames(model$pooled)) {
    merge_by <- c("Parameter", "Response")
  } else {
    merge_by <- "Parameter"
  }

  fun_args <- list(
    model,
    ci = ci,
    merge_by = merge_by,
    exponentiate = exponentiate,
    p_adjust = p_adjust,
    keep_parameters = keep,
    drop_parameters = drop,
    vcov = NULL,
    vcov_args = NULL
  )
  fun_args <- c(fun_args, dot_args)

  out <- do.call(".model_parameters_generic", fun_args)
  attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model))
  out
}


#' Parameters from multiply imputed repeated analyses
#'
#' Format models of class `mira`, obtained from `mice::with.mids()`, or of
#' class `mipo`.
#'
#' @param model An object of class `mira` or `mipo`.
#' @inheritParams model_parameters.default
#' @param ... Arguments passed to or from other methods.
#'
#' @details `model_parameters()` for objects of class `mira` works
#' similarly to `summary(mice::pool())`, i.e. it generates the pooled summary
#' of multiple imputed repeated regression analyses.
#'
#' @examplesIf require("mice", quietly = TRUE) && require("gee", quietly = TRUE)
#' library(parameters)
#' data(nhanes2, package = "mice")
#' imp <- mice::mice(nhanes2)
#' fit <- with(data = imp, expr = lm(bmi ~ age + hyp + chl))
#' model_parameters(fit)
#' \donttest{
#' # model_parameters() also works for models that have no "tidy"-method in mice
#' data(warpbreaks)
#' set.seed(1234)
#' warpbreaks$tension[sample(1:nrow(warpbreaks), size = 10)] <- NA
#' imp <- mice::mice(warpbreaks)
#' fit <- with(data = imp, expr = gee::gee(breaks ~ tension, id = wool))
#'
#' # does not work:
#' # summary(mice::pool(fit))
#'
#' model_parameters(fit)
#' }
#'
#' # and it works with pooled results
#' data("nhanes2", package = "mice")
#' imp <- mice::mice(nhanes2)
#' fit <- with(data = imp, expr = lm(bmi ~ age + hyp + chl))
#' pooled <- mice::pool(fit)
#'
#' model_parameters(pooled)
#' @export
model_parameters.mira <- function(model,
                                  ci = 0.95,
                                  exponentiate = FALSE,
                                  p_adjust = NULL,
                                  keep = NULL,
                                  drop = NULL,
                                  verbose = TRUE,
                                  ...) {
  insight::check_if_installed("mice")
  micemodel <- suppressWarnings(mice::pool(model))

  out <- .model_parameters_generic(
    model = micemodel,
    ci = ci,
    bootstrap = FALSE,
    iterations = 10,
    merge_by = "Parameter",
    standardize = NULL,
    exponentiate = exponentiate,
    p_adjust = p_adjust,
    keep_parameters = keep,
    drop_parameters = drop,
    verbose = verbose,
    ...
  )
  attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model))
  out
}
parameters/R/compare_parameters.R0000644000176200001440000002654215006147473016620 0ustar liggesusers#' @title Compare model parameters of multiple models
#' @name compare_parameters
#'
#' @description Compute and extract model parameters of multiple regression
#' models. See [`model_parameters()`] for further details.
#'
#' @param ... One or more regression model objects, or objects returned by
#' `model_parameters()`. Regression models may be of different model
#' types. Model objects may be passed comma separated, or as a list.
#' If model objects are passed with names or the list has named elements,
#' these names will be used as column names.
#' @param component Model component for which parameters should be shown. See
#' documentation for related model class in [`model_parameters()`].
#' @param column_names Character vector with strings that should be used as
#' column headers.
Must be of same length as number of models in `...`. #' @param ci_method Method for computing degrees of freedom for p-values #' and confidence intervals (CI). See documentation for related model class #' in [model_parameters()]. #' @param coefficient_names Character vector with strings that should be used #' as column headers for the coefficient column. Must be of same length as #' number of models in `...`, or length 1. If length 1, this name will be #' used for all coefficient columns. If `NULL`, the name for the coefficient #' column will detected automatically (as in `model_parameters()`). #' @inheritParams model_parameters.default #' @inheritParams model_parameters.glmmTMB #' @inheritParams print.parameters_model #' #' @details #' #' This function is in an early stage and does not yet cope with more complex #' models, and probably does not yet properly render all model components. It #' should also be noted that when including models with interaction terms, not #' only do the values of the parameters change, but so does their meaning (from #' main effects, to simple slopes), thereby making such comparisons hard. #' Therefore, you should not use this function to compare models with #' interaction terms with models without interaction terms. #' #' @return A data frame of indices related to the model's parameters. #' #' @examplesIf require("gt", quietly = TRUE) #' data(iris) #' lm1 <- lm(Sepal.Length ~ Species, data = iris) #' lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) #' compare_parameters(lm1, lm2) #' #' # custom style #' compare_parameters(lm1, lm2, select = "{estimate}{stars} ({se})") #' #' \donttest{ #' # custom style, in HTML #' result <- compare_parameters(lm1, lm2, select = "{estimate}
({se})|{p}") #' print_html(result) #' } #' #' data(mtcars) #' m1 <- lm(mpg ~ wt, data = mtcars) #' m2 <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial") #' compare_parameters(m1, m2) #' \donttest{ #' # exponentiate coefficients, but not for lm #' compare_parameters(m1, m2, exponentiate = "nongaussian") #' #' # change column names #' compare_parameters("linear model" = m1, "logistic reg." = m2) #' compare_parameters(m1, m2, column_names = c("linear model", "logistic reg.")) #' #' # or as list #' compare_parameters(list(m1, m2)) #' compare_parameters(list("linear model" = m1, "logistic reg." = m2)) #' } #' @export compare_parameters <- function(..., ci = 0.95, effects = "fixed", component = "conditional", standardize = NULL, exponentiate = FALSE, ci_method = "wald", p_adjust = NULL, select = NULL, column_names = NULL, pretty_names = TRUE, coefficient_names = NULL, keep = NULL, drop = NULL, include_reference = FALSE, groups = NULL, verbose = TRUE) { models <- list(...) if (length(models) == 1) { if (insight::is_model(models[[1]]) || inherits(models[[1]], "parameters_model")) { modellist <- FALSE } else { models <- models[[1]] modellist <- TRUE } } else { modellist <- FALSE } if (isTRUE(modellist)) { model_names <- names(models) if (length(model_names) == 0) { model_names <- paste("Model", seq_along(models), sep = " ") names(models) <- model_names } } else { model_names <- match.call(expand.dots = FALSE)[["..."]] if (length(names(model_names)) > 0) { model_names <- names(model_names) } else if (any(vapply(model_names, is.call, TRUE))) { model_names <- paste("Model", seq_along(models), sep = " ") } else { model_names <- vapply(model_names, as.character, character(1)) names(models) <- model_names } } supported_models <- vapply(models, function(i) { insight::is_model_supported(i) || inherits(i, "lavaan") || inherits(i, "parameters_model") }, TRUE) if (!all(supported_models)) { if (verbose) { insight::format_alert( sprintf("Following objects are not supported: %s", toString(model_names[!supported_models])), "Dropping unsupported models now." ) } models <- models[supported_models] model_names <- model_names[supported_models] } # set default if (is.null(select)) { if (is.null(ci) || is.na(ci)) { # if user set CI to NULL, show only estimates by default select <- "{estimate}" } else { # if we have CI, include them select <- "ci" } } # provide own names if (!is.null(column_names)) { if (length(column_names) != length(model_names)) { if (isTRUE(verbose)) { insight::format_alert("Number of column names does not match number of models.") } } else { model_names <- column_names } } # make sure we have enough coefficient names - else, repeat first value if (!is.null(coefficient_names) && length(coefficient_names) < length(models)) { coefficient_names <- rep(coefficient_names[1], length(models)) } # iterate all models and create list of model parameters m <- lapply(seq_along(models), function(i) { model <- models[[i]] model_name <- model_names[[i]] if (inherits(model, "parameters_model")) { # we already have model parameters object... 
dat <- model } else { # set default-ci_type for Bayesian models if (insight::is_bayesian_model(model, exclude = c("bmerMod", "bayesx", "blmerMod", "bglmerMod")) && !ci_method %in% c("hdi", "quantile", "ci", "eti", "si", "bci", "bcai")) { # nolint ci_method_tmp <- "eti" } else { ci_method_tmp <- ci_method } # here we have a model object that needs to be passed to model_parameters dat <- model_parameters( model, ci = ci, effects = effects, component = component, standardize = standardize, exponentiate = exponentiate, ci_method = ci_method_tmp, p_adjust = p_adjust, keep = keep, drop = drop, wb_component = FALSE, include_reference = include_reference, verbose = verbose ) } # set specific names for coefficient column coef_name <- attributes(dat)$coefficient_name if (!is.null(coef_name) && is.null(coefficient_names)) { colnames(dat)[colnames(dat) == "Coefficient"] <- coef_name } else if (!is.null(coefficient_names)) { colnames(dat)[colnames(dat) == "Coefficient"] <- coefficient_names[i] } # set pretty parameter names dat <- .set_pretty_names(dat, pretty_names) # make sure we have a component- and effects column, for merging if (!"Component" %in% colnames(dat)) { dat$Component <- "conditional" } if (!"Effects" %in% colnames(dat)) { dat$Effects <- "fixed" } if (!"Group" %in% colnames(dat)) { dat$Group <- "" } # add zi-suffix to parameter names if (any(dat$Component == "zero_inflated")) { dat$Parameter[dat$Component == "zero_inflated"] <- paste0(dat$Parameter[dat$Component == "zero_inflated"], " (zi)") } # add suffix ignore <- colnames(dat) %in% c("Parameter", "Component", "Effects", "Group") colnames(dat)[!ignore] <- paste0(colnames(dat)[!ignore], ".", model_name) # save model number, for sorting dat$model <- i dat$model[.in_intercepts(dat$Parameter)] <- 0 dat }) object_attributes <- lapply(m, attributes) names(object_attributes) <- model_names # merge all data frames all_models <- suppressWarnings(Reduce(function(x, y) { merge(x, y, all = TRUE, sort = FALSE, by = c("Parameter", "Component", "Effects", "Group")) }, m)) # find columns with model numbers and create new variable "params_order", # which is pasted together of all model-column indices. Take lowest index of # all model-column indices, which then indicates order of parameters/rows. 
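  # e.g., with three models, a parameter that appears in models 2 and 3 yields
  # "23" and thus sorts by its first digit, 2; intercepts were assigned model
  # number 0 above, so they always come first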
model_cols <- which(startsWith(colnames(all_models), "model")) params_order <- as.numeric(substr(gsub("NA", "", do.call(paste0, all_models[model_cols]), fixed = TRUE), 0, 1)) all_models <- all_models[order(params_order), ] all_models[model_cols] <- NULL # remove empty group-column if (!any(nzchar(as.character(all_models$Group), keepNA = TRUE))) { all_models$Group <- NULL } attr(all_models, "model_names") <- gsub("\"", "", unlist(lapply(model_names, insight::safe_deparse)), fixed = TRUE) attr(all_models, "output_style") <- select attr(all_models, "all_attributes") <- object_attributes attr(all_models, "parameter_groups") <- groups class(all_models) <- c("compare_parameters", "see_compare_parameters", unique(class(all_models))) all_models } #' @rdname compare_parameters #' @export compare_models <- compare_parameters # helper ---------------------------- .set_pretty_names <- function(x, pretty_names) { # check if pretty names should be replaced by value labels # (if we have labelled data) if (isTRUE(getOption("parameters_labels", FALSE)) || identical(pretty_names, "labels")) { attr(x, "pretty_names") <- attr(x, "pretty_labels", exact = TRUE) pretty_names <- TRUE } att <- attributes(x) if (!is.null(att$pretty_names)) { # remove strings with NA names att$pretty_names <- att$pretty_names[!is.na(names(att$pretty_names))] if (length(att$pretty_names) != length(x$Parameter)) { match_pretty_names <- match(names(att$pretty_names), x$Parameter) match_pretty_names <- match_pretty_names[!is.na(match_pretty_names)] if (length(match_pretty_names)) { x$Parameter[match_pretty_names] <- att$pretty_names[x$Parameter[match_pretty_names]] } } else { match_pretty_names <- att$pretty_names[x$Parameter] if (anyNA(match_pretty_names)) { match_pretty_names <- match(names(att$pretty_names), x$Parameter) match_pretty_names <- match_pretty_names[!is.na(match_pretty_names)] if (length(match_pretty_names)) { x$Parameter[match_pretty_names] <- att$pretty_names[x$Parameter[match_pretty_names]] } } else { x$Parameter <- att$pretty_names[x$Parameter] } } } x } parameters/R/methods_MCMCglmm.R0000644000176200001440000000412514717111737016073 0ustar liggesusers#' @export standard_error.MCMCglmm <- function(model, ...) { nF <- model$Fixed$nfl parms <- as.data.frame(model$Sol[, 1:nF, drop = FALSE]) .data_frame( Parameter = .remove_backticks_from_string(colnames(parms)), SE = unname(sapply(parms, stats::sd)) ) } #' @export p_value.MCMCglmm <- function(model, ...) { nF <- model$Fixed$nfl p <- 1 - colSums(model$Sol[, 1:nF, drop = FALSE] > 0) / dim(model$Sol)[1] .data_frame( Parameter = insight::find_parameters(model, effects = "fixed", flatten = TRUE), p = p ) } #' @export model_parameters.MCMCglmm <- function(model, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = "pd", rope_range = "default", rope_ci = 0.95, bf_prior = NULL, diagnostic = c("ESS", "Rhat"), priors = TRUE, keep = NULL, drop = NULL, verbose = TRUE, ...) { # Processing params <- .extract_parameters_bayesian( model, centrality = centrality, dispersion = dispersion, ci = ci, ci_method = ci_method, test = test, rope_range = rope_range, rope_ci = rope_ci, bf_prior = bf_prior, diagnostic = diagnostic, priors = priors, keep_parameters = keep, drop_parameters = drop, verbose = verbose, ... 
)

  attr(params, "pretty_names") <- format_parameters(model)
  attr(params, "ci") <- ci
  attr(params, "object_name") <- insight::safe_deparse_symbol(substitute(model))
  class(params) <- c("parameters_model", "see_parameters_model", class(params))

  params
}
parameters/R/methods_crch.R0000644000176200001440000000100614355245205015405 0ustar liggesusers#' @export
standard_error.crch <- function(model, ...) {
  cs <- do.call(rbind, stats::coef(summary(model), model = "full"))
  params <- insight::get_parameters(model)

  .data_frame(
    Parameter = params$Parameter,
    SE = as.vector(cs[, 2])
  )
}


#' @export
p_value.crch <- function(model, ...) {
  cs <- do.call(rbind, stats::coef(summary(model), model = "full"))
  params <- insight::get_parameters(model)

  .data_frame(
    Parameter = params$Parameter,
    p = as.vector(cs[, 4])
  )
}
parameters/R/p_function.R0000644000176200001440000005242615053035103015111 0ustar liggesusers#' @title p-value or consonance function
#' @name p_function
#'
#' @description Compute p-values and compatibility (confidence) intervals for
#' statistical models, at different levels. This function is also called a
#' consonance function. It allows you to see which estimates are compatible with
#' the model at various compatibility levels. Use `plot()` to generate plots
#' of the _p_ (resp. _consonance_) function and compatibility intervals at
#' different levels.
#'
#' @param ci_levels Vector of scalars, indicating the different levels at which
#' compatibility intervals should be printed or plotted. In plots, these levels
#' are highlighted by vertical lines. It is possible to increase thickness for
#' one or more of these lines by providing a named vector, where the values to
#' be highlighted are named `"emph"`, e.g.
#' `ci_levels = c(0.25, 0.5, emph = 0.95)`.
#' @param x An object returned by [`model_parameters()`][model_parameters].
#' @param digits Number of digits for rounding or significant figures. May also
#' be `"signif"` to return significant figures or `"scientific"` to return
#' scientific notation. Control the number of digits by adding the value as
#' suffix, e.g. `digits = "scientific4"` to have scientific notation with 4
#' decimal places, or `digits = "signif5"` for 5 significant figures (see also
#' [signif()]).
#'
#' @inheritParams model_parameters
#' @inheritParams model_parameters.default
#' @inheritParams model_parameters.glmmTMB
#' @inheritParams standard_error
#' @inheritParams print.parameters_model
#'
#' @note
#' Currently, `p_function()` computes intervals based on Wald t- or z-statistics.
#' For certain models (like mixed models), profiled intervals may be more
#' accurate; however, this is currently not supported.
#'
#' @seealso See also [`equivalence_test()`] and [`p_significance()`] for
#' functions related to checking effect existence and significance.
#'
#' @details
#' ## Compatibility intervals and continuous _p_-values for different estimate values
#'
#' `p_function()` only returns the compatibility interval estimates, not the
#' related _p_-values. This is because the _p_-value for a
#' given estimate value is just `1 - CI_level`. The values indicating the lower
#' and upper limits of the intervals are the related estimates associated with
#' the _p_-value. E.g., if a parameter `x` has a 75% compatibility interval
#' of `(0.81, 1.05)`, then the _p_-value for the estimate value of `0.81`
#' would be `1 - 0.75`, which is `0.25`. This relationship is more intuitive and
#' easier to understand when looking at the plots (using `plot()`).
#' #' ## Conditional versus unconditional interpretation of _p_-values and intervals #' #' `p_function()`, and in particular its `plot()` method, aims at re-interpreting #' _p_-values and confidence intervals (better named: _compatibility_ intervals) #' in _unconditional_ terms. Instead of referring to the long-term property and #' repeated trials when interpreting interval estimates (so-called "aleatory #' probability", _Schweder 2018_), and assuming that all underlying assumptions #' are correct and met, `p_function()` interprets _p_-values in a Fisherian way #' as "_continuous_ measure of evidence against the very test hypothesis _and_ #' entire model (all assumptions) used to compute it" #' (*P-Values Are Tough and S-Values Can Help*, lesslikely.com/statistics/s-values; #' see also _Amrhein and Greenland 2022_). #' #' The common definition of p-values can be considered as "conditional" #' interpretation: #' #' _The p-value is the probability of obtaining test results at least as #' extreme as the result actually observed, under the assumption that the #' null hypothesis is correct (Wikipedia)._ #' #' However, this definition or interpretation is inadequate because it only #' refers to the test hypothesis (often the null hypothesis), which is only #' one component of the entire model that is being tested. Thus, #' _Greenland et al. 2022_ suggest an "unconditional" interpretation. #' #' This interpretation as a continuous measure of evidence against the test #' hypothesis and the entire model used to compute it can be seen in the #' figure below (taken from *P-Values Are Tough and S-Values Can Help*, #' lesslikely.com/statistics/s-values). The "conditional" interpretation of #' _p_-values and interval estimates (A) implicitly assumes certain assumptions #' to be true, thus the interpretation is "conditioned" on these assumptions #' (i.e. assumptions are taken as given, only the hypothesis is tested). The #' unconditional interpretation (B), however, questions _all_ these assumptions. #' #' A non-significant p-value could occur because the test hypothesis is false, #' but could also be the result of any of the model assumptions being incorrect. #' #' \if{html}{\cr \figure{unconditional_interpretation.png}{options: alt="Conditional versus unconditional interpretations of P-values"} \cr} #' #' "Emphasizing unconditional interpretations helps avoid overconfident and #' misleading inferences in light of uncertainties about the assumptions used #' to arrive at the statistical results." (_Greenland et al. 2022_). #' #' **Note:** The term "conditional" as used by Rafi and Greenland probably has #' a slightly different meaning than normally. "Conditional" in this notion #' means that all model assumptions are taken as given - it should not be #' confused with terms like "conditional probability". See also _Greenland et al. 2022_ #' for a detailed elaboration on this issue. #' #' In other words, the term compatibility interval emphasizes "the dependence #' of the _p_-value on the assumptions as well as on the data, recognizing that #' _p_<0.05 can arise from assumption violations even if the effect under #' study is null" (_Gelman/Greenland 2019_). #' #' ## Probabilistic interpretation of p-values and compatibility intervals #' #' Schweder (2018) resp. Schweder and Hjort (2016) (and others) argue that #' confidence curves (as produced by `p_function()`) have a valid probabilistic #' interpretation. 
They distinguish between _aleatory probability_, which
#' describes the aleatory stochastic element of a distribution _ex ante_, i.e.
#' before the data are obtained. This is the classical interpretation of
#' confidence intervals following the Neyman-Pearson school of statistics.
#' However, there is also an _ex post_ probability, called _epistemic_ probability,
#' for confidence curves. The shift in terminology from _confidence_ intervals
#' to _compatibility_ intervals may help emphasize this interpretation.
#'
#' In this sense, the probabilistic interpretation of _p_-values and
#' compatibility intervals is "conditional" - on the data _and_ model assumptions
#' (which is in line with the _"unconditional"_ interpretation in the sense of
#' Rafi and Greenland).
#'
#' Ascribing a probabilistic interpretation to one realized confidence interval
#' is possible without repeated sampling of the specific experiment. The key
#' assumption is that a _sampling distribution_ is a good description of the
#' variability of the parameter (_Vos and Holbert 2022_). At the core, the
#' interpretation of a confidence interval is "I assume that this sampling
#' distribution is a good description of the uncertainty of the parameter. If
#' that's a good assumption, then the values in this interval are the most
#' plausible or compatible with the data". The source of confidence in
#' probability statements is the assumption that the selected sampling
#' distribution is appropriate.
#'
#' "The realized confidence distribution is clearly an epistemic probability
#' distribution" (_Schweder 2018_). In Bayesian words, compatibility intervals
#' (or confidence distributions, or consonance curves) are "posteriors without
#' priors" (_Schweder, Hjort, 2003_).
#'
#' The _p_-value indicates the degree of compatibility of the endpoints of the
#' interval at a given confidence level with (1) the observed data and (2) model
#' assumptions. The observed point estimate (_p_-value = 1) is the value
#' estimated to be _most compatible_ with the data and model assumptions,
#' whereas values far from the observed point estimate (where _p_
#' approaches 0) are least compatible with the data and model assumptions
#' (_Schweder and Hjort 2016, pp. 60-61; Amrhein and Greenland 2022_). In this
#' regard, _p_-values are statements about _confidence_ or _compatibility_:
#' The p-value is not an absolute measure of evidence for a model (such as the
#' null/alternative model), it is a continuous measure of the compatibility of
#' the observed data with the model used to compute it (_Greenland et al. 2016_,
#' _Greenland 2023_). Going one step further, and following _Schweder_, p-values
#' can be considered _epistemic probability_ - "not necessarily of the
#' hypothesis being true, but of it _possibly_ being true" (_Schweder 2018_).
#' Hence, the interpretation of _p_-values might be guided using
#' [`bayestestR::p_to_pd()`].
#'
#' ## Probability or compatibility?
#'
#' We have presented the discussion of p-values and confidence intervals from the
#' perspective of two paradigms, one saying that probability statements can be
#' made, one saying that interpretation is guided in terms of "compatibility".
#' Cox and Hinkley say, "interval estimates cannot be taken as probability
#' statements" (_Cox and Hinkley 1979: 208_), which conflicts with the Schweder
#' and Hjort confidence distribution school.
#' However, if you view interval
#' estimates as intervals of values consistent with the data,
#' this comes close to the idea of (epistemic) probability. We do not believe that
#' these two paradigms contradict or exclude each other. Rather, the aim is to
#' emphasize one point of view or the other, i.e. to place the linguistic
#' nuances either on 'compatibility' or 'probability'.
#'
#' The main take-away is *not* to interpret p-values as dichotomous decisions
#' that distinguish between "we found an effect" (statistically significant) vs.
#' "we found no effect" (statistically not significant) (_Altman and Bland 1995_).
#'
#' ## Compatibility intervals - is their interpretation "conditional" or not?
#'
#' The fact that the term "conditional" is used with different meanings in
#' statistics is confusing. Thus, we would summarize the (probabilistic)
#' interpretation of compatibility intervals as follows: The intervals are built
#' from the data _and_ our modeling assumptions. The accuracy of the intervals
#' depends on our model assumptions. If a value is outside the interval, that
#' might be because (1) that parameter value isn't supported by the data, or (2)
#' the modeling assumptions are a poor fit for the situation. When we make bad
#' assumptions, the compatibility interval might be too wide or (more commonly
#' and seriously) too narrow, making us think we know more about the parameter
#' than is warranted.
#'
#' When we say "there is a 95% chance the true value is in the interval", that is
#' a statement of _epistemic probability_ (i.e. a description of uncertainty related
#' to our knowledge or belief). When we talk about repeated samples or sampling
#' distributions, that is referring to _aleatoric_ (physical properties) probability.
#' Frequentist inference is built on defining estimators with known _aleatoric_
#' probability properties, from which we can draw _epistemic_ probabilistic
#' statements of uncertainty (_Schweder and Hjort 2016_).
#'
#' ## Functions in the parameters package to check for effect existence and significance
#'
#' The **parameters** package provides several options or functions to aid
#' statistical inference. Beyond `p_function()`, there are, for example:
#' - [`equivalence_test()`][equivalence_test.lm], to compute the (conditional)
#'   equivalence test for frequentist models
#' - [`p_significance()`][p_significance.lm], to compute the probability of
#'   *practical significance*, which can be conceptualized as a unidirectional
#'   equivalence test
#' - the `pd` argument (setting `pd = TRUE`) in `model_parameters()` includes
#'   a column with the *probability of direction*, i.e. the probability that a
#'   parameter is strictly positive or negative. See [`bayestestR::p_direction()`]
#'   for details. If plotting is desired, the [`p_direction()`][p_direction.lm]
#'   function can be used, together with `plot()`.
#' - the `s_value` argument (setting `s_value = TRUE`) in `model_parameters()`
#'   replaces the p-values with their related _S_-values (*Rafi and Greenland 2020*)
#' - finally, it is possible to generate distributions of model coefficients by
#'   generating bootstrap-samples (setting `bootstrap = TRUE`) or simulating
#'   draws from model coefficients using [`simulate_model()`]. These samples
#'   can then be treated as "posterior samples" and used in many functions from
#'   the **bayestestR** package.
#'
#' @return A data frame with p-values and compatibility intervals.
#'
#' @references
#' - Altman DG, Bland JM.
Absence of evidence is not evidence of absence. BMJ. #' 1995;311(7003):485. \doi{10.1136/bmj.311.7003.485} #' #' - Amrhein V, Greenland S. Discuss practical importance of results based on #' interval estimates and p-value functions, not only on point estimates and #' null p-values. Journal of Information Technology 2022;37:316–20. #' \doi{10.1177/02683962221105904} #' #' - Cox DR, Hinkley DV. 1979. Theoretical Statistics. 6th edition. #' Chapman and Hall/CRC #' #' - Fraser DAS. The P-value function and statistical inference. The American #' Statistician. 2019;73(sup1):135-147. \doi{10.1080/00031305.2018.1556735} #' #' - Gelman A, Greenland S. Are confidence intervals better termed "uncertainty #' intervals"? BMJ (2019)l5381. \doi{10.1136/bmj.l5381} #' #' - Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference, #' Emphasize Unconditional Compatibility Descriptions of Statistics. (2022) #' https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022) #' #' - Greenland S, Senn SJ, Rothman KJ, Carlin JB, Poole C, Goodman SN, et al. #' (2016). Statistical tests, P values, confidence intervals, and power: A #' guide to misinterpretations. European Journal of Epidemiology. 31:337-350. #' \doi{10.1007/s10654-016-0149-3} #' #' - Greenland S (2023). Divergence versus decision P-values: A distinction #' worth making in theory and keeping in practice: Or, how divergence P-values #' measure evidence even when decision P-values do not. Scand J Statist, 50(1), #' 54-88. #' #' - Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical #' science: Replace confidence and significance by compatibility and surprise. #' BMC Medical Research Methodology. 2020;20(1):244. \doi{10.1186/s12874-020-01105-9} #' #' - Schweder T. Confidence is epistemic probability for empirical science. #' Journal of Statistical Planning and Inference (2018) 195:116–125. #' \doi{10.1016/j.jspi.2017.09.016} #' #' - Schweder T, Hjort NL. Confidence and Likelihood. Scandinavian Journal of #' Statistics. 2002;29(2):309-332. \doi{10.1111/1467-9469.00285} #' #' - Schweder T, Hjort NL. Frequentist analogues of priors and posteriors. #' In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory #' Data Confrontation in Economics, pp. 285-217. Princeton University Press, #' Princeton, NJ, 2003 #' #' - Schweder T, Hjort NL. Confidence, Likelihood, Probability: Statistical #' inference with confidence distributions. Cambridge University Press, 2016. #' #' - Vos P, Holbert D. Frequentist statistical inference without repeated sampling. #' Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x} #' #' @examplesIf requireNamespace("see") #' model <- lm(Sepal.Length ~ Species, data = iris) #' p_function(model) #' #' model <- lm(mpg ~ wt + as.factor(gear) + am, data = mtcars) #' result <- p_function(model) #' #' # single panels #' plot(result, n_columns = 2) #' #' # integrated plot, the default #' plot(result) #' @export p_function <- function(model, ci_levels = c(0.25, 0.5, 0.75, emph = 0.95), exponentiate = FALSE, effects = "fixed", component = "all", vcov = NULL, vcov_args = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) 
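# Sketch of the approach: Wald intervals are computed for every compatibility
# level from 0 to 1 in steps of 0.01; stacked on top of each other, these
# intervals trace the consonance curve, and the p-value associated with the
# limits of any given interval is simply 1 minus its CI level.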
{ # degrees of freedom dof <- insight::get_df(model, type = "wald") # standard errors se <- standard_error( model, effects = effects, component = component, vcov = vcov, vcov_args = vcov_args )$SE if (is.null(dof) || length(dof) == 0 || .is_chi2_model(model, dof)) { dof <- Inf } x <- do.call(rbind, lapply(seq(0, 1, 0.01), function(i) { suppressMessages(.ci_dof( model, ci = i, dof, effects, component, method = "wald", se = se, vcov = NULL, vcov_args = NULL, verbose = TRUE )) })) # data for plotting out <- x[!is.infinite(x$CI_low) & !is.infinite(x$CI_high), ] out$CI <- round(out$CI, 2) # most plausible value (point estimate) point_estimate <- out$CI_low[which.min(out$CI)] if (!is.null(keep) || !is.null(drop)) { out <- .filter_parameters(out, keep = keep, drop = drop, verbose = verbose ) } # transform non-Gaussian if (isTRUE(exponentiate)) { out$CI_low <- exp(out$CI_low) out$CI_high <- exp(out$CI_high) } # data for p_function ribbon data_ribbon <- datawizard::data_to_long( out, select = c("CI_low", "CI_high"), values_to = "x" ) # data for vertical CI level lines out <- out[out$CI %in% ci_levels, ] out$group <- 1 # emphasize focal hypothesis line emphasize <- which(names(ci_levels) == "emph") if (length(emphasize)) { out$group[out$CI == ci_levels[emphasize]] <- 2 } attr(out, "data") <- data_ribbon attr(out, "point_estimate") <- point_estimate attr(out, "pretty_names") <- suppressWarnings(format_parameters(model, ...)) class(out) <- c("parameters_p_function", "see_p_function", "data.frame") out } #' @rdname p_function #' @export consonance_function <- p_function #' @rdname p_function #' @export confidence_curve <- p_function # methods ---------------------- #' @export plot.parameters_p_function <- function(x, ...) { insight::check_if_installed("see") NextMethod() } #' @rdname p_function #' @export format.parameters_p_function <- function(x, digits = 2, format = NULL, ci_width = NULL, ci_brackets = TRUE, pretty_names = TRUE, ...) { # print dat <- lapply(split(x, x$CI), function(i) { ci <- as.character(i$CI)[1] out <- datawizard::data_rename( i, select = c("CI_low", "CI_high"), replacement = c(sprintf("CI_low_%s", ci), sprintf("CI_high_%s", ci)) ) out$CI <- NULL out$group <- NULL out }) out <- do.call(datawizard::data_merge, list(dat, by = "Parameter")) attr(out, "pretty_names") <- attributes(x)$pretty_names insight::format_table( out, digits = digits, ci_width = ci_width, ci_brackets = ci_brackets, format = format, pretty_names = pretty_names ) } #' @rdname p_function #' @export print.parameters_p_function <- function(x, digits = 2, ci_width = "auto", ci_brackets = TRUE, pretty_names = TRUE, ...) { cat(.print_p_function( x, digits, ci_width, ci_brackets, pretty_names = pretty_names, format = "text", ... )) } # helper ---------- .print_p_function <- function(x, digits = 2, ci_width = "auto", ci_brackets = c("(", ")"), pretty_names = TRUE, format = "html", ...) { # which engine? engine <- .check_format_backend(...) formatted_table <- format( x, digits = digits, format = format, ci_width = ci_width, ci_brackets = ci_brackets, pretty_names = pretty_names, ... ) # set engine for html format if (format == "html" && identical(engine, "tt")) { format <- "tt" } insight::export_table( formatted_table, format = format, caption = "Consonance Function", ... 
) } # model <- lm(Sepal.Length ~ Species, data = iris) # for later use: highlight p-value for secific parameter estimate values # stat <- insight::get_statistic(model) # se <- parameters::standard_error(model) # estimate to test against - compute p-value for specific estimate # null_estimate <- 1.5 # p <- 2 * stats::pt(abs(stat$Statistic[3]) - (null_estimate / se$SE[3]), df = 147, lower.tail = FALSE) # bayestestR::p_to_pd(p) parameters/R/methods_mclust.R0000644000176200001440000000066714717111737016015 0ustar liggesusers#' @export model_parameters.Mclust <- function(model, data = NULL, clusters = NULL, ...) { if (is.null(data)) data <- as.data.frame(model$data) if (is.null(clusters)) clusters <- model$classification params <- .cluster_centers_params(data, clusters, ...) attr(params, "model") <- model attr(params, "type") <- "mixture" attr(params, "title") <- "Gaussian finite mixture model fitted by EM algorithm" params } parameters/R/methods_lm.R0000644000176200001440000000162114736731407015110 0ustar liggesusers# lm: .lm, .summary.lm # .lm --------------------- #' @export p_value.lm <- p_value.default #' @export ci.lm <- function(x, ci = 0.95, method = "residual", ...) { .ci_generic(model = x, ci = ci, method = method, ...) } # .summary.lm --------------------- #' @export standard_error.summary.lm <- function(model, ...) { cs <- stats::coef(model) data.frame( Parameter = rownames(cs), SE = as.vector(cs[, 2]), stringsAsFactors = FALSE, row.names = NULL ) } #' @export p_value.summary.lm <- function(model, ...) { cs <- stats::coef(model) data.frame( Parameter = rownames(cs), p = as.vector(cs[, 4]), stringsAsFactors = FALSE, row.names = NULL ) } #' @export ci.summary.lm <- function(x, ci = 0.95, method = "residual", ...) { .ci_generic(model = x, ci = ci, method = method, dof = insight::get_df(x), ...) } parameters/R/methods_gjrm.R0000644000176200001440000000404314507235543015434 0ustar liggesusers#' @export model_parameters.SemiParBIV <- function(model, ci = 0.95, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ...) { out <- .model_parameters_generic( model = model, ci = ci, bootstrap = bootstrap, iterations = iterations, component = "all", merge_by = c("Parameter", "Component"), standardize = standardize, exponentiate = exponentiate, keep_parameters = keep, drop_parameters = drop, p_adjust = p_adjust, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export p_value.SemiParBIV <- function(model, ...) { s <- summary(model) s <- insight::compact_list(s[startsWith(names(s), "tableP")]) params <- do.call(rbind, lapply(seq_along(s), function(i) { out <- as.data.frame(s[[i]]) out$Parameter <- rownames(out) out$Component <- paste0("Equation", i) out })) colnames(params)[4] <- "p" rownames(params) <- NULL insight::text_remove_backticks(params[c("Parameter", "p", "Component")], verbose = FALSE) } #' @export standard_error.SemiParBIV <- function(model, ...) 
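# `summary()` for "SemiParBIV" objects returns one coefficient table per
# equation ("tableP1", "tableP2", ...); the tables are stacked below and each
# row is tagged with its equation in the "Component" column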
{ s <- summary(model) s <- insight::compact_list(s[startsWith(names(s), "tableP")]) params <- do.call(rbind, lapply(seq_along(s), function(i) { out <- as.data.frame(s[[i]]) out$Parameter <- rownames(out) out$Component <- paste0("Equation", i) out })) colnames(params)[2] <- "SE" rownames(params) <- NULL insight::text_remove_backticks(params[c("Parameter", "SE", "Component")], verbose = FALSE) } parameters/R/ci_generic.R0000644000176200001440000001306215073732442015041 0ustar liggesusers# generic function for CI calculation .ci_generic <- function( model, ci = 0.95, method = "wald", dof = NULL, effects = "fixed", component = "all", vcov = NULL, vcov_args = NULL, verbose = TRUE, ... ) { # check method if (is.null(method)) { method <- "wald" } method <- tolower(method) # fmt: skip method <- insight::validate_argument( method, c( "wald", "ml1", "betwithin", "kr", "satterthwaite", "kenward", "boot", "profile", "residual", "normal" ) ) effects <- insight::validate_argument(effects, c("fixed", "random", "all")) # fmt: skip component <- insight::validate_argument( component, c( "all", "conditional", "zi", "zero_inflated", "dispersion", "precision", "scale", "smooth_terms", "full", "marginal" ) ) if (method == "ml1") { # nolint return(ci_ml1(model, ci = ci)) } else if (method == "betwithin") { return(ci_betwithin(model, ci = ci)) } else if (method == "satterthwaite") { return(ci_satterthwaite(model, ci = ci)) } else if (method %in% c("kenward", "kr")) { return(ci_kenward(model, ci = ci)) } # default CIs follow here (methods wald, boot, profile, residual, normal) out <- lapply(ci, function(i) { .ci_dof( model = model, ci = i, dof = dof, effects = effects, component = component, method = method, vcov = vcov, vcov_args = vcov_args, verbose = verbose, ... ) }) out <- do.call(rbind, out) row.names(out) <- NULL out } #' @keywords internal .ci_dof <- function( model, ci, dof, effects, component, method = "wald", se = NULL, vcov = NULL, vcov_args = NULL, verbose = TRUE, ... ) { # need parameters to calculate the CIs if (inherits(model, "emmGrid")) { params <- insight::get_parameters( model, effects = effects, component = component, merge_parameters = TRUE ) } else { params <- insight::get_parameters( model, effects = effects, component = component, verbose = FALSE ) } # check if all estimates are non-NA params <- .check_rank_deficiency(model, params, verbose = FALSE) # for polr, we need to fix parameter names params$Parameter <- gsub("Intercept: ", "", params$Parameter, fixed = TRUE) # validation check... if (is.null(method)) { method <- "wald" } method <- tolower(method) # Fist, we want standard errors for parameters # -------------------------------------------- # if we have adjusted SE, e.g. from kenward-roger, don't recompute # standard errors to save time... if (is.null(se)) { if (!is.null(vcov) || isTRUE(list(...)[["robust"]])) { # robust (HC) standard errors? stderror <- standard_error( model, component = component, vcov = vcov, vcov_args = vcov_args, verbose = verbose, ... ) } else { # normal standard errors, including small-sample approximations stderror <- switch( method, kenward = se_kenward(model), kr = se_kenward(model), satterthwaite = se_satterthwaite(model), standard_error(model, component = component) ) } # if we have a non-empty stderror, use it if (insight::is_empty_object(stderror)) { return(NULL) } # filter non-matching parameters, resp. 
sort stderror and parameters, # so both have the identical order of values if ( nrow(stderror) != nrow(params) || !all(stderror$Parameter %in% params$Parameter) || !all(order(stderror$Parameter) == order(params$Parameter)) ) { params <- stderror <- merge(stderror, params, sort = FALSE) } se <- stderror$SE } # Next, we need degrees of freedom # -------------------------------- # check if we have a valid dof vector if (is.null(dof)) { # residual df dof <- insight::get_df(x = model, type = method, verbose = FALSE) # make sure we have a value for degrees of freedom if (is.null(dof) || length(dof) == 0 || .is_chi2_model(model, dof)) { dof <- Inf } else if (length(dof) > nrow(params)) { # filter non-matching parameters dof <- dof[seq_len(nrow(params))] } } # Now we can calculate CIs # ------------------------ alpha <- (1 + ci) / 2 fac <- suppressWarnings(stats::qt(alpha, df = dof)) out <- cbind(CI_low = params$Estimate - se * fac, CI_high = params$Estimate + se * fac) out <- as.data.frame(out) out$CI <- ci out$Parameter <- params$Parameter out <- out[c("Parameter", "CI", "CI_low", "CI_high")] if ("Component" %in% names(params)) { out$Component <- params$Component } if ("Effects" %in% names(params) && effects != "fixed") { out$Effects <- params$Effects } if ("Response" %in% names(params)) { out$Response <- params$Response } if ("Group" %in% names(params) && inherits(model, c("lcmm", "externX", "externVar"))) { out$Group <- params$Group } # for cox-panel models, we have non-linear parameters with NA coefficient, # but test statistic and p-value - don't check for NA estimates in this case if (anyNA(params$Estimate) && !inherits(model, "coxph.penal")) { out[stats::complete.cases(out), ] } else { out } } .is_chi2_model <- function(model, dof) { statistic <- insight::find_statistic(model) (all(dof == 1) && identical(statistic, "chi-squared statistic")) } parameters/R/methods_hglm.R0000644000176200001440000001534114761570351015430 0ustar liggesusers# #' @export # p_value.hglm <- function(model, ...) { # stat <- insight::get_statistic(model) # .data_frame( # Parameter = stat$Parameter, # p = 2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE) # ) # } # #' @export # ci.hglm <- function(x, ci = 0.95, ...) { # .ci_generic(model = x, ci = ci, ...) # } #' @export model_parameters.hglm <- function(model, ci = 0.95, ci_method = NULL, bootstrap = FALSE, iterations = 1000, exponentiate = FALSE, effects = "all", component = "all", p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { # which components to return? effects <- match.arg(effects, choices = c("fixed", "random", "all")) component <- match.arg(component, choices = c("all", "conditional", "dispersion")) # fixed effects mp <- model_parameters.default( model, ci = ci, ci_method = ci_method, bootstrap = bootstrap, effects = "fixed", component = "conditional", iterations = iterations, exponentiate = exponentiate, p_adjust = p_adjust, include_info = include_info, keep = keep, drop = drop, verbose = verbose, ... ) # hglm has a special structure, so we add random effects and dispersion # manually here... 
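  # (estimates, SEs and CIs are retrieved separately for each component and
  # bound to the fixed-effects table; no test statistics, dfs or p-values are
  # available for these extra rows, so they are set to NA)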
if (effects %in% c("all", "random")) {
    re_params <- insight::get_parameters(model, effects = "random", component = "conditional")
    re_se <- standard_error(model, effects = "random", component = "conditional")
    re_ci <- ci(model, effects = "random", component = "conditional")
    # bind all results
    re_params <- cbind(
      re_params[c("Parameter", "Estimate")],
      re_se["SE"],
      re_ci[c("CI", "CI_low", "CI_high")]
    )
    # no values for statistic, df and p
    re_params$t <- re_params$df_error <- re_params$p <- NA
    # add effects-columns
    mp$Effects <- "fixed"
    re_params$Effects <- "random"
    # renaming
    colnames(re_params)[colnames(re_params) == "Estimate"] <- "Coefficient"
    # bind together
    mp <- rbind(mp, re_params)
  }

  # add dispersion model
  has_dispersion <- !is.null(insight::find_formula(model, verbose = FALSE)$dispersion)
  if (has_dispersion && component %in% c("all", "dispersion")) {
    disp_params <- insight::get_parameters(model, effects = "fixed", component = "dispersion")
    disp_se <- standard_error(model, effects = "fixed", component = "dispersion")
    disp_ci <- ci(model, effects = "fixed", component = "dispersion")
    # bind all results
    disp_params <- cbind(
      disp_params[c("Parameter", "Estimate")],
      disp_se["SE"],
      disp_ci[c("CI", "CI_low", "CI_high")]
    )
    # no values for statistic, df and p
    disp_params$t <- disp_params$df_error <- disp_params$p <- NA
    # add effects-columns
    if (is.null(mp$Effects)) {
      mp$Effects <- "fixed"
    }
    disp_params$Effects <- "fixed"
    # add component-columns
    mp$Component <- "conditional"
    disp_params$Component <- "dispersion"
    # renaming
    colnames(disp_params)[colnames(disp_params) == "Estimate"] <- "Coefficient"
    # bind together
    mp <- rbind(mp, disp_params)
  }

  mp
}


#' @export
standard_error.hglm <- function(model,
                                effects = "fixed",
                                component = "conditional",
                                verbose = TRUE,
                                ...) {
  effects <- match.arg(effects, choices = c("all", "fixed", "random"))
  component <- match.arg(component, choices = c("all", "conditional", "dispersion"))
  f <- insight::find_formula(model, verbose = FALSE)
  if (component == "dispersion" && is.null(f$dispersion)) {
    if (verbose) {
      insight::format_alert("No standard errors found for model's dispersion parameters.")
    }
    return(NULL)
  }
  # validation check, make sure we have a dispersion component,
  # else fall back to the conditional component
  if (component == "all" && is.null(f$dispersion)) {
    component <- "conditional"
  }

  s <- summary(model)
  if (effects == "fixed") {
    se <- s$FixCoefMat
  } else if (effects == "random") {
    se <- s$RandCoefMat
  } else {
    se <- c(s$FixCoefMat, s$RandCoefMat)
  }
  out <- .data_frame(
    Parameter = row.names(se),
    SE = as.vector(se[, 2])
  )
  # dispersion component?
  if (effects != "random" && component %in% c("dispersion", "all")) {
    se <- s$SummVC1
    disp <- .data_frame(
      Parameter = row.names(se),
      SE = as.vector(se[, 2]),
      Component = "dispersion"
    )
    if (component == "dispersion") {
      out <- disp
    } else {
      out$Component <- "conditional"
      out <- rbind(out, disp)
    }
  }
  out
}


#' @export
ci.hglm <- function(x,
                    ci = 0.95,
                    method = "wald",
                    dof = NULL,
                    effects = "fixed",
                    component = "conditional",
                    verbose = TRUE,
                    ...) {
  effects <- match.arg(effects, choices = c("all", "fixed", "random"))
  component <- match.arg(component, choices = c("all", "conditional", "dispersion"))
  # fixed effects -----------------
  if (effects %in% c("fixed", "all")) {
    out <- .ci_generic(
      x,
      ci = ci,
      method = method,
      dof = dof,
      effects = "fixed",
      component = component,
      verbose = verbose,
      ...
)
  }

  # add random effects -----------------
  if (effects %in% c("random", "all")) {
    se <- standard_error(x, effects = "random", component = "conditional")$SE
    .ci_re <- .ci_dof(
      x,
      ci = ci,
      method = method,
      dof = dof,
      effects = "random",
      component = "conditional",
      se = se,
      verbose = verbose,
      ...
    )
    if (effects == "all") {
      out <- rbind(out, .ci_re)
    } else {
      out <- .ci_re
    }
  }
  out
}


#' @export
p_value.hglm <- function(model, dof = NULL, method = NULL, verbose = TRUE, ...) {
  dots <- list(...)
  dots$component <- NULL
  fun_args <- list(
    model,
    dof = dof,
    component = "conditional",
    method = method,
    verbose = verbose
  )
  fun_args <- c(fun_args, dots)
  do.call("p_value.default", fun_args)
}
parameters/R/n_clusters.R0000644000176200001440000002432214736731407015131 0ustar liggesusers#' @title Find number of clusters in your data
#' @name n_clusters
#'
#' @description
#' Similarly to [`n_factors()`] for factor / principal component analysis,
#' `n_clusters()` is the main function to find out the optimal number of clusters
#' present in the data based on the maximum consensus of a large number of
#' methods.
#'
#' Essentially, there exist many methods to determine the optimal number of
#' clusters, each with its own strengths and limitations. The main
#' `n_clusters()` function runs all of them and returns the number of
#' clusters that is suggested by the majority of methods (in case of ties, it
#' will select the most parsimonious solution with fewer clusters).
#'
#' Note that we also implement some specific, commonly used methods, like the
#' Elbow or the Gap method, with their own visualization functionalities. See
#' the examples below for more details.
#'
#' @param x A data frame.
#' @param standardize Standardize the data frame before clustering (default).
#' @param include_factors Logical, if `TRUE`, factors are converted to numerical
#' values in order to be included in the data for determining the number of
#' clusters. By default, factors are removed, because most methods that
#' determine the number of clusters need numeric input only.
#' @param package Package from which methods are to be called to determine the
#' number of clusters. Can be `"all"` or a vector containing
#' `"easystats"`, `"NbClust"`, `"mclust"`, and `"M3C"`.
#' @param fast If `FALSE`, will compute 4 more indices (sets `index = "alllong"`
#' in `NbClust`). This has been deactivated by default as it is
#' computationally heavy.
#' @param n_max Maximal number of clusters to test.
#' @param clustering_function,gap_method Other arguments passed to other
#' functions. `clustering_function` is used by `fviz_nbclust()` and
#' can be `kmeans`, `cluster::pam`, `cluster::clara`, `cluster::fanny`, and
#' more. `gap_method` is used by `cluster::maxSE` to extract the optimal
#' number of clusters (see its `method` argument).
#' @param method,min_size,eps_n,eps_range Arguments for the DBSCAN algorithm.
#' @param distance_method The distance method (passed to [`dist()`]). Used by
#' algorithms relying on the distance matrix, such as `hclust` or `dbscan`.
#' @param hclust_method The hierarchical clustering method (passed to [`hclust()`]).
#' @param nbclust_method The clustering method (passed to `NbClust::NbClust()`
#' as `method`).
#' @inheritParams model_parameters.default
#'
#' @note There is also a [`plot()`-method](https://easystats.github.io/see/articles/parameters.html) implemented in the [**see**-package](https://easystats.github.io/see/).
#' #' @examples #' \donttest{ #' library(parameters) #' #' # The main 'n_clusters' function =============================== #' if (require("mclust", quietly = TRUE) && require("NbClust", quietly = TRUE) && #' require("cluster", quietly = TRUE) && require("see", quietly = TRUE)) { #' n <- n_clusters(iris[, 1:4], package = c("NbClust", "mclust")) # package can be "all" #' n #' summary(n) #' as.data.frame(n) # Duration is the time elapsed for each method in seconds #' plot(n) #' #' # The following runs all the method but it significantly slower #' # n_clusters(iris[1:4], standardize = FALSE, package = "all", fast = FALSE) #' } #' } #' @export n_clusters <- function(x, standardize = TRUE, include_factors = FALSE, package = c("easystats", "NbClust", "mclust"), fast = TRUE, nbclust_method = "kmeans", n_max = 10, ...) { if (all(package == "all")) { package <- c("easystats", "NbClust", "mclust", "M3C") } x <- .prepare_data_clustering(x, include_factors = include_factors, standardize = standardize, ...) out <- data.frame() if ("easystats" %in% tolower(package)) { out <- rbind(out, .n_clusters_easystats(x, n_max = n_max, ...)) } if ("nbclust" %in% tolower(package)) { out <- rbind(out, .n_clusters_NbClust(x, fast = fast, nbclust_method = nbclust_method, n_max = n_max, ...)) } if ("mclust" %in% tolower(package)) { out <- rbind(out, .n_clusters_mclust(x, n_max = n_max, ...)) } if ("M3C" %in% tolower(package)) { out <- rbind(out, .n_clusters_M3C(x, n_max = n_max, fast = fast)) } # Drop Nans out <- out[!is.na(out$n_Clusters), ] # Error if no solution if (nrow(out) == 0) { insight::format_error("No complete solution was found. Please try again with more methods.") } # Clean out <- out[order(out$n_Clusters), ] # Arrange by n clusters row.names(out) <- NULL # Reset row index out$Method <- as.character(out$Method) # Remove duplicate methods starting with the smallest dupli <- NULL for (i in seq_len(nrow(out))) { if (i > 1 && out[i, "Method"] %in% out$Method[1:i - 1]) { dupli <- c(dupli, i) } } if (!is.null(dupli)) { out <- out[-dupli, ] } # Add summary by_clusters <- .data_frame( n_Clusters = as.numeric(unique(out$n_Clusters)), n_Methods = as.numeric(by(out, as.factor(out$n_Clusters), function(out) n <- nrow(out))) ) attr(out, "summary") <- by_clusters attr(out, "n") <- min(as.numeric(as.character( by_clusters[by_clusters$n_Methods == max(by_clusters$n_Methods), "n_Clusters"] ))) class(out) <- c("n_clusters", "see_n_clusters", class(out)) out } #' @keywords internal .n_clusters_mclust <- function(x, n_max = 10, ...) { insight::check_if_installed("mclust") t0 <- Sys.time() mclustBIC <- mclust::mclustBIC # this is needed as it is internally required by the following function BIC <- mclust::mclustBIC(x, G = 1:n_max, verbose = FALSE) # Extract the best solutions as shown in summary(BIC) out <- strsplit(names(unclass(summary(BIC))), split = ",", fixed = TRUE) # Get separated vectors models <- as.character(sapply(out, function(x) x[[1]])) n <- as.numeric(sapply(out, function(x) x[[2]])) .data_frame( n_Clusters = n, Method = paste0("Mixture (", models, ")"), Package = "mclust", Duration = as.numeric(difftime(Sys.time(), t0, units = "secs")) ) } # Methods ----------------------------------------------------------------- #' @keywords internal .n_clusters_easystats <- function(x, n_max = 10, ...) { elb <- n_clusters_elbow(x, preprocess = FALSE, n_max = n_max, ...) sil <- n_clusters_silhouette(x, preprocess = FALSE, n_max = n_max, ...) 
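  # two flavors of the Gap statistic, computed below: Maechler et al.
  # (2012, "firstSEmax") and Dudoit and Fridlyand (2002, "globalSEmax")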
gap1 <- n_clusters_gap(x, preprocess = FALSE, gap_method = "firstSEmax", n_max = n_max, ...) gap2 <- n_clusters_gap(x, preprocess = FALSE, gap_method = "globalSEmax", n_max = n_max, ...) .data_frame( n_Clusters = c( attributes(elb)$n, attributes(sil)$n, attributes(gap1)$n, attributes(gap2)$n ), Method = c("Elbow", "Silhouette", "Gap_Maechler2012", "Gap_Dudoit2002"), Package = "easystats", Duration = c( attributes(elb)$duration, attributes(sil)$duration, attributes(gap1)$duration, attributes(gap2)$duration ) ) } #' @keywords internal .n_clusters_NbClust <- function(x, fast = TRUE, nbclust_method = "kmeans", n_max = 10, indices = "all", ...) { insight::check_if_installed("NbClust") if (all(indices == "all")) { indices <- c( "kl", "Ch", "Hartigan", "CCC", "Scott", "Marriot", "trcovw", "Tracew", "Friedman", "Rubin", "Cindex", "DB", "Silhouette", "Duda", "Pseudot2", "Beale", "Ratkowsky", "Ball", "PtBiserial", "Frey", "Mcclain", "Dunn", "SDindex", "SDbw", "gap", "gamma", "gplus", "tau" ) # c("hubert", "dindex") are graphical methods } if (fast) { indices <- indices[!indices %in% c("gap", "gamma", "gplus", "tau")] } out <- data.frame() for (idx in indices) { t0 <- Sys.time() n <- tryCatch( expr = { .catch_warnings(NbClust::NbClust( x, index = tolower(idx), method = nbclust_method, max.nc = n_max, ... )) }, error = function(e) { NULL } ) if (!is.null(n)) { # Catch and print potential warnings w <- "" if (!is.null(n$warnings)) { w <- paste0("\n - ", unlist(n$warnings), collapse = "") insight::format_warning(paste0("For ", idx, " index (NbClust):", w)) } # Don't merge results if convergence issue if (!grepl("did not converge in", w, fixed = TRUE)) { out <- rbind(out, .data_frame( n_Clusters = n$out$Best.nc[["Number_clusters"]], Method = idx, Package = "NbClust", Duration = as.numeric(difftime(Sys.time(), t0, units = "secs")) )) } } } out } #' @keywords internal .n_clusters_M3C <- function(x, n_max = 10, fast = TRUE, ...) { if (!requireNamespace("M3C", quietly = TRUE)) { # nolint insight::format_error( "Package `M3C` required for this function to work. Please install it by first running `remotes::install_github('https://github.com/crj32/M3C')` (the package is not on CRAN)." ) # Not on CRAN (but on github and bioconductor) } data <- data.frame(t(x)) colnames(data) <- paste0("x", seq(1, ncol(data))) # Add columns names as required by the package t0 <- Sys.time() out <- M3C::M3C(data, method = 2, maxK = n_max, removeplots = TRUE, silent = TRUE) out <- .data_frame( n_Clusters = out$scores[which.min(out$scores$PCSI), "K"], Method = "Consensus clustering algorithm (penalty term)", Package = "M3C", Duration = as.numeric(difftime(Sys.time(), t0, units = "secs")) ) # Monte Carlo Version (Super slow) if (isFALSE(fast)) { t0 <- Sys.time() out2 <- M3C::M3C(data, method = 1, maxK = n_max, removeplots = TRUE, silent = TRUE) out <- rbind( out, .data_frame( n_Clusters = out2$scores[which.max(out2$scores$RCSI), "K"], Method = "Consensus clustering algorithm (Monte Carlo)", Package = "M3C", Duration = as.numeric(difftime(Sys.time(), t0, units = "secs")) ) ) } out } parameters/R/methods_sem.R0000644000176200001440000000137714507235543015270 0ustar liggesusers#' @export model_parameters.sem <- model_parameters.default #' @export standard_error.sem <- function(model, ...) { if (!.is_semLme(model)) { return(NULL) } if (is.null(model$se)) { insight::format_alert( "Model has no standard errors. Please fit model again with bootstrapped standard errors." 
) return(NULL) } .data_frame( Parameter = names(model$se), SE = unname(model$se) ) } #' @export p_value.sem <- function(model, ...) { if (!.is_semLme(model)) { return(NULL) } stat <- insight::get_statistic(model) if (is.null(stat)) { return(NULL) } .data_frame( Parameter = stat$Parameter, p = 2 * stats::pnorm(abs(stat$Statistic), lower.tail = FALSE) ) } parameters/R/methods_averaging.R0000644000176200001440000000526714761570351016452 0ustar liggesusers# classes: .averaging #################### .averaging #' @export model_parameters.averaging <- function(model, ci = 0.95, component = "conditional", exponentiate = FALSE, p_adjust = NULL, include_info = getOption("parameters_info", FALSE), keep = NULL, drop = NULL, verbose = TRUE, ...) { component <- insight::validate_argument(component, c("conditional", "full")) out <- .model_parameters_generic( model = model, ci = ci, merge_by = "Parameter", exponentiate = exponentiate, component = component, p_adjust = p_adjust, keep_parameters = keep, drop_parameters = drop, include_info = include_info, ... ) attr(out, "object_name") <- insight::safe_deparse_symbol(substitute(model)) out } #' @export standard_error.averaging <- function(model, component = "conditional", ...) { component <- insight::validate_argument(component, c("conditional", "full")) params <- insight::get_parameters(model, component = component) if (component == "full") { s <- summary(model)$coefmat.full } else { s <- summary(model)$coefmat.subset } .data_frame( Parameter = .remove_backticks_from_string(params$Parameter), SE = as.vector(s[, 3]) ) } #' @export p_value.averaging <- function(model, component = "conditional", ...) { component <- insight::validate_argument(component, c("conditional", "full")) params <- insight::get_parameters(model, component = component) if (component == "full") { s <- summary(model)$coefmat.full } else { s <- summary(model)$coefmat.subset } # to data frame s <- as.data.frame(s) # do we have a p-value column based on t? pvcn <- which(colnames(s) == "Pr(>|t|)") # if not, do we have a p-value column based on z? if (length(pvcn) == 0) { pvcn <- which(colnames(s) == "Pr(>|z|)") } # if not, default to ncol if (length(pvcn) == 0) { if (ncol(s) > 4) { pvcn <- 5 } else { pvcn <- 4 } } .data_frame( Parameter = .remove_backticks_from_string(params$Parameter), p = as.vector(s[, pvcn]) ) } #' @export ci.averaging <- function(x, ci = 0.95, component = "conditional", ...) { component <- insight::validate_argument(component, c("conditional", "full")) .ci_generic(model = x, ci = ci, dof = Inf, component = component) } parameters/R/format_order.R0000644000176200001440000000303514477616760015447 0ustar liggesusers#' Order (first, second, ...) formatting #' #' Format order. #' #' @param order value or vector of orders. #' @param textual Return number as words. If `FALSE`, will run [insight::format_value()]. #' @param ... Arguments to be passed to [insight::format_value()] if `textual` is `FALSE`. #' #' @return A formatted string. #' @examples #' format_order(2) #' format_order(8) #' format_order(25, textual = FALSE) #' @export format_order <- function(order, textual = TRUE, ...) 
{
  if (textual) {
    order <- insight::format_number(order)
    parts <- unlist(strsplit(order, " ", fixed = TRUE))
    parts[length(parts)] <- switch(utils::tail(parts, 1),
      one = "first",
      two = "second",
      three = "third",
      four = "fourth",
      five = "fifth",
      six = "sixth",
      seven = "seventh",
      eight = "eighth",
      nine = "ninth"
    )
    out <- paste(parts, collapse = " ")
  } else {
    number <- insight::format_value(order, digits = 0, ...)
    last <- substr(number, nchar(number), nchar(number))
    last_two <- substr(number, nchar(number) - 1, nchar(number))

    # exceptions
    if (last_two %in% c(11, 12, 13)) {
      out <- paste0(number, "th")
    } else {
      out <- paste0(
        number,
        switch(last,
          `1` = "st",
          `2` = "nd",
          `3` = "rd",
          `4` = "th",
          `5` = "th",
          `6` = "th",
          `7` = "th",
          `8` = "th",
          `9` = "th",
          `0` = "th"
        )
      )
    }
  }
  out
}
parameters/R/ci_satterthwaite.R0000644000176200001440000000103515073732442016312 0ustar liggesusers#' @rdname p_value_satterthwaite
#' @export
ci_satterthwaite <- function(model, ci = 0.95, ...) {
  df_satter <- dof_satterthwaite(model)
  if (inherits(model, "glmmTMB")) {
    component <- "conditional"
  } else {
    component <- "all"
  }
  out <- lapply(ci, function(i) {
    .ci_dof(
      model = model,
      ci = i,
      dof = df_satter,
      effects = "fixed",
      component = component,
      method = "satterthwaite",
      ...
    )
  })
  out <- do.call(rbind, out)
  row.names(out) <- NULL
  out
}
parameters/R/methods_lrm.R0000644000176200001440000000275114736731407015273 0ustar liggesusers## from rms / rmsb package

# model parameters -------------

#' @export
model_parameters.blrm <- model_parameters.bayesQR

# standard error -------------

#' @export
standard_error.lrm <- function(model, ...) {
  se <- sqrt(diag(stats::vcov(model)))
  # psm-models return a vcov-matrix w/o dimnames
  if (is.null(names(se))) names(se) <- names(stats::coef(model))
  .data_frame(
    Parameter = .remove_backticks_from_string(names(se)),
    SE = as.vector(se)
  )
}

#' @export
standard_error.ols <- standard_error.lrm

#' @export
standard_error.rms <- standard_error.lrm

#' @export
standard_error.psm <- standard_error.lrm

# p-values -----------------------

#' @export
p_value.lrm <- function(model, ...) {
  stat <- insight::get_statistic(model)
  # Issue #697: typically the degrees of freedom are the same for every
  # observation, but the value is repeated. This poses problems in multiple
  # imputation models with Hmisc when we get more df values than parameters.
  dof <- insight::get_df(model, type = "wald")
  dfu <- unique(dof)
  if (length(dfu) == 1) {
    dof <- dfu
  }
  p <- 2 * stats::pt(abs(stat$Statistic), df = dof, lower.tail = FALSE)
  .data_frame(
    Parameter = .remove_backticks_from_string(stat$Parameter),
    p = as.vector(p)
  )
}

#' @export
p_value.ols <- p_value.lrm

#' @export
p_value.rms <- p_value.lrm

#' @export
p_value.psm <- p_value.lrm

#' @export
p_value.blrm <- p_value.BFBayesFactor
parameters/R/format_df_adjust.R0000644000176200001440000000175714477616760016300 0ustar liggesusers#' Format the name of the degrees-of-freedom adjustment methods
#'
#' Format the name of the degrees-of-freedom adjustment methods.
#'
#' @param method Name of the method.
#' @param approx_string,dof_string Suffix added to the name of the method in
#'   the returned string.
#'
#' @examples
#' library(parameters)
#'
#' format_df_adjust("kenward")
#' format_df_adjust("kenward", approx_string = "", dof_string = " DoF")
#' @return A formatted string.
#' @export
format_df_adjust <- function(method,
                             approx_string = "-approximated",
                             dof_string = " degrees of freedom") {
  method <- tolower(method)

  out <- switch(method,
    kr = ,
    `kenward-rogers` = ,
    `kenward-roger` = ,
    kenward = "Kenward-Roger",
    ml1 = "m-l-1",
    betwithin = ,
    bw = "Between-within",
    fit = "Residual",
    boot = "Bootstrapped",
    insight::format_capitalize(method)
  )

  paste0(out, approx_string, dof_string)
}
parameters/R/methods_hclust.R0000644000176200001440000001133115066721001015763 0ustar liggesusers#' Parameters from Cluster Models (k-means, ...)
#'
#' Format cluster models obtained for example by [kmeans()].
#'
#' @param model Cluster model.
#' @param data A data frame.
#' @param clusters A vector of cluster assignments (must be same length as
#'   rows in data).
#' @param ... Arguments passed to or from other methods.
#'
#' @examplesIf all(insight::check_if_installed(c("dbscan", "cluster", "fpc"), quietly = TRUE))
#' \donttest{
#' #
#' # K-means -------------------------------
#' model <- kmeans(iris[1:4], centers = 3)
#' rez <- model_parameters(model)
#' rez
#'
#' # Get clusters
#' predict(rez)
#'
#' # Clusters centers in long form
#' attributes(rez)$means
#'
#' # Between and Total Sum of Squares
#' attributes(rez)$Sum_Squares_Total
#' attributes(rez)$Sum_Squares_Between
#'
#' #
#' # Hierarchical clustering (hclust) ---------------------------
#' data <- iris[1:4]
#' model <- hclust(dist(data))
#' clusters <- cutree(model, 3)
#'
#' rez <- model_parameters(model, data, clusters)
#' rez
#'
#' # Get clusters
#' predict(rez)
#'
#' # Clusters centers in long form
#' attributes(rez)$means
#'
#' # Between and Total Sum of Squares
#' attributes(rez)$Sum_Squares_Total
#' attributes(rez)$Sum_Squares_Between
#'
#' #
#' # K-Medoids (PAM and HPAM) ==============
#' model <- cluster::pam(iris[1:4], k = 3)
#' model_parameters(model)
#'
#' model <- fpc::pamk(iris[1:4], criterion = "ch")
#' model_parameters(model)
#'
#' # DBSCAN ---------------------------
#' model <- dbscan::dbscan(iris[1:4], eps = 1.45, minPts = 10)
#'
#' rez <- model_parameters(model, iris[1:4])
#' rez
#'
#' # Get clusters
#' predict(rez)
#'
#' # Clusters centers in long form
#' attributes(rez)$means
#'
#' # Between and Total Sum of Squares
#' attributes(rez)$Sum_Squares_Total
#' attributes(rez)$Sum_Squares_Between
#'
#' # HDBSCAN
#' model <- dbscan::hdbscan(iris[1:4], minPts = 10)
#' model_parameters(model, iris[1:4])
#' }
#' @export
model_parameters.hclust <- function(model, data = NULL, clusters = NULL, ...) {
  if (is.null(data)) {
    insight::format_error(
      "This function requires the data used to compute the clustering to be provided via `data` as it is not accessible from the clustering object itself."
    )
  }
  if (is.null(clusters)) {
    insight::format_error(
      "This function requires a vector of cluster assignments of the same length as the data to be passed, as it is not contained in the clustering object itself."
    )
  }

  params <- cluster_centers(data, clusters, ...)
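  # `cluster_centers()` returns one row per cluster (Cluster, n_Obs,
  # Sum_Squares, followed by the per-variable means, hence `4:ncol()` below)
  # and stores the variance and sum-of-squares information as attributes,
  # which are re-attached further down so they survive the class change.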
# Long means means <- datawizard::reshape_longer(params, select = 4:ncol(params), values_to = "Mean", names_to = "Variable" ) attr(params, "variance") <- attributes(params)$variance attr(params, "Sum_Squares_Between") <- attributes(params)$Sum_Squares_Between attr(params, "Sum_Squares_Total") <- attributes(params)$Sum_Squares_Total attr(params, "means") <- means attr(params, "model") <- model attr(params, "scores") <- clusters attr(params, "type") <- "hclust" class(params) <- c("parameters_clusters", class(params)) params } #' @export model_parameters.pvclust <- function(model, data = NULL, clusters = NULL, ci = 0.95, ...) { if (is.null(data)) { insight::format_error( "This function requires the data used to compute the clustering to be provided via `data` as it is not accessible from the clustering object itself." ) } if (is.null(clusters)) { clusters <- .model_parameters_pvclust_clusters(model, data, ci)$Cluster } params <- .cluster_centers_params(data, clusters, ...) attr(params, "model") <- model attr(params, "type") <- "pvclust" attr(params, "title") <- "Bootstrapped Hierarchical Clustering (PVCLUST)" params } # Utils ------------------------------------------------------------------- #' @keywords internal .model_parameters_pvclust_clusters <- function(model, data, ci = 0.95) { insight::check_if_installed("pvclust") rez <- pvclust::pvpick(model, alpha = ci, pv = "si") # Assign clusters out <- data.frame() for (cluster in seq_along(rez$clusters)) { out <- rbind(out, data.frame(Cluster = cluster, Row = rez$clusters[[cluster]], stringsAsFactors = FALSE), make.row.names = FALSE, stringsAsFactors = FALSE) } # Add points not in significant clusters remaining_rows <- row.names(data)[!row.names(data) %in% out$Row] if (length(remaining_rows) > 0) { out <- rbind(out, data.frame(Cluster = 0, Row = remaining_rows, stringsAsFactors = FALSE), make.row.names = FALSE, stringsAsFactors = FALSE) } # Reorder according to original order of rows out <- out[order(match(out$Row, row.names(data))), ] row.names(out) <- NULL out } parameters/R/extract_random_parameters.R0000644000176200001440000001263515057525051020212 0ustar liggesusers.extract_random_parameters <- function(model, ...) { UseMethod(".extract_random_parameters") } .extract_random_parameters.merMod <- function(model, ci = 0.95, effects = "random", ...) 
{ insight::check_if_installed("lme4") out <- as.data.frame(lme4::ranef(model, condVar = TRUE), stringsAsFactors = FALSE) colnames(out) <- c("Group", "Parameter", "Level", "Coefficient", "SE") # coerce to character out$Parameter <- as.character(out$Parameter) out$Level <- as.character(out$Level) out$Group <- as.character(out$Group) out$Effects <- "random" if (length(ci) == 1) { fac <- stats::qnorm((1 + ci) / 2) out$CI_low <- out$Coefficient - fac * out$SE out$CI_high <- out$Coefficient + fac * out$SE ci_cols <- c("CI_low", "CI_high") } else { ci_cols <- NULL for (i in ci) { fac <- stats::qnorm((1 + i) / 2) ci_low <- paste0("CI_low_", i) ci_high <- paste0("CI_high_", i) out[[ci_low]] <- out$Coefficient - fac * out$SE out[[ci_high]] <- out$Coefficient + fac * out$SE ci_cols <- c(ci_cols, ci_low, ci_high) } } stat_column <- gsub("-statistic", "", insight::find_statistic(model), fixed = TRUE) # to match rbind out[[stat_column]] <- NA out$df_error <- NA out$p <- NA out <- out[c("Parameter", "Level", "Coefficient", "SE", ci_cols, stat_column, "df_error", "p", "Effects", "Group")] if (effects == "random") { out[c(stat_column, "df_error", "p")] <- NULL } out } .extract_random_parameters.glmmTMB <- function(model, ci = 0.95, effects = "random", component = "conditional", ...) { insight::check_if_installed("lme4") out <- as.data.frame(lme4::ranef(model, condVar = TRUE)) colnames(out) <- c("Component", "Group", "Parameter", "Level", "Coefficient", "SE") # filter component out <- switch(component, zi = , zero_inflated = out[out$Component == "zi", ], cond = , conditional = out[out$Component == "cond", ], disp = , dispersion = out[out$Component == "disp", ], out ) # coerce to character out$Parameter <- as.character(out$Parameter) out$Level <- as.character(out$Level) out$Group <- as.character(out$Group) out$Effects <- "random" # rename out$Component[out$Component == "zi"] <- "zero_inflated" out$Component[out$Component == "cond"] <- "conditional" out$Component[out$Component == "disp"] <- "dispersion" if (length(ci) == 1) { fac <- stats::qnorm((1 + ci) / 2) out$CI_low <- out$Coefficient - fac * out$SE out$CI_high <- out$Coefficient + fac * out$SE ci_cols <- c("CI_low", "CI_high") } else { ci_cols <- NULL for (i in ci) { fac <- stats::qnorm((1 + i) / 2) ci_low <- paste0("CI_low_", i) ci_high <- paste0("CI_high_", i) out[[ci_low]] <- out$Coefficient - fac * out$SE out[[ci_high]] <- out$Coefficient + fac * out$SE ci_cols <- c(ci_cols, ci_low, ci_high) } } stat_column <- gsub("-statistic", "", insight::find_statistic(model), fixed = TRUE) # to match rbind out[[stat_column]] <- NA out$df_error <- NA out$p <- NA out <- out[c( "Parameter", "Level", "Coefficient", "SE", ci_cols, stat_column, "df_error", "p", "Component", "Effects", "Group" )] if (effects == "random") { out[c(stat_column, "df_error", "p")] <- NULL } out } .extract_random_parameters.MixMod <- function(model, ...) { NULL } .extract_random_parameters.coxme <- function(model, ci = NULL, effects = "random", ...) 
{ insight::check_if_installed(c("lme4", "coxme")) # extract random effects re_grp <- insight::find_random(model, split_nested = TRUE, flatten = TRUE) do.call(rbind, lapply(re_grp, function(grp) { # coerce to data frame out <- as.data.frame(lme4::ranef(model)[[grp]], stringsAsFactors = FALSE) colnames(out)[1] <- "(Intercept)" # reshape, to have a long format out$.grp <- unique(insight::get_data(model)[[grp]]) out <- datawizard::reshape_longer(out, select = -".grp") out$grpvar <- grp # rename columns colnames(out) <- c("Level", "Parameter", "Coefficient", "Group") out$SE <- NA # re-order columns out <- out[c("Group", "Parameter", "Level", "Coefficient", "SE")] out$Parameter[out$Parameter == "Intercept"] <- "(Intercept)" # sort out <- datawizard::data_arrange(out, c("Group", "Parameter", "Level")) # coerce to character out$Parameter <- as.character(out$Parameter) out$Level <- as.character(out$Level) out$Group <- as.character(out$Group) out$Effects <- "random" out$CI_low <- out$CI_high <- NA ci_cols <- c("CI_low", "CI_high") stat_column <- gsub("-statistic", "", insight::find_statistic(model), fixed = TRUE) # to match rbind out[[stat_column]] <- NA out$df_error <- NA out$p <- NA out <- out[c("Parameter", "Level", "Coefficient", "SE", ci_cols, stat_column, "df_error", "p", "Effects", "Group")] if (effects == "random") { out[c(stat_column, "df_error", "p")] <- NULL } out })) } parameters/vignettes/0000755000176200001440000000000015111301674014424 5ustar liggesusersparameters/vignettes/overview_of_vignettes.Rmd0000644000176200001440000000442115060045621021513 0ustar liggesusers--- title: "Overview of Vignettes" output: rmarkdown::html_vignette: vignette: > %\VignetteIndexEntry{Overview of Vignettes} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} editor_options: chunk_output_type: console --- ```{r message=FALSE, warning=FALSE, include=FALSE} library(knitr) knitr::opts_chunk$set( echo = TRUE, collapse = TRUE, warning = FALSE, message = FALSE, comment = "#>", eval = TRUE ) ``` All package vignettes are available at [https://easystats.github.io/parameters/](https://easystats.github.io/parameters/). 
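As a minimal entry point before diving into the individual articles (a sketch using only the built-in `mtcars` data), the package's workhorse function `model_parameters()` summarizes a fitted model:

```r
library(parameters)

# coefficients, confidence intervals and p-values of a linear model
model <- lm(mpg ~ wt + cyl, data = mtcars)
model_parameters(model)
```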
## Function Overview

* [Function Reference](https://easystats.github.io/parameters/reference/index.html)

## Description of Parameters

* [Summary of Model Parameters](https://easystats.github.io/parameters/articles/model_parameters.html)
* [Parameter and Model Standardization](https://easystats.github.io/parameters/articles/standardize_parameters_effsize.html)
* [Robust Estimation of Standard Errors, Confidence Intervals, and p-values](https://easystats.github.io/parameters/articles/model_parameters_robust.html)
* [Model Parameters for Multiply Imputed Repeated Analyses](https://easystats.github.io/parameters/articles/model_parameters_mice.html)
* [Analysing Longitudinal or Panel Data](https://easystats.github.io/parameters/articles/demean.html)

## Formatting, Printing and Plotting

* [Formatting Model Parameters](https://easystats.github.io/parameters/articles/model_parameters_formatting.html)
* [Printing Model Parameters](https://easystats.github.io/parameters/articles/model_parameters_print.html)
* [Plotting functions](https://easystats.github.io/see/articles/parameters.html)

## Dimension Reduction and Clustering

* [Feature Reduction (PCA, cMDS, ICA, ...)](https://easystats.github.io/parameters/articles/parameters_reduction.html)
* [Structural Models (EFA, CFA, SEM, ...)](https://easystats.github.io/parameters/articles/efa_cfa.html)
* [Selection of Model Parameters](https://easystats.github.io/parameters/articles/parameters_selection.html)
* [Clustering with easystats](https://easystats.github.io/parameters/articles/clustering.html)

## Plotting Functions

* [Plotting functions in the **see** package](https://easystats.github.io/see/articles/parameters.html)
parameters/data/0000755000176200001440000000000013641625623013336 5ustar liggesusersparameters/data/qol_cancer.RData0000644000176200001440000000646413641625623016367 0ustar liggesusers[binary RData payload omitted]
parameters/NAMESPACE0000644000176200001440000010105015073732442013640 0ustar liggesusers# Generated by roxygen2: do not edit by hand
S3method(as.data.frame,VarCorr.coxme)
S3method(as.data.frame,VarCorr.lme)
S3method(as.double,n_clusters)
S3method(as.double,n_factors)
S3method(as.numeric,n_clusters)
S3method(as.numeric,n_factors)
S3method(bootstrap_model,default)
S3method(bootstrap_model,glmmTMB)
S3method(bootstrap_model,merMod)
S3method(bootstrap_model,nestedLogit)
S3method(bootstrap_parameters,bootstrap_model)
S3method(bootstrap_parameters,default)
S3method(ci,BBmm)
S3method(ci,BBreg)
S3method(ci,DirichletRegModel)
S3method(ci,HLfit)
S3method(ci,MixMod)
S3method(ci,Sarlm)
S3method(ci,averaging)
S3method(ci,bayesx)
S3method(ci,betamfx)
S3method(ci,betaor)
S3method(ci,betareg)
S3method(ci,bracl)
S3method(ci,brmultinom)
S3method(ci,btergm)
S3method(ci,clm2)
S3method(ci,clmm2)
S3method(ci,coeftest)
S3method(ci,complmrob)
S3method(ci,crq)
S3method(ci,default)
S3method(ci,deltaMethod)
S3method(ci,effectsize_table)
S3method(ci,estimate_contrasts)
S3method(ci,estimate_means)
S3method(ci,estimate_slopes)
S3method(ci,fixest_multi)
S3method(ci,flac)
S3method(ci,flic)
S3method(ci,gam)
S3method(ci,gamm)
S3method(ci,gamm4)
S3method(ci,geeglm)
S3method(ci,glht)
S3method(ci,glm)
S3method(ci,glmgee)
S3method(ci,glmm)
S3method(ci,glmmTMB)
S3method(ci,hglm)
S3method(ci,hurdle)
S3method(ci,ivFixed)
S3method(ci,ivprobit)
S3method(ci,lavaan)
S3method(ci,list)
S3method(ci,lm)
S3method(ci,lm_robust)
S3method(ci,lme)
S3method(ci,lmodel2)
S3method(ci,logistf)
S3method(ci,logitmfx)
S3method(ci,logitor)
S3method(ci,lqm)
S3method(ci,lqmm)
S3method(ci,margins)
S3method(ci,maxLik)
S3method(ci,mediate)
S3method(ci,merMod)
S3method(ci,merModList)
S3method(ci,meta_bma)
S3method(ci,meta_fixed)
S3method(ci,meta_random)
S3method(ci,metaplus)
S3method(ci,mhurdle)
S3method(ci,mipo)
S3method(ci,mira)
S3method(ci,mixor)
S3method(ci,mjoint)
S3method(ci,mle)
S3method(ci,mle2)
S3method(ci,mlm)
S3method(ci,mmrm)
S3method(ci,mmrm_fit)
S3method(ci,mmrm_tmb)
S3method(ci,model_fit)
S3method(ci,multinom)
S3method(ci,multinom_weightit)
S3method(ci,negbin)
S3method(ci,negbinirr)
S3method(ci,negbinmfx)
S3method(ci,nestedLogit)
S3method(ci,nlrq)
S3method(ci,ordinal_weightit)
S3method(ci,parameters_standardized)
S3method(ci,pgmm)
S3method(ci,phyloglm)
S3method(ci,phylolm)
S3method(ci,poissonirr)
S3method(ci,poissonmfx)
S3method(ci,polr)
S3method(ci,probitmfx)
S3method(ci,rma)
S3method(ci,rq)
S3method(ci,rqs)
S3method(ci,rqss)
S3method(ci,scam)
S3method(ci,selection)
S3method(ci,summary.lm)
S3method(ci,svyglm)
S3method(ci,svyolr)
S3method(ci,systemfit)
S3method(ci,varest)
S3method(ci,zerocount)
S3method(ci,zeroinfl)
S3method(ci_kenward,default)
S3method(ci_kenward,glmmTMB)
S3method(cluster_discrimination,cluster_analysis)
S3method(cluster_discrimination,default)
S3method(cluster_performance,dbscan)
S3method(cluster_performance,hclust)
S3method(cluster_performance,kmeans)
S3method(cluster_performance,parameters_clusters)
S3method(convert_efa_to_cfa,fa)
S3method(convert_efa_to_cfa,fa.ci)
S3method(convert_efa_to_cfa,parameters_efa)
S3method(convert_efa_to_cfa,parameters_pca)
S3method(display,compare_parameters)
S3method(display,equivalence_test_lm)
S3method(display,parameters_brms_meta)
S3method(display,parameters_efa)
S3method(display,parameters_efa_summary)
S3method(display,parameters_model)
S3method(display,parameters_omega) S3method(display,parameters_omega_summary) S3method(display,parameters_p_function) S3method(display,parameters_pca) S3method(display,parameters_pca_summary) S3method(display,parameters_sem) S3method(display,parameters_simulate) S3method(display,parameters_standardized) S3method(equivalence_test,MixMod) S3method(equivalence_test,estimate_contrasts) S3method(equivalence_test,estimate_means) S3method(equivalence_test,estimate_slopes) S3method(equivalence_test,feis) S3method(equivalence_test,felm) S3method(equivalence_test,gee) S3method(equivalence_test,glm) S3method(equivalence_test,glmmTMB) S3method(equivalence_test,gls) S3method(equivalence_test,hurdle) S3method(equivalence_test,lm) S3method(equivalence_test,lme) S3method(equivalence_test,merMod) S3method(equivalence_test,mixed) S3method(equivalence_test,parameters_model) S3method(equivalence_test,parameters_simulate_model) S3method(equivalence_test,rma) S3method(equivalence_test,wbm) S3method(equivalence_test,zeroinfl) S3method(factor_analysis,data.frame) S3method(factor_analysis,matrix) S3method(factor_scores,fa) S3method(factor_scores,omega) S3method(factor_scores,parameters_efa) S3method(factor_scores,parameters_omega) S3method(format,compare_parameters) S3method(format,equivalence_test_lm) S3method(format,p_calibrate) S3method(format,parameters_brms_meta) S3method(format,parameters_coef) S3method(format,parameters_model) S3method(format,parameters_p_function) S3method(format,parameters_sem) S3method(format,parameters_simulate) S3method(format,parameters_standardized) S3method(format_parameters,default) S3method(format_parameters,emm_list) S3method(format_parameters,glmm) S3method(format_parameters,margins) S3method(format_parameters,mediate) S3method(format_parameters,merModList) S3method(format_parameters,meta_bma) S3method(format_parameters,meta_fixed) S3method(format_parameters,meta_random) S3method(format_parameters,mira) S3method(format_parameters,mle2) S3method(format_parameters,parameters_model) S3method(format_parameters,rma) S3method(model_parameters,AKP) S3method(model_parameters,Anova.mlm) S3method(model_parameters,BFBayesFactor) S3method(model_parameters,BGGM) S3method(model_parameters,DirichletRegModel) S3method(model_parameters,FAMD) S3method(model_parameters,Gam) S3method(model_parameters,HLfit) S3method(model_parameters,MCMCglmm) S3method(model_parameters,Mclust) S3method(model_parameters,MixMod) S3method(model_parameters,PCA) S3method(model_parameters,PMCMR) S3method(model_parameters,SemiParBIV) S3method(model_parameters,afex_aov) S3method(model_parameters,anova) S3method(model_parameters,anova.rms) S3method(model_parameters,aov) S3method(model_parameters,aovlist) S3method(model_parameters,asym) S3method(model_parameters,averaging) S3method(model_parameters,bamlss) S3method(model_parameters,bayesQR) S3method(model_parameters,bcplm) S3method(model_parameters,befa) S3method(model_parameters,betamfx) S3method(model_parameters,betaor) S3method(model_parameters,betareg) S3method(model_parameters,bfsl) S3method(model_parameters,bifeAPEs) S3method(model_parameters,blavaan) S3method(model_parameters,blrm) S3method(model_parameters,bootstrap_model) S3method(model_parameters,bracl) S3method(model_parameters,brmsfit) S3method(model_parameters,brmultinom) S3method(model_parameters,censReg) S3method(model_parameters,cgam) S3method(model_parameters,clm2) S3method(model_parameters,clmm) S3method(model_parameters,clmm2) S3method(model_parameters,coeftest) S3method(model_parameters,compare.loo) 
S3method(model_parameters,comparisons) S3method(model_parameters,coxme) S3method(model_parameters,cpglmm) S3method(model_parameters,data.frame) S3method(model_parameters,dbscan) S3method(model_parameters,default) S3method(model_parameters,deltaMethod) S3method(model_parameters,dep.effect) S3method(model_parameters,draws) S3method(model_parameters,emmGrid) S3method(model_parameters,emm_list) S3method(model_parameters,epi.2by2) S3method(model_parameters,estimate_contrasts) S3method(model_parameters,estimate_means) S3method(model_parameters,estimate_slopes) S3method(model_parameters,externVar) S3method(model_parameters,externX) S3method(model_parameters,fa) S3method(model_parameters,fa.ci) S3method(model_parameters,feglm) S3method(model_parameters,fitdistr) S3method(model_parameters,fixest) S3method(model_parameters,fixest_multi) S3method(model_parameters,flac) S3method(model_parameters,flic) S3method(model_parameters,gam) S3method(model_parameters,gamlss) S3method(model_parameters,gamm) S3method(model_parameters,glht) S3method(model_parameters,glimML) S3method(model_parameters,glm) S3method(model_parameters,glmm) S3method(model_parameters,glmmTMB) S3method(model_parameters,glmx) S3method(model_parameters,hclust) S3method(model_parameters,hdbscan) S3method(model_parameters,hglm) S3method(model_parameters,hkmeans) S3method(model_parameters,htest) S3method(model_parameters,hurdle) S3method(model_parameters,hypotheses) S3method(model_parameters,item_omega) S3method(model_parameters,ivFixed) S3method(model_parameters,ivprobit) S3method(model_parameters,kmeans) S3method(model_parameters,lavaan) S3method(model_parameters,lcmm) S3method(model_parameters,list) S3method(model_parameters,lm_robust) S3method(model_parameters,lme) S3method(model_parameters,lmodel2) S3method(model_parameters,logistf) S3method(model_parameters,logitmfx) S3method(model_parameters,logitor) S3method(model_parameters,lqm) S3method(model_parameters,lqmm) S3method(model_parameters,maov) S3method(model_parameters,marginaleffects) S3method(model_parameters,margins) S3method(model_parameters,maxLik) S3method(model_parameters,maxim) S3method(model_parameters,mblogit) S3method(model_parameters,mcmc) S3method(model_parameters,mcmc.list) S3method(model_parameters,mcp1) S3method(model_parameters,mcp2) S3method(model_parameters,med1way) S3method(model_parameters,mediate) S3method(model_parameters,merMod) S3method(model_parameters,merModList) S3method(model_parameters,meta_bma) S3method(model_parameters,meta_fixed) S3method(model_parameters,meta_random) S3method(model_parameters,metaplus) S3method(model_parameters,mhurdle) S3method(model_parameters,mipo) S3method(model_parameters,mira) S3method(model_parameters,mixed) S3method(model_parameters,mixor) S3method(model_parameters,mjoint) S3method(model_parameters,mle) S3method(model_parameters,mle2) S3method(model_parameters,mlm) S3method(model_parameters,mmrm) S3method(model_parameters,mmrm_fit) S3method(model_parameters,mmrm_tmb) S3method(model_parameters,model_fit) S3method(model_parameters,multinom) S3method(model_parameters,multinom_weightit) S3method(model_parameters,mvord) S3method(model_parameters,negbin) S3method(model_parameters,negbinirr) S3method(model_parameters,negbinmfx) S3method(model_parameters,nestedLogit) S3method(model_parameters,omega) S3method(model_parameters,onesampb) S3method(model_parameters,ordinal_weightit) S3method(model_parameters,osrt) S3method(model_parameters,pairwise.htest) S3method(model_parameters,pam) S3method(model_parameters,parameters_efa) 
S3method(model_parameters,parameters_pca) S3method(model_parameters,pb2) S3method(model_parameters,pgmm) S3method(model_parameters,poissonirr) S3method(model_parameters,poissonmfx) S3method(model_parameters,polr) S3method(model_parameters,predictions) S3method(model_parameters,principal) S3method(model_parameters,probitmfx) S3method(model_parameters,pvclust) S3method(model_parameters,ridgelm) S3method(model_parameters,rlmerMod) S3method(model_parameters,rma) S3method(model_parameters,robtab) S3method(model_parameters,rqs) S3method(model_parameters,rqss) S3method(model_parameters,scam) S3method(model_parameters,selection) S3method(model_parameters,sem) S3method(model_parameters,seqanova.svyglm) S3method(model_parameters,slopes) S3method(model_parameters,stanfit) S3method(model_parameters,stanmvreg) S3method(model_parameters,stanreg) S3method(model_parameters,summary_emm) S3method(model_parameters,survfit) S3method(model_parameters,svy2lme) S3method(model_parameters,svyglm) S3method(model_parameters,svyolr) S3method(model_parameters,svytable) S3method(model_parameters,systemfit) S3method(model_parameters,t1way) S3method(model_parameters,trendPMCMR) S3method(model_parameters,trimcibt) S3method(model_parameters,varest) S3method(model_parameters,vgam) S3method(model_parameters,wbgee) S3method(model_parameters,wbm) S3method(model_parameters,wmcpAKP) S3method(model_parameters,yuen) S3method(model_parameters,zcpglm) S3method(model_parameters,zerocount) S3method(model_parameters,zeroinfl) S3method(model_parameters,zoo) S3method(p_calibrate,default) S3method(p_calibrate,numeric) S3method(p_direction,coxph) S3method(p_direction,feis) S3method(p_direction,felm) S3method(p_direction,gee) S3method(p_direction,glm) S3method(p_direction,glmmTMB) S3method(p_direction,gls) S3method(p_direction,hurdle) S3method(p_direction,lm) S3method(p_direction,lme) S3method(p_direction,merMod) S3method(p_direction,mixed) S3method(p_direction,rma) S3method(p_direction,svyglm) S3method(p_direction,wbm) S3method(p_direction,zeroinfl) S3method(p_significance,coxph) S3method(p_significance,feis) S3method(p_significance,felm) S3method(p_significance,gee) S3method(p_significance,glm) S3method(p_significance,glmmTMB) S3method(p_significance,gls) S3method(p_significance,hurdle) S3method(p_significance,lm) S3method(p_significance,lme) S3method(p_significance,merMod) S3method(p_significance,mixed) S3method(p_significance,parameters_model) S3method(p_significance,rma) S3method(p_significance,svyglm) S3method(p_significance,wbm) S3method(p_significance,zeroinfl) S3method(p_value,BBmm) S3method(p_value,BBreg) S3method(p_value,BFBayesFactor) S3method(p_value,BGGM) S3method(p_value,DirichletRegModel) S3method(p_value,Gam) S3method(p_value,HLfit) S3method(p_value,LORgee) S3method(p_value,MCMCglmm) S3method(p_value,Sarlm) S3method(p_value,SemiParBIV) S3method(p_value,aareg) S3method(p_value,anova) S3method(p_value,aov) S3method(p_value,aovlist) S3method(p_value,averaging) S3method(p_value,bamlss) S3method(p_value,bayesQR) S3method(p_value,bayesx) S3method(p_value,bcplm) S3method(p_value,betamfx) S3method(p_value,betaor) S3method(p_value,betareg) S3method(p_value,bife) S3method(p_value,blavaan) S3method(p_value,blrm) S3method(p_value,bracl) S3method(p_value,brmsfit) S3method(p_value,brmultinom) S3method(p_value,btergm) S3method(p_value,censReg) S3method(p_value,cgam) S3method(p_value,clm2) S3method(p_value,clmm2) S3method(p_value,coeftest) S3method(p_value,complmrob) S3method(p_value,coxme) S3method(p_value,coxph) S3method(p_value,coxr) 
S3method(p_value,cpglm) S3method(p_value,cpglmm) S3method(p_value,crch) S3method(p_value,crq) S3method(p_value,data.frame) S3method(p_value,default) S3method(p_value,deltaMethod) S3method(p_value,draws) S3method(p_value,eglm) S3method(p_value,emmGrid) S3method(p_value,emm_list) S3method(p_value,externVar) S3method(p_value,externX) S3method(p_value,feglm) S3method(p_value,fixest_multi) S3method(p_value,flac) S3method(p_value,flexsurvreg) S3method(p_value,flic) S3method(p_value,gam) S3method(p_value,gamlss) S3method(p_value,gamm) S3method(p_value,gamm4) S3method(p_value,gee) S3method(p_value,geeglm) S3method(p_value,glht) S3method(p_value,glimML) S3method(p_value,glmgee) S3method(p_value,glmm) S3method(p_value,glmx) S3method(p_value,gls) S3method(p_value,gmnl) S3method(p_value,hglm) S3method(p_value,htest) S3method(p_value,hurdle) S3method(p_value,ivFixed) S3method(p_value,ivprobit) S3method(p_value,ivreg) S3method(p_value,lavaan) S3method(p_value,lcmm) S3method(p_value,list) S3method(p_value,lm) S3method(p_value,lm_robust) S3method(p_value,lme) S3method(p_value,lmodel2) S3method(p_value,logistf) S3method(p_value,logitmfx) S3method(p_value,logitor) S3method(p_value,lqm) S3method(p_value,lqmm) S3method(p_value,lrm) S3method(p_value,margins) S3method(p_value,maxLik) S3method(p_value,mblogit) S3method(p_value,mediate) S3method(p_value,merMod) S3method(p_value,metaplus) S3method(p_value,mhurdle) S3method(p_value,mipo) S3method(p_value,mira) S3method(p_value,mixor) S3method(p_value,mjoint) S3method(p_value,mle2) S3method(p_value,mlm) S3method(p_value,mmrm) S3method(p_value,mmrm_fit) S3method(p_value,mmrm_tmb) S3method(p_value,model_fit) S3method(p_value,multinom) S3method(p_value,multinom_weightit) S3method(p_value,mvord) S3method(p_value,negbin) S3method(p_value,negbinirr) S3method(p_value,negbinmfx) S3method(p_value,nestedLogit) S3method(p_value,nlrq) S3method(p_value,numeric) S3method(p_value,ols) S3method(p_value,ordinal_weightit) S3method(p_value,pggls) S3method(p_value,pglm) S3method(p_value,plm) S3method(p_value,poissonirr) S3method(p_value,poissonmfx) S3method(p_value,polr) S3method(p_value,probitmfx) S3method(p_value,psm) S3method(p_value,riskRegression) S3method(p_value,rlm) S3method(p_value,rlmerMod) S3method(p_value,rma) S3method(p_value,rms) S3method(p_value,robmixglm) S3method(p_value,rq) S3method(p_value,rqs) S3method(p_value,rqss) S3method(p_value,scam) S3method(p_value,selection) S3method(p_value,sem) S3method(p_value,speedlm) S3method(p_value,stanreg) S3method(p_value,summary.lm) S3method(p_value,survreg) S3method(p_value,svy2lme) S3method(p_value,svyglm) S3method(p_value,svyglm.nb) S3method(p_value,svyglm.zip) S3method(p_value,svyolr) S3method(p_value,systemfit) S3method(p_value,tobit) S3method(p_value,truncreg) S3method(p_value,varest) S3method(p_value,vgam) S3method(p_value,vglm) S3method(p_value,wbgee) S3method(p_value,wbm) S3method(p_value,zcpglm) S3method(p_value,zerocount) S3method(p_value,zeroinfl) S3method(plot,cluster_analysis) S3method(plot,cluster_analysis_summary) S3method(plot,compare_parameters) S3method(plot,equivalence_test_lm) S3method(plot,n_clusters) S3method(plot,n_clusters_dbscan) S3method(plot,n_clusters_elbow) S3method(plot,n_clusters_gap) S3method(plot,n_clusters_hclust) S3method(plot,n_clusters_silhouette) S3method(plot,n_factors) S3method(plot,parameters_brms_meta) S3method(plot,parameters_efa) S3method(plot,parameters_model) S3method(plot,parameters_p_function) S3method(plot,parameters_pca) S3method(plot,parameters_sem) 
S3method(plot,parameters_simulate) S3method(predict,cluster_analysis) S3method(predict,cluster_meta) S3method(predict,kmeans) S3method(predict,parameters_clusters) S3method(predict,parameters_efa) S3method(predict,parameters_pca) S3method(predict,parameters_sem) S3method(principal_components,data.frame) S3method(principal_components,lm) S3method(principal_components,merMod) S3method(print,cfa_model) S3method(print,cluster_analysis) S3method(print,cluster_discrimination) S3method(print,compare_parameters) S3method(print,equivalence_test_lm) S3method(print,n_clusters) S3method(print,n_clusters_dbscan) S3method(print,n_clusters_elbow) S3method(print,n_clusters_gap) S3method(print,n_clusters_hclust) S3method(print,n_clusters_silhouette) S3method(print,n_factors) S3method(print,p_calibrate) S3method(print,p_direction_lm) S3method(print,p_significance_lm) S3method(print,parameters_brms_meta) S3method(print,parameters_clusters) S3method(print,parameters_coef) S3method(print,parameters_da) S3method(print,parameters_efa) S3method(print,parameters_efa_summary) S3method(print,parameters_loadings) S3method(print,parameters_model) S3method(print,parameters_omega) S3method(print,parameters_omega_summary) S3method(print,parameters_p_function) S3method(print,parameters_pca) S3method(print,parameters_pca_summary) S3method(print,parameters_random) S3method(print,parameters_sem) S3method(print,parameters_simulate) S3method(print,parameters_standardized) S3method(print_html,compare_parameters) S3method(print_html,equivalence_test_lm) S3method(print_html,parameters_brms_meta) S3method(print_html,parameters_efa) S3method(print_html,parameters_efa_summary) S3method(print_html,parameters_model) S3method(print_html,parameters_p_function) S3method(print_html,parameters_pca) S3method(print_html,parameters_pca_summary) S3method(print_html,parameters_sem) S3method(print_html,parameters_simulate) S3method(print_html,parameters_standardized) S3method(print_md,compare_parameters) S3method(print_md,equivalence_test_lm) S3method(print_md,parameters_brms_meta) S3method(print_md,parameters_efa) S3method(print_md,parameters_efa_summary) S3method(print_md,parameters_model) S3method(print_md,parameters_omega) S3method(print_md,parameters_omega_summary) S3method(print_md,parameters_p_function) S3method(print_md,parameters_pca) S3method(print_md,parameters_pca_summary) S3method(print_md,parameters_sem) S3method(print_md,parameters_simulate) S3method(print_md,parameters_standardized) S3method(reduce_parameters,data.frame) S3method(reduce_parameters,lm) S3method(reduce_parameters,merMod) S3method(reshape_loadings,data.frame) S3method(reshape_loadings,parameters_efa) S3method(se_kenward,default) S3method(se_kenward,glmmTMB) S3method(se_satterthwaite,default) S3method(select_parameters,lm) S3method(select_parameters,merMod) S3method(simulate_model,LORgee) S3method(simulate_model,MixMod) S3method(simulate_model,betamfx) S3method(simulate_model,betaor) S3method(simulate_model,betareg) S3method(simulate_model,biglm) S3method(simulate_model,bracl) S3method(simulate_model,brmultinom) S3method(simulate_model,censReg) S3method(simulate_model,cglm) S3method(simulate_model,clm) S3method(simulate_model,clm2) S3method(simulate_model,clmm2) S3method(simulate_model,coxme) S3method(simulate_model,coxph) S3method(simulate_model,cpglm) S3method(simulate_model,cpglmm) S3method(simulate_model,crch) S3method(simulate_model,crq) S3method(simulate_model,default) S3method(simulate_model,feglm) S3method(simulate_model,feis) S3method(simulate_model,fixest) 
S3method(simulate_model,fixest_multi) S3method(simulate_model,flac) S3method(simulate_model,flexsurvreg) S3method(simulate_model,flic) S3method(simulate_model,gam) S3method(simulate_model,gamlss) S3method(simulate_model,gamm) S3method(simulate_model,gee) S3method(simulate_model,geeglm) S3method(simulate_model,glimML) S3method(simulate_model,glm) S3method(simulate_model,glmRob) S3method(simulate_model,glmmTMB) S3method(simulate_model,glmmadmb) S3method(simulate_model,glmrob) S3method(simulate_model,glmx) S3method(simulate_model,gls) S3method(simulate_model,hurdle) S3method(simulate_model,iv_robust) S3method(simulate_model,ivreg) S3method(simulate_model,list) S3method(simulate_model,lm) S3method(simulate_model,lmRob) S3method(simulate_model,lm_robust) S3method(simulate_model,lme) S3method(simulate_model,lmrob) S3method(simulate_model,logistf) S3method(simulate_model,lrm) S3method(simulate_model,merMod) S3method(simulate_model,mhurdle) S3method(simulate_model,mixor) S3method(simulate_model,mlm) S3method(simulate_model,model_fit) S3method(simulate_model,multinom) S3method(simulate_model,mvord) S3method(simulate_model,nestedLogit) S3method(simulate_model,nlrq) S3method(simulate_model,ols) S3method(simulate_model,plm) S3method(simulate_model,polr) S3method(simulate_model,psm) S3method(simulate_model,rms) S3method(simulate_model,rq) S3method(simulate_model,selection) S3method(simulate_model,speedglm) S3method(simulate_model,speedlm) S3method(simulate_model,survreg) S3method(simulate_model,svyglm.nb) S3method(simulate_model,svyglm.zip) S3method(simulate_model,tobit) S3method(simulate_model,truncreg) S3method(simulate_model,varest) S3method(simulate_model,vgam) S3method(simulate_model,vglm) S3method(simulate_model,zerocount) S3method(simulate_model,zeroinfl) S3method(simulate_parameters,default) S3method(simulate_parameters,glmmTMB) S3method(simulate_parameters,hurdle) S3method(simulate_parameters,mblogit) S3method(simulate_parameters,mlm) S3method(simulate_parameters,multinom) S3method(simulate_parameters,nestedLogit) S3method(simulate_parameters,varest) S3method(simulate_parameters,zerocount) S3method(simulate_parameters,zeroinfl) S3method(sort,parameters_efa) S3method(sort,parameters_pca) S3method(sort_parameters,data.frame) S3method(sort_parameters,default) S3method(standard_error,BBmm) S3method(standard_error,BBreg) S3method(standard_error,DirichletRegModel) S3method(standard_error,HLfit) S3method(standard_error,LORgee) S3method(standard_error,MCMCglmm) S3method(standard_error,MixMod) S3method(standard_error,Sarlm) S3method(standard_error,SemiParBIV) S3method(standard_error,aareg) S3method(standard_error,anova) S3method(standard_error,aov) S3method(standard_error,aovlist) S3method(standard_error,averaging) S3method(standard_error,bamlss) S3method(standard_error,bayesx) S3method(standard_error,betamfx) S3method(standard_error,betaor) S3method(standard_error,betareg) S3method(standard_error,bfsl) S3method(standard_error,bife) S3method(standard_error,biglm) S3method(standard_error,blavaan) S3method(standard_error,bracl) S3method(standard_error,brmsfit) S3method(standard_error,brmultinom) S3method(standard_error,btergm) S3method(standard_error,censReg) S3method(standard_error,cgam) S3method(standard_error,character) S3method(standard_error,clm2) S3method(standard_error,clmm2) S3method(standard_error,coeftest) S3method(standard_error,complmrob) S3method(standard_error,coxme) S3method(standard_error,coxph) S3method(standard_error,coxr) S3method(standard_error,cpglm) S3method(standard_error,cpglmm) 
S3method(standard_error,crch) S3method(standard_error,crq) S3method(standard_error,data.frame) S3method(standard_error,default) S3method(standard_error,deltaMethod) S3method(standard_error,draws) S3method(standard_error,effectsize_table) S3method(standard_error,emmGrid) S3method(standard_error,emm_list) S3method(standard_error,estimate_contrasts) S3method(standard_error,estimate_means) S3method(standard_error,estimate_slopes) S3method(standard_error,externVar) S3method(standard_error,externX) S3method(standard_error,factor) S3method(standard_error,feglm) S3method(standard_error,fitdistr) S3method(standard_error,fixest) S3method(standard_error,fixest_multi) S3method(standard_error,flac) S3method(standard_error,flexsurvreg) S3method(standard_error,flic) S3method(standard_error,gam) S3method(standard_error,gamlss) S3method(standard_error,gamm) S3method(standard_error,gamm4) S3method(standard_error,gee) S3method(standard_error,geeglm) S3method(standard_error,glht) S3method(standard_error,glimML) S3method(standard_error,glmgee) S3method(standard_error,glmm) S3method(standard_error,glmmTMB) S3method(standard_error,glmx) S3method(standard_error,gls) S3method(standard_error,gmnl) S3method(standard_error,hglm) S3method(standard_error,htest) S3method(standard_error,hurdle) S3method(standard_error,ivFixed) S3method(standard_error,ivprobit) S3method(standard_error,ivreg) S3method(standard_error,lavaan) S3method(standard_error,lcmm) S3method(standard_error,list) S3method(standard_error,lm_robust) S3method(standard_error,lme) S3method(standard_error,lmodel2) S3method(standard_error,logistf) S3method(standard_error,logitmfx) S3method(standard_error,logitor) S3method(standard_error,lqm) S3method(standard_error,lqmm) S3method(standard_error,lrm) S3method(standard_error,margins) S3method(standard_error,maxLik) S3method(standard_error,mblogit) S3method(standard_error,mediate) S3method(standard_error,merMod) S3method(standard_error,merModList) S3method(standard_error,meta_bma) S3method(standard_error,meta_fixed) S3method(standard_error,meta_random) S3method(standard_error,metaplus) S3method(standard_error,mhurdle) S3method(standard_error,mipo) S3method(standard_error,mira) S3method(standard_error,mixor) S3method(standard_error,mjoint) S3method(standard_error,mle) S3method(standard_error,mle2) S3method(standard_error,mlm) S3method(standard_error,mmrm) S3method(standard_error,mmrm_fit) S3method(standard_error,mmrm_tmb) S3method(standard_error,model_fit) S3method(standard_error,multinom) S3method(standard_error,multinom_weightit) S3method(standard_error,mvord) S3method(standard_error,mvstanreg) S3method(standard_error,negbin) S3method(standard_error,negbinirr) S3method(standard_error,negbinmfx) S3method(standard_error,nestedLogit) S3method(standard_error,nlrq) S3method(standard_error,numeric) S3method(standard_error,ols) S3method(standard_error,ordinal_weightit) S3method(standard_error,parameters_kurtosis) S3method(standard_error,parameters_skewness) S3method(standard_error,parameters_standardized) S3method(standard_error,pgmm) S3method(standard_error,plm) S3method(standard_error,poissonirr) S3method(standard_error,poissonmfx) S3method(standard_error,polr) S3method(standard_error,probitmfx) S3method(standard_error,psm) S3method(standard_error,riskRegression) S3method(standard_error,rma) S3method(standard_error,rms) S3method(standard_error,robmixglm) S3method(standard_error,rq) S3method(standard_error,rqs) S3method(standard_error,rqss) S3method(standard_error,scam) S3method(standard_error,selection) 
S3method(standard_error,sem) S3method(standard_error,stanreg) S3method(standard_error,summary.lm) S3method(standard_error,survreg) S3method(standard_error,svy2lme) S3method(standard_error,svyglm) S3method(standard_error,svyglm.nb) S3method(standard_error,svyglm.zip) S3method(standard_error,svyolr) S3method(standard_error,systemfit) S3method(standard_error,table) S3method(standard_error,tobit) S3method(standard_error,truncreg) S3method(standard_error,varest) S3method(standard_error,vgam) S3method(standard_error,vglm) S3method(standard_error,wbgee) S3method(standard_error,wbm) S3method(standard_error,xtabs) S3method(standard_error,zcpglm) S3method(standard_error,zerocount) S3method(standard_error,zeroinfl) S3method(standardize_info,default) S3method(standardize_parameters,bootstrap_model) S3method(standardize_parameters,bootstrap_parameters) S3method(standardize_parameters,default) S3method(standardize_parameters,mediate) S3method(standardize_parameters,model_fit) S3method(standardize_parameters,parameters_model) S3method(summary,cluster_analysis) S3method(summary,n_clusters) S3method(summary,n_factors) S3method(summary,parameters_efa) S3method(summary,parameters_model) S3method(summary,parameters_omega) S3method(summary,parameters_pca) S3method(visualisation_recipe,cluster_analysis) S3method(visualisation_recipe,cluster_analysis_summary) S3method(visualisation_recipe,n_clusters_dbscan) S3method(visualisation_recipe,n_clusters_elbow) S3method(visualisation_recipe,n_clusters_gap) S3method(visualisation_recipe,n_clusters_silhouette) export(bootstrap_model) export(bootstrap_parameters) export(ci) export(ci_betwithin) export(ci_kenward) export(ci_ml1) export(ci_satterthwaite) export(closest_component) export(cluster_analysis) export(cluster_centers) export(cluster_discrimination) export(cluster_meta) export(cluster_performance) export(compare_models) export(compare_parameters) export(confidence_curve) export(consonance_function) export(convert_efa_to_cfa) export(degrees_of_freedom) export(demean) export(describe_distribution) export(display) export(dof) export(dof_betwithin) export(dof_kenward) export(dof_ml1) export(dof_satterthwaite) export(dominance_analysis) export(efa_to_cfa) export(equivalence_test) export(factor_analysis) export(factor_scores) export(format_df_adjust) export(format_order) export(format_p_adjust) export(format_parameters) export(get_scores) export(kurtosis) export(model_parameters) export(n_clusters) export(n_clusters_dbscan) export(n_clusters_elbow) export(n_clusters_gap) export(n_clusters_hclust) export(n_clusters_silhouette) export(n_components) export(n_factors) export(n_parameters) export(p_calibrate) export(p_direction) export(p_function) export(p_significance) export(p_value) export(p_value_betwithin) export(p_value_kenward) export(p_value_ml1) export(p_value_satterthwaite) export(parameters) export(parameters_type) export(pool_parameters) export(principal_components) export(print_html) export(print_md) export(random_parameters) export(reduce_data) export(reduce_parameters) export(rescale_weights) export(reshape_loadings) export(rotated_data) export(se_kenward) export(se_satterthwaite) export(select_parameters) export(simulate_model) export(simulate_parameters) export(skewness) export(sort_parameters) export(standard_error) export(standardise_info) export(standardise_parameters) export(standardise_posteriors) export(standardize_info) export(standardize_names) export(standardize_parameters) export(standardize_posteriors) export(supported_models) 
export(visualisation_recipe) importFrom(bayestestR,ci) importFrom(bayestestR,equivalence_test) importFrom(bayestestR,p_direction) importFrom(bayestestR,p_significance) importFrom(datawizard,demean) importFrom(datawizard,describe_distribution) importFrom(datawizard,kurtosis) importFrom(datawizard,rescale_weights) importFrom(datawizard,skewness) importFrom(datawizard,visualisation_recipe) importFrom(graphics,plot) importFrom(insight,display) importFrom(insight,n_parameters) importFrom(insight,print_html) importFrom(insight,print_md) importFrom(insight,standardize_names) importFrom(insight,supported_models) parameters/NEWS.md0000644000176200001440000012744415111055132013521 0ustar liggesusers# parameters 0.28.3 * fixed bug in `standardize_info()` that was preventing `standardise_parameters()` from working for `fixest` models. * `equivalence_test()` gets methods for objects from the *modelbased* package. * Improved support for objects from package *survey*. * Added support for package *lcmm*. * Added `ci_method` options `"kenward-roger"` and `"satterthwaite"` for models from package *glmmTMB*. Consequently, `se_kenward()`, `se_satterthwaite()`, `ci_kenward()`, `ci_satterthwaite()`, `p_value_kenward()` and `p_value_satterthwaite()` can now be used with `glmmTMB` models. # parameters 0.28.2 ## Bug fixes * Updates tests to resolve issues with the latest version of the *fixest* package. # parameters 0.28.1 ## Changes * Methods for *glmmTMB* objects (`ci()`, `model_parameters()`, `standard_error()`) now support the `vcov` argument to compute robust standard errors. * `model_parameters()` for *marginaleffects* objects is now more robust in detecting Bayesian models. * Modified code base to address changes in the *marginaleffects* package from version 0.29.0 onwards. ## Bug fixes * Fixed issue with `equivalence_test()` for models of class `glmmTMB` with `beta_family()`. * `exponentiate = TRUE` in `model_parameters()` did not exponentiate location and scale parameters for models from package *ordinal*. # parameters 0.28.0 ## Breaking Changes * The experimental `print_table()` function was removed. The aim of this function was to test the implementation of the `tinytable` backend for printing. Now, `tinytable` is fully supported by `insight::export_table()` and thereby also by the various `print()` resp. `display()` methods for model parameters. ## Changes * All `print_html()` methods get an `engine` argument, to either use the `gt` package or the `tinytable` package for printing HTML tables. Since `tinytable` not only produces HTML tables, but rather different formats depending on the environment, `print_html()` may also generate a markdown table. Thus, the generic `display()` method can be used, too, which has a `format` argument that also supports `"tt"` for `tinytable`. * Improved support for *coxme* models in `model_parameters()`. Random effects and group level estimates are now returned as well. ## Bug fixes * Fixed issue with models of class `selection` with multiple outcomes. # parameters 0.27.0 ## Breaking Changes * The `standardize` argument in `factor_analysis()` now defaults to `FALSE`. * The `rotation` argument in `factor_analysis()` now defaults to `"oblimin"`, because the former default of `"none"` rarely makes sense in the context of factor analysis. If you want to use no rotation, please set `rotation = "none"`. * The `cor` argument in `n_factors()` was renamed into `correlation_matrix`. 
In `factor_analysis()`, the `cor` argument was completely removed to avoid a naming collision with the `cor` argument of `psych::fa()`; users can now pass the `cor` argument directly to `psych::fa()` when using `factor_analysis()`.

## Changes

* `factor_analysis()` gets a `.matrix` method, including a new argument `n_obs` (which can be a single value or a matrix of pairwise counts), to compute factor analysis for a correlation matrix or covariance matrix.

* New function `factor_scores()` to extract factor scores from EFA (`psych::fa()` or `factor_analysis()`).

* Added and/or improved print-methods for all functions around PCA, FA and Omega.

* Improved efficiency in `model_parameters()` for models from packages *brms* and *rstanarm*.

* `p_adjust` for `model_parameters()` gets a new option, `"sup-t"`, to calculate simultaneous confidence intervals.

## Bug fixes

* `bootstrap_model()` did not work for intercept-only models. This has been fixed.

* Fixed issue with printing labels as pretty names for models from package *pscl*, i.e. `print(model_parameters(model), pretty_names = "labels")` now works as expected.

# parameters 0.26.0

## Changes

* The `effects` argument in `model_parameters()` for classes `merMod`, `glmmTMB`, `brmsfit` and `stanreg` gets an additional `"grouplevel"` option, to return the group-level estimates for random effects.

* `model_parameters()` for Anova-objects gains a `p_adjust` argument, to apply p-adjustment where possible. Furthermore, for models from package *afex*, where p-adjustment was applied during model-fitting, the correct p-values are now returned (before, unadjusted p-values were returned in some cases).

* Revised code-base to address changes in the latest *insight* update. Dealing with larger models (many parameters, many posterior samples) from packages *brms* and *rstanarm* is more efficient now. Furthermore, the options for the `effects` argument have a new behaviour. `"all"` only returns fixed effects and random effects variance components, but no longer the group level estimates. Use `effects = "full"` to return all parameters. This change is mainly to be more flexible and gain more efficiency for models with many parameters and/or many posterior draws.

* `model_parameters()` for Anova objects gains an `include_intercept` argument, to include intercepts in the Anova table, where possible.

# parameters 0.25.0

## Changes

* `model_parameters()` for objects from the *marginaleffects* package now calls `bayestestR::describe_posterior()` to process Bayesian models. This offers more flexibility in summarizing the posterior draws from *marginaleffects*.

* `model_parameters()` now shows a more informative coefficient name for binomial models with probit-link.

* Argument `wb_component` now defaults to `FALSE`.

* Improved support and printing for tests from package *WRS2*.

## Bug fixes

* Fixed printing issue with `model_parameters()` for `htest` objects when printing into markdown or HTML format.

* Fixed printing issue with `model_parameters()` for mixed models when `include_reference = TRUE`.

# parameters 0.24.2

## Changes

* The `effects` argument in `model_parameters()` for classes `merMod`, `glmmTMB`, `brmsfit` and `stanreg` gets an additional `"random_total"` option, to return the overall coefficient for random effects (sum of fixed and random effects). See the sketch below for an illustration.

## Bug fixes

* Fixed issue in `model_parameters()` for objects from package *marginaleffects* where columns were renamed when their names matched certain reserved words.
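To illustrate the `"random_total"` option mentioned above, a minimal sketch (assuming the *lme4* package and its `sleepstudy` data are available):

```r
library(parameters)
library(lme4)

model <- lmer(Reaction ~ Days + (Days | Subject), data = sleepstudy)

# total coefficients per group level: fixed effects plus random deviations
model_parameters(model, effects = "random_total")
```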
# parameters 0.24.1

## Changes

* `model_parameters()` now supports objects of class `survfit`.
* `model_parameters()` now gives informative error messages for more model classes than before when the function fails to extract model parameters.
* Improved information on credible intervals and the sampling method in the output of `model_parameters()` for Bayesian models.

## Bug fixes

* Fixed issue with `model_parameters(..., table_wide = TRUE)` for models with complex error structures (#556).
* Fixed issue when printing `model_parameters()` with models from `mgcv::gam()`.
* Fixed issues due to breaking changes in the latest release of the *datawizard* package.
* Fixed issue with wrong column header in the printed output of `model_parameters()` for `MASS::polr()` models with probit link.

# parameters 0.24.0

## Breaking Changes

* The `robust` argument, which was deprecated for a long time, is no longer supported. Please use `vcov` and `vcov_args` instead.

## Changes

* Added support for `coxph.panel` models.
* Added support for `anova()` from models of the *survey* package.
* Documentation was re-organized and clarified, and the index was reduced by removing redundant class documentation.

## Bug fixes

* Fixed bug in `p_value()` for objects of class `averaging`.
* Fixed bug when extracting 'pretty labels' for model parameters, which could fail when predictors were character vectors.
* Fixed bug with inaccurate standard errors for models from package *fixest* that used the `sunab()` function in the formula.

# parameters 0.23.0

## Breaking Changes

* Argument `summary` in `model_parameters()` is now deprecated. Please use `include_info` instead.
* Changed the output style for the additional information on model formula, sigma and R2 that is included when printing model parameters. This information now also includes the RMSE.

## Changes

* Used a more accurate analytic approach to calculate normal distributions for the SGPV in `equivalence_test()`, which is also used in `p_significance()`.
* Added `p_direction()` methods for frequentist models. This is a convenient way to test the direction of the effect, which formerly was already (and still is) possible with `pd = TRUE` in `model_parameters()`.
* `p_function()`, `p_significance()` and `equivalence_test()` get `vcov` and `vcov_args` arguments, so that results can be based on robust standard errors and confidence intervals.
* `equivalence_test()` and `p_significance()` work with objects returned by `model_parameters()`.
* `pool_parameters()` now better deals with models with multiple components (e.g. zero-inflation or dispersion).
* Revised and enhanced some documentation.
* Updated *glmmTMB* methods to work with the latest version of the package.
* Improved printing of `simulate_parameters()` for models from package *mclogit*.
* `print()` for `compare_parameters()` now also puts factor levels into square brackets, like the `print()` method for `model_parameters()`.
* `include_reference` now only adds the reference category of factors to the parameters table when those factors have appropriate contrasts (treatment or SAS contrasts).

## Bug fixes

* Arguments like `digits` etc. were ignored in `model_parameters()` for objects from the *marginaleffects* package.

# parameters 0.22.2

## New supported models

* Support for models `glm_weightit`, `multinom_weightit` and `ordinal_weightit` from package *WeightIt*.

## Changes

* Added `p_significance()` methods for frequentist models.
* Methods for `degrees_of_freedom()` have been removed. `degrees_of_freedom()` now calls `insight::get_df()`.
* `model_parameters()` for data frames and `draws` objects from package *posterior* also gets an `exponentiate` argument.

## Bug fixes

* Fixed issue with the warning about spuriously high coefficients for (non-Gaussian) Stan models.

# parameters 0.22.1

## Breaking changes

* Revised the calculation of the second generation p-value (SGPV) in `equivalence_test()`, which should now more accurately reflect the proportion of the interval that falls inside the ROPE. Formerly, the confidence interval was simply treated as uniformly distributed when calculating the SGPV; now the interval is assumed to be normally distributed.

## New supported models

* Support for `svy2lme` models from package *svylme*.

## Changes

* `standardize_parameters()` now also prettifies labels of factors.

## Bug fixes

* Fixed issue with `equivalence_test()` when the ROPE range was not symmetrically centered around zero (e.g., `range = c(-99, 0.1)`).
* `model_parameters()` for `anova()` from mixed models now also includes the denominator degrees of freedom in the output (`df_error`).
* `print(..., pretty_names = "labels")` for tobit models from package *AER* now includes value labels, if available.
* Patch release to ensure that *performance* runs with older versions of *datawizard* on Mac OS X with R (old-release).

# parameters 0.22.0

## Breaking changes

* Deprecated arguments in `model_parameters()` for `htest`, `aov` and `BFBayesFactor` objects were removed.
* Argument `effectsize_type` is deprecated. Please use `es_type` now. This change was necessary to avoid conflicts with partial matching of argument names (here: `effects`).

## New supported models

* Support for objects from `stats::Box.test()`.
* Support for `glmgee` models from package *glmtoolbox*.

## Bug fixes

* Fixed edge case in `predict()` for `factor_analysis()`.
* Fixed wrong ORCID in `DESCRIPTION`.

# parameters 0.21.7

## Changes

* Fixed issues related to the latest release of *marginaleffects*.

## Bug fixes

* Fixed issue in `compare_parameters()` for models from package *blme*.
* Fixed conflict in `model_parameters()` when both `include_reference = TRUE` and `pretty_names = "labels"` were used. Now, pretty labels are correctly updated and preserved.

# parameters 0.21.6

## New supported models

* Support for models of class `serp` (*serp*).

## Changes

* `include_reference` can now directly be set to `TRUE` in `model_parameters()` and doesn't require a call to `print()` anymore.
* `compare_parameters()` gains an `include_reference` argument, to add the reference category of categorical predictors to the parameters table.
* `print_md()` for `compare_parameters()` now by default uses the *tinytable* package to create markdown tables. This allows better control for column headings spanning over multiple columns.

## Bug fixes

* Fixed issue with parameter names for `model_parameters()` and objects from package *epiR*.
* Fixed issue with `exponentiate = TRUE` for `model_parameters()` with models of class `clmm` (package *ordinal*), when the model had no `component` column (e.g., no scale or location parameters were returned).
* `include_reference` now also works when factors were created "on-the-fly" inside the model formula (i.e. `y ~ as.factor(x)`).

# parameters 0.21.5

## Bug fixes

* Fixed CRAN check errors related to the changes in the latest update of *marginaleffects*.

# parameters 0.21.4

## Breaking changes

* The `exponentiate` argument of `model_parameters()` for `marginaleffects::predictions()` now defaults to `FALSE`, in line with all the other `model_parameters()` methods.
## Changes

* `model_parameters()` for models of package *survey* now gives informative messages when `bootstrap = TRUE` (which is currently not supported).
* `n_factors()` now also returns the explained variance for the number of factors as attributes.
* `model_parameters()` for objects of package *metafor* now warns when unsupported arguments (like `vcov`) are used.
* Improved documentation for `pool_parameters()`.

## Bug fixes

* `print(include_reference = TRUE)` for `model_parameters()` did not work when run inside a pipe-chain.
* Fixed issues with `format()` for objects returned by `compare_parameters()` that included mixed models.

# parameters 0.21.3

## Changes

* `principal_components()` and `factor_analysis()` now also work when argument `n = 1`.
* `print_md()` for `compare_parameters()` gains more arguments, similar to the `print()` method.
* `bootstrap_parameters()` and `model_parameters()` now accept bootstrapped samples returned by `bootstrap_model()`.
* The `print()` method for `model_parameters()` now also yields a warning for models with logit-links when possible issues with (quasi) complete separation occur.

## Bug fixes

* Fixed issue in `print_html()` for objects from package *ggeffects*.
* Fixed issues for `nnet::multinom()` with wide-format response variables (using `cbind()`).
* Minor fixes for the `print_html()` method for `model_parameters()`.
* Robust standard errors (argument `vcov`) now work for `plm` models.

# parameters 0.21.2

## Changes

* Minor improvements to factor analysis functions.
* The `ci_digits` argument of the `print()` method for `model_parameters()` now defaults to the same value as `digits`.
* `model_parameters()` for objects from package *marginaleffects* now also accepts the `exponentiate` argument.
* The `print()`, `print_html()`, `print_md()` and `format()` methods for `model_parameters()` get an `include_reference` argument, to add the reference category of categorical predictors to the parameters table.

## Bug fixes

* Fixed issue with wrong calculation of test statistics and p-values in `model_parameters()` for `fixest` models.
* Fixed issue with wrong column header for `glm` models with `family = binomial("identity")`.
* Minor fixes for `dominance_analysis()`.

# parameters 0.21.1

## General

* Added support for models of class `nestedLogit` (*nestedLogit*).

## Changes to functions

* `model_parameters()` now also prints correct "pretty names" when predictors were converted to ordered factors inside formulas, e.g. `y ~ as.ordered(x)`.
* `model_parameters()` now prints a message when the `vcov` argument is provided and `ci_method` is explicitly set to `"profile"`. Otherwise, when `vcov` is not `NULL` and `ci_method` is `NULL`, it defaults to `"wald"`, to return confidence intervals based on robust standard errors.

# parameters 0.21.0

## Breaking Changes

* It is no longer possible to calculate Satterthwaite-approximated degrees of freedom for mixed models from package *nlme*. This was based on the *lavaSearch2* package, which no longer seems to support models of class `lme`.

## Changes to functions

* Improved support for objects of class `mipo` for models with ordinal or categorical outcomes.

# parameters 0.20.3

## General

* Added support for models of class `hglm` (*hglm*), `mblogit` (*mclogit*), `fixest_multi` (*fixest*), and `phylolm` / `phyloglm` (*phylolm*).
* `as.data.frame` methods for extracting posterior draws via `bootstrap_model()` have been retired. Instead, directly using `bootstrap_model()` is recommended.
## Changes to functions

* `equivalence_test()` gets a method for `ggeffects` objects from package *ggeffects*.
* `equivalence_test()` now prints the `SGPV` column instead of `% in ROPE`. This is because the former `% in ROPE` actually was equivalent to the second generation p-value (SGPV) and refers to the proportion of the _range_ of the confidence interval that is covered by the ROPE. However, `% in ROPE` did not refer to the probability mass of the underlying distribution of a confidence interval that was covered by the ROPE, hence the old column name was a bit misleading.
* Fixed issue in `model_parameters.ggeffects()` to address forthcoming changes in the *ggeffects* package.

## Bug fixes

* When an invalid or unsupported value for the `p_adjust` argument in `model_parameters()` was provided, the valid options were not shown with correct capitalization, where appropriate.
* Fixed bug in `cluster_analysis()` for `include_factors = TRUE`.
* Fixed warning in `model_parameters()` and `ci()` for models from package *glmmTMB* when `ci_method` was either `"profile"` or `"uniroot"`.

# parameters 0.20.2

## General

* Reduced unnecessary warnings.
* The deprecated argument `df_method` in `model_parameters()` was removed.
* Output from `model_parameters()` for objects returned by `manova()` and `car::Manova()` is now more consistent.

## Bug fixes

* Fixed issues in tests for `mmrm` models.
* Fixed issue in `bootstrap_model()` for models of class `glmmTMB` with dispersion parameters.
* Fixed failing examples.

# parameters 0.20.1

## General

* Added support for models of class `flic` and `flac` (*logistf*), `mmrm` (*mmrm*).

## Changes

* `model_parameters()` now includes a `Group` column for `stanreg` or `brmsfit` models with random effects.
* The `print()` method for `model_parameters()` now uses the same pattern to print random effect variances for Bayesian models as for frequentist models.

## Bug fixes

* Fixed issue with the `print()` method for `compare_parameters()`, which duplicated random effects parameters rows in some edge cases.
* Fixed issue with the `print()` method for `compare_parameters()`, which didn't work properly when `ci = NULL`.

# parameters 0.20.0

## Breaking

* The deprecated argument `df_method` in `model_parameters()` is now defunct and throws an error when used.
* The deprecated functions `ci_robust()`, `p_robust()` and `standard_error_robust()` have been removed. These were superseded by the `vcov` argument in `ci()`, `p_value()`, and `standard_error()`, respectively.
* The `style` argument in `compare_parameters()` was renamed to `select`.

## New functions

* `p_function()`, to print and plot p-values and compatibility (confidence) intervals for statistical models, at different levels. This makes it possible to see which estimates are most compatible with the model at various compatibility levels.
* `p_calibrate()`, to compute calibrated p-values.

## Changes

* `model_parameters()` and `compare_parameters()` now use the Unicode character for the multiplication sign as interaction mark (i.e. `\u00d7`). Use `options(parameters_interaction = )` or the argument `interaction_mark` to use a different character as interaction mark.
* The `select` argument in `compare_parameters()`, which is used to control the table column elements, now supports an experimental glue-like syntax. See the vignette _Printing Model Parameters_. Furthermore, the `select` argument can also be used in the `print()` method for `model_parameters()`.
* `print_html()` gets `font_size` and `line_padding` arguments to tweak the appearance of HTML tables. Furthermore, arguments `select` and `column_labels` are new, to customize the column layout of tables. See examples in `?display`.
* Consolidation of vignettes on standardization of model parameters.
* Minor speed improvements.

## Bug fixes

* `model_parameters.BFBayesFactor()` no longer drops the `BF` column if the Bayes factor is `NA`.
* The `print()` and `display()` methods for `model_parameters()` from Bayesian models now pass the `...` to `insight::format_table()`, allowing extra arguments to be recognized.
* Fixed the footer message regarding the approximation method for CIs and p-values for mixed models.
* Fixed issues in the `print()` method for `compare_parameters()` with mixed models, when some models contained within-between components (see `wb_component`) and others did not.

# parameters 0.19.0

## Breaking

* Arguments that calculate effect sizes in `model_parameters()` for `htest`, Anova objects and objects of class `BFBayesFactor` were revised. Instead of single arguments for the different effect sizes, there is now one argument, `effectsize_type`. The reason behind this change is that many new types of effect sizes have meanwhile been added to the *effectsize* package, and the generic argument makes it possible to use those effect sizes.
* The attribute name in PCA / EFA has been changed from `data_set` to `dataset`.
* The minimum required R version has been bumped to `3.6`.
* Removed the deprecated argument `parameters` from `model_parameters()`.
* `standard_error_robust()`, `ci_robust()` and `p_value_robust()` are now deprecated and superseded by the `vcov` and `vcov_args` arguments in the related methods `standard_error()`, `ci()` and `p_value()`, respectively.
* The following functions were moved from package *parameters* to *performance*: `check_sphericity_bartlett()`, `check_kmo()`, `check_factorstructure()` and `check_clusterstructure()`.

## Changes to functions

* Added a `sparse` option to `principal_components()` for sparse PCA.
* The `pretty_names` argument from the `print()` method can now also be `"labels"`, which will then use variable and value labels (if data is labelled) as pretty names. If no labels were found, default pretty names are used.
* `bootstrap_model()` for models of class `glmmTMB` and `merMod` gains a `cluster` argument to specify optional clusters when the `parallel` option is set to `"snow"`.
* P-value adjustment (argument `p_adjust` in `model_parameters()`) is now performed after potential parameters were removed (using `keep` or `drop`), so the adjustment is only applied to the parameters of interest.
* Robust standard errors are now supported for `fixest` models with the `vcov` argument.
* `print()` for `model_parameters()` gains a `footer` argument, which can be used to suppress the footer in the output. Furthermore, if `footer = ""` or `footer = FALSE` in `print_md()`, no footer is printed.
* `simulate_model()` and `simulate_parameters()` now pass `...` to `insight::get_varcov()`, to allow simulated draws to be based on heteroscedasticity-consistent variance-covariance matrices.
* The `print()` method for `compare_parameters()` was improved for models with multiple components (e.g., mixed models with fixed and random effects, or models with count- and zero-inflation parts). For these models, `compare_parameters(effects = "all", component = "all")` prints more nicely.
## Bug fixes

* Fixed an erroneous warning for *p*-value adjustments when the differences between original and adjusted *p*-values were very small.

# parameters 0.18.2

## New functions

* New function `dominance_analysis()`, to compute dominance analysis statistics and designations.

## Changes to functions

* Argument `ci_random` in `model_parameters()` defaults to `NULL`. It uses a heuristic to determine if random effects confidence intervals are likely to take a long time to compute, and automatically includes or excludes those confidence intervals. Set `ci_random` to `TRUE` or `FALSE` to explicitly calculate or omit confidence intervals for random effects.

## Bug fixes

* Fixed issues in `pool_parameters()` for certain models with special components (like `MASS::polr()`), which failed when argument `component` was set to `"conditional"` (the default).
* Fixed issues in `model_parameters()` for multiple imputation models from package *Hmisc*.

# parameters 0.18.1

## General

* It is now possible to hide messages about the CI method below tables by specifying `options("parameters_cimethod" = FALSE)` (#722). By default, these messages are displayed.
* `model_parameters()` now supports objects from package *marginaleffects* and objects returned by `car::linearHypothesis()`.
* Added a `predict()` method for `cluster_meta` objects.
* Reorganized the documentation for `model_parameters()`.

## Changes to functions

* `model_parameters()` now also includes standard errors and confidence intervals for slope-slope-correlations of random effects variances.
* `model_parameters()` for mixed models gains a `ci_random` argument, to toggle whether confidence intervals for random effects parameters should also be computed. Set to `FALSE` if the calculation of confidence intervals for random effects parameters takes too long.
* `ci()` for *glmmTMB* models with `method = "profile"` is now more robust.

## Bug fixes

* Fixed issue with *glmmTMB* models when calculating confidence intervals for random effects failed due to singular fits.
* `display()` now correctly includes custom text and additional information in the footer (#722).
* Fixed issue with argument `column_names` in `compare_parameters()` when strings contained characters that needed to be escaped for regular expressions.
* Fixed issues with unknown arguments in `model_parameters()` for *lavaan* models when `standardize = TRUE`.

# parameters 0.18.0

## Breaking Changes

* `model_parameters()` no longer treats data frame inputs as posterior samples. Rather, `NULL` is now returned for data frames. If you want to treat a data frame as posterior samples, set the new argument `as_draws = TRUE`.

## New functions

* `sort_parameters()` to sort model parameters by coefficient values.
* `standardize_parameters()`, `standardize_info()` and `standardise_posteriors()` to standardize model parameters.

## Changes to functions

### `model_parameters()`

* `model_parameters()` for mixed models from package *lme4* now also reports confidence intervals for random effect variances by default. Formerly, CIs were only included when `ci_method` was `"profile"` or `"boot"`. The *merDeriv* package is required for this feature.
* `model_parameters()` for `htest` objects now also supports models from `var.test()`.
* Improved support for `anova.rms` models in `model_parameters()`.
* `model_parameters()` now supports `draws` objects from package *posterior* and `deltaMethods` objects from package *car*.
* `model_parameters()` now checks arguments and informs the user if certain arguments are not supported for that model class (e.g., `"vcov"` is currently not supported for models of class *glmmTMB*).

## Bug fixes

* The `vcov` argument, used for computing robust standard errors, did not calculate the correct p-values and confidence intervals for models of class `lme`.
* `pool_parameters()` did not save all relevant model information as attributes.
* `model_parameters()` for models from package *glmmTMB* did not work when `exponentiate = TRUE` and the model contained a dispersion parameter that was different from sigma. Furthermore, the dispersion parameter was falsely exponentiated.

# parameters 0.17.0

## General

* Added options to set defaults for different arguments. Currently supported:
  - `options("parameters_summary" = TRUE/FALSE)`, which sets the default value for the `summary` argument in `model_parameters()` for non-mixed models.
  - `options("parameters_mixed_summary" = TRUE/FALSE)`, which sets the default value for the `summary` argument in `model_parameters()` for mixed models.
* Minor improvements for `print()` methods.
* Robust uncertainty estimates:
  - The `vcov_estimation`, `vcov_type`, and `robust` arguments are deprecated in these functions: `model_parameters()`, `parameters()`, `standard_error()`, `p_value()`, and `ci()`. They are replaced by the `vcov` and `vcov_args` arguments.
  - The `standard_error_robust()` and `p_value_robust()` functions are superseded by the `vcov` and `vcov_args` arguments of the `standard_error()` and `p_value()` functions.
  - Vignette: https://easystats.github.io/parameters/articles/model_parameters_robust.html

## Bug fixes

* Fixed minor issues and edge cases in `n_clusters()` and related cluster functions.
* Fixed issue in `p_value()` that returned wrong p-values for `fixest::feols()`.

# parameters 0.16.0

## General

* Improved speed of `model_parameters()`, in particular for GLMs and mixed models where random effect variances were calculated.
* Added more options for printing `model_parameters()`. See also the revised vignette: https://easystats.github.io/parameters/articles/model_parameters_print.html

## Changes to functions

### `model_parameters()`

* `model_parameters()` for mixed models gains an `include_sigma` argument. If `TRUE`, adds the residual variance, computed from the random effects variances, as an attribute to the returned data frame. Including sigma was formerly the default behaviour, but it now defaults to `FALSE` and sigma is only included when `include_sigma = TRUE`, because the calculation was very time consuming.
* `model_parameters()` for `merMod` models now also computes CIs for the random SD parameters when `ci_method = "boot"` (previously, this was only possible when `ci_method` was `"profile"`).
* `model_parameters()` for `glmmTMB` models now computes CIs for the random SD parameters. Note that these are based on a Wald z-distribution.
* Similar to `model_parameters.htest()`, the `model_parameters.BFBayesFactor()` method gains `cohens_d` and `cramers_v` arguments to control whether frequentist effect size estimates are added to the returned summary data frame. Previously, this was done by default.
* Column names for coefficients from *emmeans* objects are now more specific.
* `model_parameters()` for `MixMod` objects (package *GLMMadaptive*) gains a `robust` argument, to compute robust standard errors.

## Bug fixes

* Fixed bug with `ci()` for class `merMod` when `method = "boot"`.
* Fixed issue with correct association of components for ordinal models of classes `clm` and `clm2`.
* Fixed issues in `random_parameters()` and `model_parameters()` for mixed models without random intercept.
* Confidence intervals for random parameters in `model_parameters()` failed for some `glmer` models.
* Fixed issue with the default `ci_type` in `compare_parameters()` for Bayesian models.

# parameters 0.15.0

## Breaking changes

* The following functions were moved to the new *datawizard* package and are now re-exported from the *parameters* package:
  - `center()`
  - `convert_data_to_numeric()`
  - `data_partition()`
  - `demean()` (and its aliases `degroup()` and `detrend()`)
  - `kurtosis()`
  - `rescale_weights()`
  - `skewness()`
  - `smoothness()`

  Note that these functions will be removed in the next release of the *parameters* package and they are currently being re-exported only as a convenience for package developers. This release should provide them with time to make the necessary changes before this breaking change is implemented.
* The following functions were moved to the *performance* package:
  - `check_heterogeneity()`
  - `check_multimodal()`

## General

* The handling of degrees-of-freedom approximation in `model_parameters()`, `ci()` and `p_value()` was revised and should now be more consistent. Some bugs related to the previous computation of confidence intervals and p-values have been fixed. It is now possible to change the method used to approximate degrees of freedom for CIs and p-values via the `ci_method` (resp. `method`) argument. This change has been documented in detail in `?model_parameters`, and online here: https://easystats.github.io/parameters/reference/model_parameters.html
* Minor changes to `print()` for *glmmTMB* with dispersion parameters.
* Added a vignette on printing options for model parameters.

## Changes to functions

### `model_parameters()`

* The `df_method` argument in `model_parameters()` is deprecated. Please use `ci_method` now.
* `model_parameters()` with `standardize = "refit"` now returns random effects from the standardized model.
* `model_parameters()` and `ci()` for `lmerMod` models gain a `"residuals"` option for the `ci_method` (resp. `method`) argument, to explicitly calculate confidence intervals based on the residual degrees of freedom, when present.
* `model_parameters()` supports the following new objects: `trimcibt`, `wmcpAKP`, `dep.effect` (in the *WRS2* package), `systemfit`.
* `model_parameters()` gains a new argument `table_wide` for ANOVA tables. This can be helpful for users who may wish to report ANOVA tables in wide format (i.e., with numerator and denominator degrees of freedom on the same row).
* `model_parameters()` gains two new arguments, `keep` and `drop`. `keep` is the new name for the former `parameters` argument and can be used to filter parameters. While `keep` selects those parameters whose names match the regular expression pattern defined in `keep`, `drop` is the counterpart and excludes matching parameter names.
* When `model_parameters()` is called with `verbose = TRUE`, and `ci_method` is not the default value, the printed output includes a message indicating which approximation method for degrees of freedom was used.
* `model_parameters()` for mixed models with `ci_method = "profile"` computes (profiled) confidence intervals for both fixed and random effects. Thus, `ci_method = "profile"` makes it possible to add confidence intervals to the random effect variances.
* `model_parameters()` should no longer fail for supported model classes when robust standard errors are not available.

### Other functions

* In `n_factors()`, the methods based on fit indices have been fixed and can be included separately (`package = "fit"`). Also added an `n_max` argument to crop the output.
* `compare_parameters()` now also accepts a list of model objects.
* `describe_distribution()` gets a `verbose` argument to toggle warnings and messages.
* `format_parameters()` removes dots and underscores from parameter names, to make these more "human readable".
* The experimental calculation of p-values in `equivalence_test()` was replaced by a proper calculation of p-values. The argument `p_value` was removed and p-values are now always included.
* Minor improvements to `print()`, `print_html()` and `print_md()`.

## Bug fixes

* The random effects returned by `model_parameters()` mistakenly displayed the residual standard deviation as the square root of the residual SD.
* Fixed issue with `model_parameters()` for *brmsfit* objects that model standard errors (i.e. for meta-analysis).
* Fixed issue in `model_parameters()` for `lmerMod` models that, by default, returned residual degrees of freedom in the statistic column, but confidence intervals were based on `Inf` degrees of freedom instead.
* Fixed issue in `ci_satterthwaite()`, which used `Inf` degrees of freedom instead of the Satterthwaite approximation.
* Fixed issue in `model_parameters.mlm()` when the model contained interaction terms.
* Fixed issue in `model_parameters.rma()` when the model contained interaction terms.
* Fixed sign error for `model_parameters.htest()` for objects created with `t.test.formula()` (issue #552).
* Fixed issue when computing random effect variances in `model_parameters()` for mixed models with categorical random slopes.

# parameters 0.14.0

## Breaking changes

* `check_sphericity()` has been renamed to `check_sphericity_bartlett()`.
* Removed deprecated arguments.
* `model_parameters()` for bootstrapped samples used in *emmeans* now treats the bootstrap samples as samples from posterior distributions (Bayesian models).

## New supported model classes

* `SemiParBIV` (*GJRM*), `selection` (*sampleSelection*), `htest` from the *survey* package, `pgmm` (*plm*).

## General

* Performance improvements for models from package *survey*.

## New functions

* Added a `summary()` method for `model_parameters()`, which is a convenient shortcut for `print(..., select = "minimal")`.

## Changes to functions

### `model_parameters()`

* `model_parameters()` gains a `parameters` argument, which takes a regular expression as a string, to select specific parameters from the returned data frame.
* `print()` for `model_parameters()` and `compare_parameters()` gains a `groups` argument, to group parameters in the output. Furthermore, `groups` can be used directly as an argument in `model_parameters()` and `compare_parameters()` and will be passed to the `print()` method.
* `model_parameters()` for ANOVAs now saves the type as an attribute and prints this information as a footer in the output as well.
* `model_parameters()` for `htest` objects now saves the alternative hypothesis as an attribute and prints this information as a footer in the output as well.
* `model_parameters()` passes arguments `type`, `parallel` and `n_cpus` down to `bootstrap_model()` when `bootstrap = TRUE`.

### Other

* `bootstrap_model()` for *merMod* and *glmmTMB* objects gains further arguments to set the type of bootstrapping and to allow parallel computing.
* `bootstrap_parameters()` gains the `ci_method` type `"bci"`, to compute bias-corrected and accelerated bootstrapped intervals.
* `ci()` for `svyglm` gains a `method` argument.

## Bug fixes

* Fixed issue in `model_parameters()` for *emmGrid* objects with Bayesian models.
* Arguments `digits`, `ci_digits` and `p_digits` were ignored for `print()` and only worked when used in the call to `model_parameters()` directly.

# parameters 0.13.0

## General

* Revised and improved the `print()` method for `model_parameters()`.

## New supported model classes

* `blrm` (*rmsb*), `AKP`, `med1way`, `robtab` (*WRS2*), `epi.2by2` (*epiR*), `mjoint` (*joineRML*), `mhurdle` (*mhurdle*), `sarlm` (*spatialreg*), `model_fit` (*tidymodels*), `BGGM` (*BGGM*), `mvord` (*mvord*).

## Changes to functions

### `model_parameters()`

* `model_parameters()` now fully treats `blavaan` models as Bayesian models and thus relies on the functions from *bayestestR* (i.e. ROPE, Rhat or ESS are reported).
* The `effects` argument of `model_parameters()` for mixed models was revised and now shows the random effects variances by default (same functionality as `random_parameters()`, but mimicking the behaviour from `broom.mixed::tidy()`). When the `group_level` argument is set to `TRUE`, the conditional modes (BLUPs) of the random effects are shown.
* `model_parameters()` for mixed models now returns an `Effects` column even when there is just one type of "effects", to mimic the behaviour from `broom.mixed::tidy()`. In conjunction with `standardize_names()`, users can get the same column names as in `tidy()` for `model_parameters()` objects.
* `model_parameters()` for t-tests now uses the group values as column names.
* `print()` for `model_parameters()` gains a `zap_small` argument, to avoid scientific notation for very small numbers. Instead, `zap_small` forces rounding to the specified number of digits.
* To be internally consistent, the degrees of freedom column for `lqm(m)` and `cgam(m)` objects (with *t*-statistic) is called `df_error`.
* `model_parameters()` gains a `summary` argument to add summary information about the model to printed outputs.
* Minor improvements for models from *quantreg*.
* `model_parameters()` supports rank-biserial, rank epsilon-squared, and Kendall's *W* as effect size measures for `wilcox.test()`, `kruskal.test()`, and `friedman.test()`, respectively.

### Other functions

* `describe_distribution()` gets a `quartiles` argument to include the 25th and 75th quartiles of a variable.

## Bug fixes

* Fixed issue with the non-initialized argument `style` in `display()` for `compare_parameters()`.
* Made `print()` for `compare_parameters()` work with objects that have "simple" column names for confidence intervals with a missing CI level (i.e. when the column is named `"CI"` instead of, say, `"95% CI"`).
* Fixed issue with `p_adjust` in `model_parameters()`, which did not work for adjustment methods `"BY"` and `"BH"`.
* Fixed issue with `show_sigma` in `print()` for `model_parameters()`.
* Fixed issue in `model_parameters()` with incorrect order of degrees of freedom.

# parameters 0.12.0

## General

* Rolled back the R dependency to R >= 3.4.
* Bootstrapped estimates (from `bootstrap_model()` or `bootstrap_parameters()`) can be passed to `emmeans` to obtain bootstrapped estimates, contrasts, simple slopes (etc.) and their CIs.
* These can then be passed to `model_parameters()` and related functions to obtain standard errors, p-values, etc.
## Breaking changes

* `model_parameters()` now always returns the confidence level as an additional `CI` column.
* The `rule` argument in `equivalence_test()` defaults to `"classic"`.

## New supported model classes

* `crr` (*cmprsk*), `leveneTest()` (*car*), `varest` (*vars*), `ergm` (*ergm*), `btergm` (*btergm*), `Rchoice` (*Rchoice*), `garch` (*tseries*).

## New functions

* `compare_parameters()` (and its alias `compare_models()`) to show/print parameters of multiple models in one table.

## Changes to functions

* Estimation of bootstrapped *p*-values has been re-written to be more accurate.
* `model_parameters()` for mixed models gains an `effects` argument, to return fixed, random or both fixed and random effects parameters.
* Revised printing for `model_parameters()` for *metafor* models.
* `model_parameters()` for *metafor* models now recognizes confidence levels specified in the function call (via argument `level`).
* Improved support for effect sizes in `model_parameters()` for `anova` objects.

## Bug fixes

* Fixed edge case when formatting parameters from polynomial terms with many degrees.
* Fixed issue with random sampling and dropped factor levels in `bootstrap_model()`.
parameters/inst/0000755000176200001440000000000015111301673013370 5ustar liggesusersparameters/inst/CITATION0000644000176200001440000000064714053423551014530 0ustar liggesusersbibentry(
  bibtype = "article",
  title = "Extracting, Computing and Exploring the Parameters of Statistical Models using {R}.",
  volume = "5",
  doi = "10.21105/joss.02445",
  number = "53",
  journal = "Journal of Open Source Software",
  author = c(person("Daniel", "Lüdecke"), person("Mattan S.", "Ben-Shachar"), person("Indrajeet", "Patil"), person("Dominique", "Makowski")),
  year = "2020",
  pages = "2445"
)
parameters/inst/doc/0000755000176200001440000000000015111301673014135 5ustar liggesusersparameters/inst/doc/overview_of_vignettes.Rmd0000644000176200001440000000442115060045621021225 0ustar liggesusers---
title: "Overview of Vignettes"
output:
  rmarkdown::html_vignette:
vignette: >
  %\VignetteIndexEntry{Overview of Vignettes}
  \usepackage[utf8]{inputenc}
  %\VignetteEngine{knitr::rmarkdown}
editor_options:
  chunk_output_type: console
---

```{r message=FALSE, warning=FALSE, include=FALSE}
library(knitr)
knitr::opts_chunk$set(
  echo = TRUE,
  collapse = TRUE,
  warning = FALSE,
  message = FALSE,
  comment = "#>",
  eval = TRUE
)
```

All package vignettes are available at [https://easystats.github.io/parameters/](https://easystats.github.io/parameters/).
## Function Overview

* [Function Reference](https://easystats.github.io/parameters/reference/index.html)

## Description of Parameters

* [Summary of Model Parameters](https://easystats.github.io/parameters/articles/model_parameters.html)
* [Parameter and Model Standardization](https://easystats.github.io/parameters/articles/standardize_parameters_effsize.html)
* [Robust Estimation of Standard Errors, Confidence Intervals, and p-values](https://easystats.github.io/parameters/articles/model_parameters_robust.html)
* [Model Parameters for Multiply Imputed Repeated Analyses](https://easystats.github.io/parameters/articles/model_parameters_mice.html)
* [Analysing Longitudinal or Panel Data](https://easystats.github.io/parameters/articles/demean.html)

## Formatting, Printing and Plotting

* [Formatting Model Parameters](https://easystats.github.io/parameters/articles/model_parameters_formatting.html)
* [Printing Model Parameters](https://easystats.github.io/parameters/articles/model_parameters_print.html)
* [Plotting functions](https://easystats.github.io/see/articles/parameters.html)

## Dimension Reduction and Clustering

* [Feature Reduction (PCA, cMDS, ICA, ...)](https://easystats.github.io/parameters/articles/parameters_reduction.html)
* [Structural Models (EFA, CFA, SEM, ...)](https://easystats.github.io/parameters/articles/efa_cfa.html)
* [Selection of Model Parameters](https://easystats.github.io/parameters/articles/parameters_selection.html)
* [Clustering with easystats](https://easystats.github.io/parameters/articles/clustering.html)

## Plotting Functions

* [Plotting functions in the **see** package](https://easystats.github.io/see/articles/parameters.html)
parameters/inst/doc/overview_of_vignettes.R0000644000176200001440000000035515111301673020705 0ustar liggesusers## ----message=FALSE, warning=FALSE, include=FALSE------------------------------
library(knitr)
knitr::opts_chunk$set(
  echo = TRUE,
  collapse = TRUE,
  warning = FALSE,
  message = FALSE,
  comment = "#>",
  eval = TRUE
)
parameters/inst/doc/overview_of_vignettes.html0000644000176200001440000001655015111301673021454 0ustar liggesusersOverview of Vignettes
parameters/inst/WORDLIST0000644000176200001440000000630415073732442014576 0ustar liggesusersADF AER Adressing Amrhein Analysing Anova Arel Azen BGGM BLUPs BMC BMJ Bayarri BayesFM BayesFactor BFBayesFactor Bentler Bergh Biometrics Biometrika Blume Budescu Bundock CFA CMD CNG CRC Cattell Cattell's CrossValidated Curently D'Agostino DAS DBSCAN DG DOI DRR Davison De Delacre DirichletReg DirichletRegModel DoF DoFs Dom Dorie Dupont DV EFA EGAnet ELPD ENP ESS Eingenvalues Eivenvalues Elff Epskamp FactoMineR FastICA Fidell Fisherian GJRM GLMM GLMMadaptive Garrido Gelman Golino Gorsuch Greevy Groemping Gustafson HC HDI HEXACO HLM Heisig Hesketh Heteroskedasticity Higgs Hinkley Hjort Holbert Hmisc Hofman Hofmann Hornik Isager IC ICA IRR JB JM JRM Jurs Jupyter KJ KMO Kenward Korner Kruschke Kutner LMM LMMs Lakens Laparra Lawley Liu MADs MCMCglmm MLM MPE MSA Maechler Magnusson Malo Mattan McNemar Merkle Metaclustering Monti Møller Montiel Mundlak NHST NL Neter Neyman Nieto Nievergelt Nondegenerate Nonresponse ORCID Olea Olkin PCoA PHQ PLOS PMCMRplus PeerJ Pernet Pettersson Plagborg PloS Psychometrika REWB ROPE's Rabe Rafi Rchoice Revelle Rhat Rocklin Rosseel Rousseeuw Routledge Rothman Scand Senn Statist SBC SDs SEM SEs SGPV Sadana Satterthwaite Satterthwaite's Schaeffer Scheel Schweder Sellke Shachar Shi Shikano Shmekels Sivula Sphericity Stata Stigum Struyf Synthese SVARs TOST Tabachnick Thiyagarajan Timepoint Turkheimer VGAM VSS Valls Vehtari Velicer Vos WRS WeightIt Wasserman Wisenbaker Zoski afex al aleatoric anova aod arxiv bamlss bayes bayesian bayestestR bbmle behaviour behaviours betareg biserial blavaan blme bmwiernik brglm brms brmsfit btergm cAIC cMDS censReg centre centred centroid cet ci clubSandwich cmprsk codecov countreg clm coxme cplm datanovia datawizard de decompositions demstats df distributons doi easystats effectsize effectsizes elpd emmGrid emmeans endogeneity epiR eps equivariance ergm et exponentiate exponentiating fastICA fixest gam gamlss gamm gaussianity ggeffects github glm glm's glmgee glmmTMB glmx glmtoolbox hclust heteroskedasticity hglm homoscedasticity htest http https hyperspectral interpretability interpretable io ivfixed ivprobit jeffreymgirard joineRML joss jstatsoft kmeans labelled lakens lavaan lavaSearch lcmm lesslikely lm lme lmerTest lmodel lmtest loadings logistf logitsf marginaleffects maxLik mblogit mclogit mclust mjoint meaned merDeriv merMod metaBMA metacluster metaclustering metafor metaplus mfx mgcv mhurdle mlogit mmrm modelbased modelsummary multcomp multicollinearity mvord nestedLogit nlme nnet nubmer onwards pam pamk patilindrajeets performant phylolm plm posthoc pre priori probabilistically ps pscl quantreg quartiles reproducibility rmarkdown rmsb robustlmm rownumbers rstanarm sampleSelection sdy setosa serp spaMM spatialreg sphericity strengejacke subclusters subscale subscales svylme systemfit th tidymodels tinytable tobit tseries unicode unreplicable varEST varimax vincentab www ’ parameters/README.md0000644000176200001440000004473315057525051013714 0ustar liggesusers # parameters [![DOI](https://joss.theoj.org/papers/10.21105/joss.02445/status.svg)](https://doi.org/10.21105/joss.02445) [![downloads](https://cranlogs.r-pkg.org/badges/parameters)](https://cran.r-project.org/package=parameters) [![total](https://cranlogs.r-pkg.org/badges/grand-total/parameters)](https://cranlogs.r-pkg.org/) ***Describe and understand your model’s parameters!*** **parameters**’ primary goal is to provide utilities for processing the parameters of various 
statistical models (see [here](https://easystats.github.io/insight/) for a list of supported models).

Beyond computing *p-values*, *CIs*, *Bayesian indices* and other measures for a wide variety of models, this package implements features like *bootstrapping* of parameters and models, *feature reduction* (feature extraction and variable selection), and tools for data reduction, such as functions to perform cluster, factor or principal component analysis.

Another important goal of the **parameters** package is to facilitate and streamline the process of reporting results of statistical models, which includes the easy and intuitive calculation of standardized estimates or robust standard errors and p-values. **parameters** therefore offers a simple and unified syntax to process a large variety of (model) objects from many different packages.

## Installation

[![CRAN](https://www.r-pkg.org/badges/version/parameters)](https://cran.r-project.org/package=parameters)
[![parameters status badge](https://easystats.r-universe.dev/badges/parameters)](https://easystats.r-universe.dev)
[![codecov](https://codecov.io/gh/easystats/parameters/branch/main/graph/badge.svg)](https://app.codecov.io/gh/easystats/parameters)

| Type        | Source     | Command                                                                       |
|-------------|------------|-------------------------------------------------------------------------------|
| Release     | CRAN       | `install.packages("parameters")`                                              |
| Development | R-universe | `install.packages("parameters", repos = "https://easystats.r-universe.dev")`  |
| Development | GitHub     | `remotes::install_github("easystats/parameters")`                             |

> **Tip**
>
> Instead of `library(parameters)`, use `library(easystats)`. This will
> make all features of the easystats ecosystem available.
>
> To stay updated, use `easystats::install_latest()`.

## Documentation

[![Documentation](https://img.shields.io/badge/documentation-parameters-orange.svg?colorB=E91E63)](https://easystats.github.io/parameters/)
[![Blog](https://img.shields.io/badge/blog-easystats-orange.svg?colorB=FF9800)](https://easystats.github.io/blog/posts/)
[![Features](https://img.shields.io/badge/features-parameters-orange.svg?colorB=2196F3)](https://easystats.github.io/parameters/reference/index.html)

Click on the buttons above to access the package [documentation](https://easystats.github.io/parameters/) and the [easystats blog](https://easystats.github.io/blog/posts/), and check out these vignettes:

- [Summary of Model Parameters](https://easystats.github.io/parameters/articles/model_parameters.html)
- [Parameter and Model Standardization](https://easystats.github.io/parameters/articles/standardize_parameters_effsize.html)
- [Robust Estimation of Standard Errors, Confidence Intervals and p-values](https://easystats.github.io/parameters/articles/model_parameters_robust.html)
- [Model Parameters and Missing Data](https://easystats.github.io/parameters/articles/model_parameters_mice.html)
- [Feature reduction (PCA, cMDS, ICA…)](https://easystats.github.io/parameters/articles/parameters_reduction.html)
- [Structural models (EFA, CFA, SEM…)](https://easystats.github.io/parameters/articles/efa_cfa.html)
- [Parameters selection](https://easystats.github.io/parameters/articles/parameters_selection.html)
- [A Practical Guide for Panel Data Analysis](https://easystats.github.io/parameters/articles/demean.html)
- [Plotting functions](https://easystats.github.io/see/articles/parameters.html)

## Contributing and Support

In case you want to file an issue or contribute in another way to the package, please follow [this guide](https://github.com/easystats/parameters/blob/main/.github/CONTRIBUTING.md).
For questions about the functionality, you may contact us via email or file an issue.

# Features

## Model's parameters description

The [`model_parameters()`](https://easystats.github.io/parameters/articles/model_parameters.html) function (which can be accessed via the `parameters()` shortcut) allows you to extract the parameters and their characteristics from various models in a consistent way. It can be considered a lightweight alternative to [`broom::tidy()`](https://github.com/tidymodels/broom), with some notable differences:

- The column names of the returned data frame are *specific* to their content. For instance, the column containing the statistic is named following the statistic name, i.e., *t*, *z*, etc., instead of a generic name such as *statistic* (however, you can get standardized (generic) column names using [`standardize_names()`](https://easystats.github.io/insight/reference/standardize_names.html)).
- It is able to compute or extract indices not available by default, such as *p-values*, *CIs*, etc.
- It includes *feature engineering* capabilities, including parameters [bootstrapping](https://easystats.github.io/parameters/reference/bootstrap_parameters.html).

### Classical Regression Models

``` r
model <- lm(Sepal.Width ~ Petal.Length * Species + Petal.Width, data = iris)

# regular model parameters
model_parameters(model)
#> Parameter | Coefficient | SE | 95% CI | t(143) | p
#> -------------------------------------------------------------------------------------------
#> (Intercept) | 2.89 | 0.36 | [ 2.18, 3.60] | 8.01 | < .001
#> Petal Length | 0.26 | 0.25 | [-0.22, 0.75] | 1.07 | 0.287
#> Species [versicolor] | -1.66 | 0.53 | [-2.71, -0.62] | -3.14 | 0.002
#> Species [virginica] | -1.92 | 0.59 | [-3.08, -0.76] | -3.28 | 0.001
#> Petal Width | 0.62 | 0.14 | [ 0.34, 0.89] | 4.41 | < .001
#> Petal Length × Species [versicolor] | -0.09 | 0.26 | [-0.61, 0.42] | -0.36 | 0.721
#> Petal Length × Species [virginica] | -0.13 | 0.26 | [-0.64, 0.38] | -0.50 | 0.618

# standardized parameters
model_parameters(model, standardize = "refit")
#> Parameter | Coefficient | SE | 95% CI | t(143) | p
#> -------------------------------------------------------------------------------------------
#> (Intercept) | 3.59 | 1.30 | [ 1.01, 6.17] | 2.75 | 0.007
#> Petal Length | 1.07 | 1.00 | [-0.91, 3.04] | 1.07 | 0.287
#> Species [versicolor] | -4.62 | 1.31 | [-7.21, -2.03] | -3.53 | < .001
#> Species [virginica] | -5.51 | 1.38 | [-8.23, -2.79] | -4.00 | < .001
#> Petal Width | 1.08 | 0.24 | [ 0.59, 1.56] | 4.41 | < .001
#> Petal Length × Species [versicolor] | -0.38 | 1.06 | [-2.48, 1.72] | -0.36 | 0.721
#> Petal Length × Species [virginica] | -0.52 | 1.04 | [-2.58, 1.54] | -0.50 | 0.618

# heteroscedasticity-consistent SE and CI
model_parameters(model, vcov = "HC3")
#> Parameter | Coefficient | SE | 95% CI | t(143) | p
#> -------------------------------------------------------------------------------------------
#> (Intercept) | 2.89 | 0.43 | [ 2.03, 3.75] | 6.66 | < .001
#> Petal Length | 0.26 | 0.29 | [-0.30, 0.83] | 0.92 | 0.357
#> Species [versicolor] | -1.66 | 0.53 | [-2.70, -0.62] | -3.16 | 0.002
#> Species [virginica] | -1.92 | 0.77 | [-3.43, -0.41] | -2.51 | 0.013
#> Petal Width | 0.62 | 0.12 | [ 0.38, 0.85] | 5.23 | < .001
#> Petal Length × Species [versicolor] | -0.09 | 0.29 | [-0.67, 0.48] | -0.32 | 0.748
#> Petal Length × Species [virginica] | -0.13 | 0.31 | [-0.73, 0.48] | -0.42 | 0.675
```

### Mixed Models

``` r
library(lme4)
model <- lmer(Sepal.Width ~ Petal.Length + (1 | Species), data = iris)

# model parameters with CI, df and p-values based on Wald approximation
model_parameters(model)
#> # Fixed Effects
#>
#> Parameter | Coefficient | SE | 95% CI | t(146) | p
#> ------------------------------------------------------------------
#> (Intercept) | 2.00 | 0.56 | [0.89, 3.11] | 3.56 | < .001
#> Petal Length | 0.28 | 0.06 | [0.16, 0.40] | 4.75 | < .001
#>
#> # Random Effects
#>
#> Parameter | Coefficient | SE | 95% CI
#> -----------------------------------------------------------
#> SD (Intercept: Species) | 0.89 | 0.46 | [0.33, 2.43]
#> SD (Residual) | 0.32 | 0.02 | [0.28, 0.35]

# model parameters with CI, df and p-values based on Kenward-Roger approximation
model_parameters(model, ci_method = "kenward", effects = "fixed")
#> # Fixed Effects
#>
#> Parameter | Coefficient | SE | 95% CI | t | df | p
#> -------------------------------------------------------------------------
#> (Intercept) | 2.00 | 0.57 | [0.07, 3.93] | 3.53 | 2.67 | 0.046
#> Petal Length | 0.28 | 0.06 | [0.16, 0.40] | 4.58 | 140.98 | < .001
```

### Structural Models

Besides many types of regression models and packages, it also works for other types of models, such as [**structural models**](https://easystats.github.io/parameters/articles/efa_cfa.html) (EFA, CFA, SEM…).

``` r
library(psych)

model <- psych::fa(attitude, nfactors = 3)
model_parameters(model)
#> # Rotated loadings from Factor Analysis (oblimin-rotation)
#>
#> Variable | MR1 | MR2 | MR3 | Complexity | Uniqueness
#> ------------------------------------------------------------
#> rating | 0.90 | -0.07 | -0.05 | 1.02 | 0.23
#> complaints | 0.97 | -0.06 | 0.04 | 1.01 | 0.10
#> privileges | 0.44 | 0.25 | -0.05 | 1.64 | 0.65
#> learning | 0.47 | 0.54 | -0.28 | 2.51 | 0.24
#> raises | 0.55 | 0.43 | 0.25 | 2.35 | 0.23
#> critical | 0.16 | 0.17 | 0.48 | 1.46 | 0.67
#> advance | -0.11 | 0.91 | 0.07 | 1.04 | 0.22
#>
#> The 3 latent factors (oblimin rotation) accounted for 66.60% of the total variance of the original data (MR1 = 38.19%, MR2 = 22.69%, MR3 = 5.72%).
```

## Variable and parameters selection

[`select_parameters()`](https://easystats.github.io/parameters/articles/parameters_selection.html) can help you quickly select and retain the most relevant predictors using methods tailored for the model type.

``` r
lm(disp ~ ., data = mtcars) |>
  select_parameters() |>
  model_parameters()
#> Parameter | Coefficient | SE | 95% CI | t(26) | p
#> -----------------------------------------------------------------------
#> (Intercept) | 141.70 | 125.67 | [-116.62, 400.02] | 1.13 | 0.270
#> cyl | 13.14 | 7.90 | [ -3.10, 29.38] | 1.66 | 0.108
#> hp | 0.63 | 0.20 | [ 0.22, 1.03] | 3.18 | 0.004
#> wt | 80.45 | 12.22 | [ 55.33, 105.57] | 6.58 | < .001
#> qsec | -14.68 | 6.14 | [ -27.31, -2.05] | -2.39 | 0.024
#> carb | -28.75 | 5.60 | [ -40.28, -17.23] | -5.13 | < .001
```

## Statistical inference - how to quantify evidence

There is no standardized approach to drawing conclusions based on the available data and statistical models. A frequently chosen but also much criticized approach is to evaluate results based on their statistical significance (Amrhein, Korner-Nievergelt, & Roth, 2017). A more sophisticated way would be to test whether estimated effects exceed the "smallest effect size of interest", to avoid even the smallest effects being considered relevant simply because they are statistically significant, but clinically or practically irrelevant (Lakens, 2024; Lakens, Scheel, & Isager, 2018).
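As a minimal sketch of this idea (the ROPE of ±0.1 used below is an arbitrary value picked purely for illustration; in practice, the smallest effect size of interest should be justified on substantive grounds):

``` r
library(parameters)

model <- lm(mpg ~ wt + gear, data = mtcars)

# conditional equivalence test: is each coefficient practically
# equivalent to zero, given the (illustrative) ROPE of +/- 0.1?
equivalence_test(model, range = c(-0.1, 0.1))

# the unidirectional analogue: probability that each effect
# exceeds the practical-significance threshold
p_significance(model, threshold = 0.1)
```

When `range` and `threshold` are left at their `"default"` values, both functions derive the region of practical equivalence from the model instead (see their documentation for details).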
A rather unconventional approach, which is nevertheless advocated by various authors, is to interpret results from classical regression models in terms of probabilities, similar to the usual approach in Bayesian statistics (Greenland, Rafi, Matthews, & Higgs, 2022; Rafi & Greenland, 2020; Schweder, 2018; Schweder & Hjort, 2003; Vos & Holbert, 2022).

The *parameters* package provides several options or functions to aid statistical inference. These are, for example:

- [`equivalence_test()`](https://easystats.github.io/parameters/reference/equivalence_test.lm.html), to compute the (conditional) equivalence test for frequentist models
- [`p_significance()`](https://easystats.github.io/parameters/reference/p_significance.lm.html), to compute the probability of *practical significance*, which can be conceptualized as a unidirectional equivalence test
- [`p_function()`](https://easystats.github.io/parameters/reference/p_function.html), or *consonance function*, to compute p-values and compatibility (confidence) intervals for statistical models, at different levels
- the `pd` argument (setting `pd = TRUE`) in `model_parameters()` includes a column with the *probability of direction*, i.e. the probability that a parameter is strictly positive or negative. See [`bayestestR::p_direction()`](https://easystats.github.io/bayestestR/reference/p_direction.html) for details.
- the `s_value` argument (setting `s_value = TRUE`) in `model_parameters()` replaces the p-values with their related *S*-values (Rafi & Greenland, 2020)
- finally, it is possible to generate distributions of model coefficients by drawing bootstrap samples (setting `bootstrap = TRUE`) or simulating draws from model coefficients using [`simulate_model()`](https://easystats.github.io/parameters/reference/simulate_model.html). These samples can then be treated as "posterior samples" and used in many functions from the **bayestestR** package.

Most of the options or functions shown above derive from methods originally implemented for Bayesian models (Makowski, Ben-Shachar, Chen, & Lüdecke, 2019). However, assuming that model assumptions are met (which means the model fits the data well, and the chosen model reflects the data-generating process (distributional model family), etc.), it seems appropriate to interpret results from classical frequentist models in a "Bayesian way" (for more details, see the documentation of [`p_function()`](https://easystats.github.io/parameters/reference/p_function.html)).

## Citation

In order to cite this package, please use the following command:

``` r
citation("parameters")

To cite package 'parameters' in publications use:

  Lüdecke D, Ben-Shachar M, Patil I, Makowski D (2020). "Extracting,
  Computing and Exploring the Parameters of Statistical Models using
  R." _Journal of Open Source Software_, *5*(53), 2445.
  doi:10.21105/joss.02445.

A BibTeX entry for LaTeX users is

  @Article{,
    title = {Extracting, Computing and Exploring the Parameters of Statistical Models using {R}.},
    volume = {5},
    doi = {10.21105/joss.02445},
    number = {53},
    journal = {Journal of Open Source Software},
    author = {Daniel Lüdecke and Mattan S. Ben-Shachar and Indrajeet Patil and Dominique Makowski},
    year = {2020},
    pages = {2445},
  }
```

## Code of Conduct

Please note that the parameters project is released with a [Contributor Code of Conduct](https://www.contributor-covenant.org/version/2/1/code_of_conduct/). By contributing to this project, you agree to abide by its terms.

## References
Amrhein, V., Korner-Nievergelt, F., & Roth, T. (2017). The earth is flat (*p* \> 0.05): Significance thresholds and the crisis of unreplicable research. *PeerJ*, *5*, e3544.
Greenland, S., Rafi, Z., Matthews, R., & Higgs, M. (2022). *To Aid Scientific Inference, Emphasize Unconditional Compatibility Descriptions of Statistics*. Retrieved from https://arxiv.org/abs/1909.08583v7
Lakens, D. (2024). *Improving Your Statistical Inferences*.
Lakens, D., Scheel, A. M., & Isager, P. M. (2018). Equivalence testing for psychological research: A tutorial. *Advances in Methods and Practices in Psychological Science*, *1*(2), 259–269.
Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., & Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. *Frontiers in Psychology*, *10*, 2767.
Rafi, Z., & Greenland, S. (2020). Semantic and cognitive tools to aid statistical science: Replace confidence and significance by compatibility and surprise. *BMC Medical Research Methodology*, *20*(1), 244.
Schweder, T. (2018). Confidence is epistemic probability for empirical science. *Journal of Statistical Planning and Inference*, *195*, 116–125.
Schweder, T., & Hjort, N. L. (2003). Frequentist Analogues of Priors and Posteriors. In B. Stigum (Ed.), *Econometrics and the Philosophy of Economics: Theory-Data Confrontations in Economics* (pp. 285–317). Princeton: Princeton University Press.
Vos, P., & Holbert, D. (2022). Frequentist statistical inference without repeated sampling. *Synthese*, *200*(2), 89.
parameters/build/0000755000176200001440000000000015111301673013512 5ustar liggesusersparameters/build/vignette.rds0000644000176200001440000000033115111301673016046 0ustar liggesusersparameters/build/partial.rdb0000644000176200001440000000007515111301640015633 0ustar liggesusersparameters/man/0000755000176200001440000000000015073732442013177 5ustar liggesusersparameters/man/print.parameters_model.Rd0000644000176200001440000004132615066721002020142 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format.R, R/print.parameters_model.R, % R/print_html.R, R/print_md.R \name{format.parameters_model} \alias{format.parameters_model} \alias{print.parameters_model} \alias{summary.parameters_model} \alias{print_html.parameters_model} \alias{print_md.parameters_model} \title{Print model parameters} \usage{ \method{format}{parameters_model}( x, pretty_names = TRUE, split_components = TRUE, select = NULL, digits = 2, ci_digits = digits, p_digits = 3, ci_width = NULL, ci_brackets = NULL, zap_small = FALSE, format = NULL, groups = NULL, include_reference = FALSE, ... ) \method{print}{parameters_model}( x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, footer = NULL, digits = 2, ci_digits = digits, p_digits = 3, footer_digits = 3, show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, groups = NULL, column_width = NULL, ci_brackets = c("[", "]"), include_reference = FALSE, ... ) \method{summary}{parameters_model}(object, ...) \method{print_html}{parameters_model}( x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, digits = 2, ci_digits = digits, p_digits = 3, footer_digits = 3, ci_brackets = c("(", ")"), show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, groups = NULL, font_size = "100\%", line_padding = 4, column_labels = NULL, include_reference = FALSE, verbose = TRUE, ... ) \method{print_md}{parameters_model}( x, pretty_names = TRUE, split_components = TRUE, select = NULL, caption = NULL, subtitle = NULL, footer = NULL, align = NULL, digits = 2, ci_digits = digits, p_digits = 3, footer_digits = 3, ci_brackets = c("(", ")"), show_sigma = FALSE, show_formula = FALSE, zap_small = FALSE, groups = NULL, include_reference = FALSE, verbose = TRUE, ... ) } \arguments{ \item{x, object}{An object returned by \code{\link[=model_parameters]{model_parameters()}}.} \item{pretty_names}{Can be \code{TRUE}, which will return "pretty" (i.e. more human readable) parameter names. Or \code{"labels"}, in which case value and variable labels will be used as parameter names. The latter only works for "labelled" data, i.e. if the data used to fit the model had \code{"label"} and \code{"labels"} attributes. See also section \emph{Global Options to Customize Messages when Printing}.} \item{split_components}{Logical, if \code{TRUE} (default), for models with multiple components (zero-inflation, smooth terms, ...), each component is printed in a separate table. If \code{FALSE}, model parameters are printed in a single table and a \code{Component} column is added to the output.} \item{select}{Determines which columns, and which layout of columns, are printed.
There are three options for this argument: \itemize{ \item \strong{Selecting columns by name or index} \code{select} can be a character vector (or numeric index) of column names that should be printed, where columns are extracted from the data frame returned by \code{model_parameters()} and related functions. There are two pre-defined options for selecting columns: \code{select = "minimal"} prints coefficients, confidence intervals and p-values, while \code{select = "short"} prints coefficients, standard errors and p-values. \item \strong{A string expression with layout pattern} \code{select} is a string with "tokens" enclosed in braces. These tokens will be replaced by their associated columns, where the selected columns will be collapsed into one column. The following tokens are replaced by the related coefficients or statistics: \code{{estimate}}, \code{{se}}, \code{{ci}} (or \code{{ci_low}} and \code{{ci_high}}), \code{{p}} and \code{{stars}}. The token \code{{ci}} will be replaced by \verb{\{ci_low\}, \{ci_high\}}. Example: \code{select = "{estimate}{stars} ({ci})"} It is possible to create multiple columns as well. A \code{|} separates values into new cells/columns. Example: \code{select = "{estimate} ({ci})|{p}"}. If \code{format = "html"}, a \verb{<br>} inserts a line break inside a cell. See 'Examples'. \item \strong{A string indicating a pre-defined layout} \code{select} can be one of the following string values, to create one of the following pre-defined column layouts: \itemize{ \item \code{"ci"}: Estimates and confidence intervals, no asterisks for p-values. This is equivalent to \code{select = "{estimate} ({ci})"}. \item \code{"se"}: Estimates and standard errors, no asterisks for p-values. This is equivalent to \code{select = "{estimate} ({se})"}. \item \code{"ci_p"}: Estimates, confidence intervals and asterisks for p-values. This is equivalent to \code{select = "{estimate}{stars} ({ci})"}. \item \code{"se_p"}: Estimates, standard errors and asterisks for p-values. This is equivalent to \code{select = "{estimate}{stars} ({se})"}. \item \code{"ci_p2"}: Estimates, confidence intervals and numeric p-values, in two columns. This is equivalent to \code{select = "{estimate} ({ci})|{p}"}. \item \code{"se_p2"}: Estimates, standard errors and numeric p-values, in two columns. This is equivalent to \code{select = "{estimate} ({se})|{p}"}. } } For \code{model_parameters()}, glue-like syntax is still experimental in the case of more complex models (like mixed models) and may not return expected results.} \item{digits, ci_digits, p_digits}{Number of digits for rounding or significant figures. May also be \code{"signif"} to return significant figures or \code{"scientific"} to return scientific notation. Control the number of digits by adding the value as suffix, e.g. \code{digits = "scientific4"} to have scientific notation with 4 decimal places, or \code{digits = "signif5"} for 5 significant figures (see also \code{\link[=signif]{signif()}}).} \item{ci_width}{Minimum width of the returned string for confidence intervals. If not \code{NULL} and width is larger than the string's length, leading whitespaces are added to the string. If \code{width="auto"}, width will be set to the length of the longest string.} \item{ci_brackets}{Logical, if \code{TRUE} (default), CI-values are encompassed in square brackets (else in parentheses).} \item{zap_small}{Logical, if \code{TRUE}, small values are rounded after \code{digits} decimal places. If \code{FALSE}, values with more decimal places than \code{digits} are printed in scientific notation.} \item{format}{Name of output-format, as string. If \code{NULL} (or \code{"text"}), returned output is used for basic printing. Can be one of \code{NULL} (the default) resp. \code{"text"} for plain text, \code{"markdown"} (or \code{"md"}) for markdown and \code{"html"} for HTML output. A special option is \code{"tt"}, which creates a \code{\link[tinytable:tt]{tinytable::tt()}} object, where the output format is dependent on the context where the table is used, i.e. it can be markdown format when \code{export_table()} is used in markdown files, or LaTeX format when creating PDFs etc.} \item{groups}{Named list, can be used to group parameters in the printed output. List elements may either be character vectors that match the name of those parameters that belong to one group, or list elements can be row numbers of those parameter rows that should belong to one group. The names of the list elements will be used as group names, which will be inserted as "header row". A possible use case might be to emphasize focal predictors and control variables, see 'Examples'.
Parameters will be re-ordered according to the order used in \code{groups}, while all non-matching parameters will be added to the end.} \item{include_reference}{Logical, if \code{TRUE}, the reference level of factors will be added to the parameters table. This is only relevant for models with categorical predictors. The coefficient for the reference level is always \code{0} (except when \code{exponentiate = TRUE}, then the coefficient will be \code{1}), so this is just for completeness.} \item{...}{Arguments passed down to \code{\link[=format.parameters_model]{format.parameters_model()}}, \code{\link[insight:format_table]{insight::format_table()}} and \code{\link[insight:export_table]{insight::export_table()}}} \item{caption}{Table caption as string. If \code{NULL}, depending on the model, either a default caption or no table caption is printed. Use \code{caption = ""} to suppress the table caption.} \item{footer}{Can either be \code{FALSE} or an empty string (i.e. \code{""}) to suppress the footer, \code{NULL} to print the default footer, or a string. The latter will combine the string value with the default footer.} \item{footer_digits}{Number of decimal places for values in the footer summary.} \item{show_sigma}{Logical, if \code{TRUE}, adds information about the residual standard deviation.} \item{show_formula}{Logical, if \code{TRUE}, adds the model formula to the output.} \item{column_width}{Width of table columns. Can be either \code{NULL}, a named numeric vector, or \code{"fixed"}. If \code{NULL}, the width for each table column is adjusted to the minimum required width. If a named numeric vector, value names are matched against column names, and for each match, the specified width is used. If \code{"fixed"}, and table is split into multiple components, columns across all table components are adjusted to have the same width.} \item{subtitle}{Table title (same as caption) and subtitle, as strings. If \code{NULL}, no title or subtitle is printed, unless it is stored as attributes (\code{table_title}, or its alias \code{table_caption}, and \code{table_subtitle}). If \code{x} is a list of data frames, \code{caption} may be a list of table captions, one for each table.} \item{align}{Only applies to HTML tables. May be one of \code{"left"}, \code{"right"} or \code{"center"}.} \item{font_size}{For HTML tables, the font size.} \item{line_padding}{For HTML tables, the distance (in pixel) between lines.} \item{column_labels}{Labels of columns for HTML tables. If \code{NULL}, automatic column names are generated. See 'Examples'.} \item{verbose}{Toggle messages and warnings.} } \value{ Invisibly returns the original input object. } \description{ A \code{print()}-method for objects from \code{\link[=model_parameters]{model_parameters()}}. } \details{ \code{summary()} is a convenient shortcut for \code{print(object, select = "minimal", show_sigma = TRUE, show_formula = TRUE)}. } \section{Global Options to Customize Messages and Tables when Printing}{ The \code{verbose} argument can be used to display or silence messages and warnings for the different functions in the \strong{parameters} package. However, some messages providing additional information can be displayed or suppressed using \code{options()}: \itemize{ \item \code{parameters_info}: \code{options(parameters_info = TRUE)} will override the \code{include_info} argument in \code{model_parameters()} and always show the model summary for non-mixed models. 
\item \code{parameters_mixed_info}: \code{options(parameters_mixed_info = TRUE)} will override the \code{include_info} argument in \code{model_parameters()} for mixed models, and will then always show the model summary. \item \code{parameters_cimethod}: \code{options(parameters_cimethod = TRUE)} will show the additional information about the approximation method used to calculate confidence intervals and p-values. Set to \code{FALSE} to hide this message when printing \code{model_parameters()} objects. \item \code{parameters_exponentiate}: \code{options(parameters_exponentiate = TRUE)} will show the additional information on how to interpret coefficients of models with log-transformed response variables or with log-/logit-links when the \code{exponentiate} argument in \code{model_parameters()} is not \code{TRUE}. Set this option to \code{FALSE} to hide this message when printing \code{model_parameters()} objects. } There are further options that can be used to modify the default behaviour for printed outputs: \itemize{ \item \code{parameters_labels}: \code{options(parameters_labels = TRUE)} will use variable and value labels for pretty names, if data is labelled. If no labels are available, default pretty names are used. \item \code{parameters_interaction}: \verb{options(parameters_interaction = )} will replace the interaction mark (by default, \code{*}) with the related character. \item \code{parameters_select}: \verb{options(parameters_select = )} will set the default for the \code{select} argument. See argument's documentation for available options. \item \code{easystats_table_width}: \verb{options(easystats_table_width = )} will set the default width for tables in text-format, i.e. for most of the outputs printed to console. If not specified, tables will be adjusted to the current available width, e.g. of the console (or any other source for textual output, like markdown files). The argument \code{table_width} can also be used in most \code{print()} methods to specify the table width as desired. \item \code{insight_use_symbols}: \code{options(insight_use_symbols = TRUE)} will try to print unicode-chars for symbols as column names, wherever possible (e.g., \ifelse{html}{\out{ω}}{\eqn{\omega}} instead of \code{Omega}). } } \section{Interpretation of Interaction Terms}{ Note that the \emph{interpretation} of interaction terms depends on many characteristics of the model. The number of parameters, and overall performance of the model, can differ \emph{or not} between \code{a * b}, \code{a : b}, and \code{a / b}, suggesting that sometimes interaction terms give different parameterizations of the same model, but other times they give completely different models (depending on \code{a} or \code{b} being factors or covariates, included as main effects or not, etc.). Their interpretation depends on the full context of the model, which should not be inferred from the parameters table alone - rather, we recommend using packages that calculate estimated marginal means or marginal effects, such as \CRANpkg{modelbased}, \CRANpkg{emmeans}, \CRANpkg{ggeffects}, or \CRANpkg{marginaleffects}. To raise awareness for this issue, you may use \code{print(...,show_formula=TRUE)} to add the model-specification to the output of the \code{\link[=print.parameters_model]{print()}} method for \code{model_parameters()}. } \section{Labeling the Degrees of Freedom}{ Throughout the \strong{parameters} package, we decided to label the residual degrees of freedom \emph{df_error}.
The reason for this is that these degrees of freedom do not always refer to the residuals. For certain models, they refer to the estimate error - in a linear model these are the same, but in - for instance - any mixed effects model, this isn't strictly true. Hence, we think that \code{df_error} is the most generic label for these degrees of freedom. } \examples{ \dontshow{if (require("gt", quietly = TRUE) && require("glmmTMB", quietly = TRUE)) withAutoprint(\{ # examplesIf} \donttest{ library(parameters) model <- glmmTMB::glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) mp <- model_parameters(model) print(mp, pretty_names = FALSE) print(mp, split_components = FALSE) print(mp, select = c("Parameter", "Coefficient", "SE")) print(mp, select = "minimal") # group parameters ------ data(iris) model <- lm( Sepal.Width ~ Sepal.Length + Species + Petal.Length, data = iris ) # don't select "Intercept" parameter mp <- model_parameters(model, parameters = "^(?!\\\\(Intercept)") groups <- list(`Focal Predictors` = c(1, 4), Controls = c(2, 3)) print(mp, groups = groups) # only show coefficients, CI and p, # put non-matched parameters to the end data(mtcars) mtcars$cyl <- as.factor(mtcars$cyl) mtcars$gear <- as.factor(mtcars$gear) model <- lm(mpg ~ hp + gear * vs + cyl + drat, data = mtcars) # don't select "Intercept" parameter mp <- model_parameters(model, parameters = "^(?!\\\\(Intercept)") print(mp, groups = list( Engine = c(5, 6, 4, 1), Interactions = c(8, 9) )) } # custom column layouts ------ data(iris) lm1 <- lm(Sepal.Length ~ Species, data = iris) lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) # custom style result <- compare_parameters(lm1, lm2, select = "{estimate}{stars} ({se})") print(result) \donttest{ # custom style, in HTML result <- compare_parameters(lm1, lm2, select = "{estimate}<br>({se})|{p}") print_html(result) } \dontshow{\}) # examplesIf} } \seealso{ See also \code{\link[=display.parameters_model]{display()}}. } parameters/man/p_function.Rd0000644000176200001440000006315015066721002015627 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_function.R, R/print_html.R \name{p_function} \alias{p_function} \alias{consonance_function} \alias{confidence_curve} \alias{format.parameters_p_function} \alias{print.parameters_p_function} \alias{print_html.parameters_p_function} \title{p-value or consonance function} \usage{ p_function( model, ci_levels = c(0.25, 0.5, 0.75, emph = 0.95), exponentiate = FALSE, effects = "fixed", component = "all", vcov = NULL, vcov_args = NULL, keep = NULL, drop = NULL, verbose = TRUE, ... ) consonance_function( model, ci_levels = c(0.25, 0.5, 0.75, emph = 0.95), exponentiate = FALSE, effects = "fixed", component = "all", vcov = NULL, vcov_args = NULL, keep = NULL, drop = NULL, verbose = TRUE, ... ) confidence_curve( model, ci_levels = c(0.25, 0.5, 0.75, emph = 0.95), exponentiate = FALSE, effects = "fixed", component = "all", vcov = NULL, vcov_args = NULL, keep = NULL, drop = NULL, verbose = TRUE, ... ) \method{format}{parameters_p_function}( x, digits = 2, format = NULL, ci_width = NULL, ci_brackets = TRUE, pretty_names = TRUE, ... ) \method{print}{parameters_p_function}( x, digits = 2, ci_width = "auto", ci_brackets = TRUE, pretty_names = TRUE, ... ) \method{print_html}{parameters_p_function}( x, digits = 2, ci_width = "auto", ci_brackets = c("(", ")"), pretty_names = TRUE, ... ) } \arguments{ \item{model}{Statistical Model.} \item{ci_levels}{Vector of scalars, indicating the different levels at which compatibility intervals should be printed or plotted. In plots, these levels are highlighted by vertical lines. It is possible to increase thickness for one or more of these lines by providing a named vector, where the values to be highlighted should be named \code{"emph"}, e.g. \code{ci_levels = c(0.25, 0.5, emph = 0.95)}.} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. It is also recommended to use \code{exponentiate = TRUE} for models with log-transformed response values. For models with a log-transformed response variable, when \code{exponentiate = TRUE}, a one-unit increase in the predictor is associated with multiplying the outcome by that predictor's coefficient. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{effects}{Should parameters for fixed effects (\code{"fixed"}), random effects (\code{"random"}), or both fixed and random effects (\code{"all"}) be returned? By default, the variance components for random effects are returned. If group-level effects are requested, \code{"grouplevel"} returns the group-level random effects (BLUPs), while \code{"random_total"} returns the overall (sum of fixed and random) effects (similar to what \code{coef()} returns).
Using \code{"grouplevel"} is equivalent to setting \code{group_level = TRUE}. The \code{effects} argument only applies to mixed models. If the calculation of random effects parameters takes too long, you may use \code{effects = "fixed"}.} \item{component}{Which type of parameters should be returned - parameters for the conditional model, the zero-inflation part of the model, the dispersion term, or other auxiliary parameters? Applies to models with zero-inflation and/or dispersion formula, or if parameters such as \code{sigma} should be included. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms}, are returned (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, or \code{beta} (and other auxiliary parameters) are returned.} \item{vcov}{Variance-covariance matrix used to compute uncertainty estimates (e.g., for robust standard errors). This argument accepts a covariance matrix, a function which returns a covariance matrix, or a string which identifies the function to be used to compute the covariance matrix. \itemize{ \item A covariance matrix \item A function which returns a covariance matrix (e.g., \code{stats::vcov()}) \item A string which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Cluster-robust: \code{"CR"}, \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, \code{"CR3"}. See \code{?clubSandwich::vcovCR} \item Bootstrap: \code{"BS"}, \code{"xy"}, \code{"residual"}, \code{"wild"}, \code{"mammen"}, \code{"fractional"}, \code{"jackknife"}, \code{"norm"}, \code{"webb"}. See \code{?sandwich::vcovBS} \item Other \code{sandwich} package functions: \code{"HAC"}, \code{"PC"}, \code{"CL"}, \code{"OPG"}, \code{"PL"}. } }} \item{vcov_args}{List of arguments to be passed to the function identified by the \code{vcov} argument. This function is typically supplied by the \strong{sandwich} or \strong{clubSandwich} packages. Please refer to their documentation (e.g., \code{?sandwich::vcovHAC}) to see the list of available arguments. If no estimation type (argument \code{type}) is given, the default type for \code{"HC"} equals the default from the \strong{sandwich} package; for type \code{"CR"}, the default is set to \code{"CR3"}.} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded).
Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{See \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. Non-documented arguments are \itemize{ \item \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{groups} can be used to group coefficients. These arguments will be passed to the print-method, or can directly be used in \code{print()}, see documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}. \item If \code{s_value = TRUE}, the p-value will be replaced by the S-value in the output (cf. \emph{Rafi and Greenland 2020}). \item \code{pd} adds an additional column with the \emph{probability of direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details). Furthermore, see 'Examples' in \code{\link[=model_parameters.default]{model_parameters.default()}}. \item For developers, whose interest mainly is to get a "tidy" data frame of model summaries, it is recommended to set \code{pretty_names = FALSE} to speed up computation of the summary table. }} \item{x}{An object returned by \code{\link[=model_parameters]{model_parameters()}}.} \item{digits}{Number of digits for rounding or significant figures. May also be \code{"signif"} to return significant figures or \code{"scientific"} to return scientific notation. Control the number of digits by adding the value as suffix, e.g. \code{digits = "scientific4"} to have scientific notation with 4 decimal places, or \code{digits = "signif5"} for 5 significant figures (see also \code{\link[=signif]{signif()}}).} \item{format}{Name of output-format, as string. If \code{NULL} (or \code{"text"}), returned output is used for basic printing. Can be one of \code{NULL} (the default) resp. \code{"text"} for plain text, \code{"markdown"} (or \code{"md"}) for markdown and \code{"html"} for HTML output. A special option is \code{"tt"}, which creates a \code{\link[tinytable:tt]{tinytable::tt()}} object, where the output format is dependent on the context where the table is used, i.e. it can be markdown format when \code{export_table()} is used in markdown files, or LaTeX format when creating PDFs etc.} \item{ci_width}{Minimum width of the returned string for confidence intervals. If not \code{NULL} and width is larger than the string's length, leading whitespaces are added to the string. If \code{width="auto"}, width will be set to the length of the longest string.} \item{ci_brackets}{Logical, if \code{TRUE} (default), CI-values are encompassed in square brackets (else in parentheses).} \item{pretty_names}{Can be \code{TRUE}, which will return "pretty" (i.e. more human readable) parameter names. 
Or \code{"labels"}, in which case value and variable labels will be used as parameter names. The latter only works for "labelled" data, i.e. if the data used to fit the model had \code{"label"} and \code{"labels"} attributes. See also section \emph{Global Options to Customize Messages when Printing}.} } \value{ A data frame with p-values and compatibility intervals. } \description{ Compute p-values and compatibility (confidence) intervals for statistical models, at different levels. This function is also called a consonance function. It allows one to see which estimates are compatible with the model at various compatibility levels. Use \code{plot()} to generate plots of the \emph{p} resp. \emph{consonance} function and compatibility intervals at different levels. } \details{ \subsection{Compatibility intervals and continuous \emph{p}-values for different estimate values}{ \code{p_function()} only returns the compatibility interval estimates, not the related \emph{p}-values. The reason for this is that the \emph{p}-value for a given estimate value is just \code{1 - CI_level}. The values indicating the lower and upper limits of the intervals are the related estimates associated with the \emph{p}-value. E.g., if a parameter \code{x} has a 75\% compatibility interval of \verb{(0.81, 1.05)}, then the \emph{p}-value for the estimate value of \code{0.81} would be \code{1 - 0.75}, which is \code{0.25}. This relationship is more intuitive and easier to understand when looking at the plots (using \code{plot()}). } \subsection{Conditional versus unconditional interpretation of \emph{p}-values and intervals}{ \code{p_function()}, and in particular its \code{plot()} method, aims at re-interpreting \emph{p}-values and confidence intervals (better named: \emph{compatibility} intervals) in \emph{unconditional} terms. Instead of referring to the long-term property and repeated trials when interpreting interval estimates (so-called "aleatory probability", \emph{Schweder 2018}), and assuming that all underlying assumptions are correct and met, \code{p_function()} interprets \emph{p}-values in a Fisherian way as "\emph{continuous} measure of evidence against the very test hypothesis \emph{and} entire model (all assumptions) used to compute it" (\emph{P-Values Are Tough and S-Values Can Help}, lesslikely.com/statistics/s-values; see also \emph{Amrhein and Greenland 2022}). The common definition of p-values can be considered a "conditional" interpretation: \emph{The p-value is the probability of obtaining test results at least as extreme as the result actually observed, under the assumption that the null hypothesis is correct (Wikipedia).} However, this definition or interpretation is inadequate because it only refers to the test hypothesis (often the null hypothesis), which is only one component of the entire model that is being tested. Thus, \emph{Greenland et al. 2022} suggest an "unconditional" interpretation. This interpretation as a continuous measure of evidence against the test hypothesis and the entire model used to compute it can be seen in the figure below (taken from \emph{P-Values Are Tough and S-Values Can Help}, lesslikely.com/statistics/s-values). The "conditional" interpretation of \emph{p}-values and interval estimates (A) implicitly assumes certain assumptions to be true, thus the interpretation is "conditioned" on these assumptions (i.e. assumptions are taken as given, only the hypothesis is tested). The unconditional interpretation (B), however, questions \emph{all} these assumptions.
A significant p-value could occur because the test hypothesis is false, but could also be the result of any of the model assumptions being incorrect. \if{html}{\cr \figure{unconditional_interpretation.png}{options: alt="Conditional versus unconditional interpretations of P-values"} \cr} "Emphasizing unconditional interpretations helps avoid overconfident and misleading inferences in light of uncertainties about the assumptions used to arrive at the statistical results." (\emph{Greenland et al. 2022}). \strong{Note:} The term "conditional" as used by Rafi and Greenland probably has a slightly different meaning than usual. "Conditional" in this notion means that all model assumptions are taken as given - it should not be confused with terms like "conditional probability". See also \emph{Greenland et al. 2022} for a detailed elaboration on this issue. In other words, the term compatibility interval emphasizes "the dependence of the \emph{p}-value on the assumptions as well as on the data, recognizing that \emph{p}<0.05 can arise from assumption violations even if the effect under study is null" (\emph{Gelman/Greenland 2019}). } \subsection{Probabilistic interpretation of p-values and compatibility intervals}{ Schweder (2018) resp. Schweder and Hjort (2016) (and others) argue that confidence curves (as produced by \code{p_function()}) have a valid probabilistic interpretation. They distinguish between \emph{aleatory probability}, which describes the aleatory stochastic element of a distribution \emph{ex ante}, i.e. before the data are obtained. This is the classical interpretation of confidence intervals following the Neyman-Pearson school of statistics. However, there is also an \emph{ex post} probability, called \emph{epistemic} probability, for confidence curves. The shift in terminology from \emph{confidence} intervals to \emph{compatibility} intervals may help emphasize this interpretation. In this sense, the probabilistic interpretation of \emph{p}-values and compatibility intervals is "conditional" - on the data \emph{and} model assumptions (which is in line with the \emph{"unconditional"} interpretation in the sense of Rafi and Greenland). Ascribing a probabilistic interpretation to one realized confidence interval is possible without repeated sampling of the specific experiment. What is important is the assumption that a \emph{sampling distribution} is a good description of the variability of the parameter (\emph{Vos and Holbert 2022}). At the core, the interpretation of a confidence interval is "I assume that this sampling distribution is a good description of the uncertainty of the parameter. If that's a good assumption, then the values in this interval are the most plausible or compatible with the data". The source of confidence in probability statements is the assumption that the selected sampling distribution is appropriate. "The realized confidence distribution is clearly an epistemic probability distribution" (\emph{Schweder 2018}). In Bayesian words, compatibility intervals (or confidence distributions, or consonance curves) are "posteriors without priors" (\emph{Schweder, Hjort, 2003}). The \emph{p}-value indicates the degree of compatibility of the endpoints of the interval at a given confidence level with (1) the observed data and (2) model assumptions.
The observed point estimate (\emph{p}-value = 1) is the value estimated to be \emph{most compatible} with the data and model assumptions, whereas values far from the observed point estimate (where \emph{p} approaches 0) are least compatible with the data and model assumptions (\emph{Schweder and Hjort 2016, pp. 60-61; Amrhein and Greenland 2022}). In this regard, \emph{p}-values are statements about \emph{confidence} or \emph{compatibility}: The p-value is not an absolute measure of evidence for a model (such as the null/alternative model), it is a continuous measure of the compatibility of the observed data with the model used to compute it (\emph{Greenland et al. 2016}, \emph{Greenland 2023}). Going one step further, and following \emph{Schweder}, p-values can be considered as \emph{epistemic probability} - "not necessarily of the hypothesis being true, but of it \emph{possibly} being true" (\emph{Schweder 2018}). Hence, the interpretation of \emph{p}-values might be guided using \code{\link[bayestestR:pd_to_p]{bayestestR::p_to_pd()}}. } \subsection{Probability or compatibility?}{ We have presented the discussion of p-values and confidence intervals from the perspective of two paradigms, one saying that probability statements can be made, one saying that interpretation is guided in terms of "compatibility". Cox and Hinkley say, "interval estimates cannot be taken as probability statements" (\emph{Cox and Hinkley 1979: 208}), which conflicts with the Schweder and Hjort confidence distribution school. However, if you view interval estimates as being intervals of values being consistent with the data, this comes close to the idea of (epistemic) probability. We do not believe that these two paradigms contradict or exclude each other. Rather, the aim is to emphasize the one point of view or the other, i.e. to place the linguistic nuances either on 'compatibility' or 'probability'. The main take-away is \emph{not} to interpret p-values as dichotomous decisions that distinguish between "we found an effect" (statistically significant) vs. "we found no effect" (statistically not significant) (\emph{Altman and Bland 1995}). } \subsection{Compatibility intervals - is their interpretation "conditional" or not?}{ The fact that the term "conditional" is used in different meanings in statistics is confusing. Thus, we would summarize the (probabilistic) interpretation of compatibility intervals as follows: The intervals are built from the data \emph{and} our modeling assumptions. The accuracy of the intervals depends on our model assumptions. If a value is outside the interval, that might be because (1) that parameter value isn't supported by the data, or (2) the modeling assumptions are a poor fit for the situation. When we make bad assumptions, the compatibility interval might be too wide or (more commonly and seriously) too narrow, making us think we know more about the parameter than is warranted. When we say "there is a 95\% chance the true value is in the interval", that is a statement of \emph{epistemic probability} (i.e. description of uncertainty related to our knowledge or belief). When we talk about repeated samples or sampling distributions, that is referring to \emph{aleatoric} (physical properties) probability. Frequentist inference is built on defining estimators with known \emph{aleatoric} probability properties, from which we can draw \emph{epistemic} probabilistic statements of uncertainty (\emph{Schweder and Hjort 2016}).
} \subsection{Functions in the parameters package to check for effect existence and significance}{ The \strong{parameters} package provides several options or functions to aid statistical inference. Beyond \code{p_function()}, there are, for example: \itemize{ \item \code{\link[=equivalence_test.lm]{equivalence_test()}}, to compute the (conditional) equivalence test for frequentist models \item \code{\link[=p_significance.lm]{p_significance()}}, to compute the probability of \emph{practical significance}, which can be conceptualized as a unidirectional equivalence test \item the \code{pd} argument (setting \code{pd = TRUE}) in \code{model_parameters()} includes a column with the \emph{probability of direction}, i.e. the probability that a parameter is strictly positive or negative. See \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details. If plotting is desired, the \code{\link[=p_direction.lm]{p_direction()}} function can be used, together with \code{plot()}. \item the \code{s_value} argument (setting \code{s_value = TRUE}) in \code{model_parameters()} replaces the p-values with their related \emph{S}-values (\emph{Rafi and Greenland 2020}) \item finally, it is possible to generate distributions of model coefficients by generating bootstrap-samples (setting \code{bootstrap = TRUE}) or simulating draws from model coefficients using \code{\link[=simulate_model]{simulate_model()}}. These samples can then be treated as "posterior samples" and used in many functions from the \strong{bayestestR} package. } } } \note{ Currently, \code{p_function()} computes intervals based on Wald t- or z-statistics. For certain models (like mixed models), profiled intervals may be more accurate; however, this is currently not supported. } \examples{ \dontshow{if (requireNamespace("see")) withAutoprint(\{ # examplesIf} model <- lm(Sepal.Length ~ Species, data = iris) p_function(model) model <- lm(mpg ~ wt + as.factor(gear) + am, data = mtcars) result <- p_function(model) # single panels plot(result, n_columns = 2) # integrated plot, the default plot(result) \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Altman DG, Bland JM. Absence of evidence is not evidence of absence. BMJ. 1995;311(7003):485. \doi{10.1136/bmj.311.7003.485} \item Amrhein V, Greenland S. Discuss practical importance of results based on interval estimates and p-value functions, not only on point estimates and null p-values. Journal of Information Technology 2022;37:316–20. \doi{10.1177/02683962221105904} \item Cox DR, Hinkley DV. 1979. Theoretical Statistics. 6th edition. Chapman and Hall/CRC \item Fraser DAS. The P-value function and statistical inference. The American Statistician. 2019;73(sup1):135-147. \doi{10.1080/00031305.2018.1556735} \item Gelman A, Greenland S. Are confidence intervals better termed "uncertainty intervals"? BMJ (2019), l5381. \doi{10.1136/bmj.l5381} \item Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference, Emphasize Unconditional Compatibility Descriptions of Statistics. (2022) https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022) \item Greenland S, Senn SJ, Rothman KJ, Carlin JB, Poole C, Goodman SN, et al. (2016). Statistical tests, P values, confidence intervals, and power: A guide to misinterpretations. European Journal of Epidemiology. 31:337-350. \doi{10.1007/s10654-016-0149-3} \item Greenland S (2023).
Divergence versus decision P-values: A distinction worth making in theory and keeping in practice: Or, how divergence P-values measure evidence even when decision P-values do not. Scand J Statist, 50(1), 54-88. \item Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical science: Replace confidence and significance by compatibility and surprise. BMC Medical Research Methodology. 2020;20(1):244. \doi{10.1186/s12874-020-01105-9} \item Schweder T. Confidence is epistemic probability for empirical science. Journal of Statistical Planning and Inference (2018) 195:116–125. \doi{10.1016/j.jspi.2017.09.016} \item Schweder T, Hjort NL. Confidence and Likelihood. Scandinavian Journal of Statistics. 2002;29(2):309-332. \doi{10.1111/1467-9469.00285} \item Schweder T, Hjort NL. Frequentist analogues of priors and posteriors. In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory-Data Confrontations in Economics, pp. 285-317. Princeton University Press, Princeton, NJ, 2003 \item Schweder T, Hjort NL. Confidence, Likelihood, Probability: Statistical inference with confidence distributions. Cambridge University Press, 2016. \item Vos P, Holbert D. Frequentist statistical inference without repeated sampling. Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x} } } \seealso{ See also \code{\link[=equivalence_test]{equivalence_test()}} and \code{\link[=p_significance]{p_significance()}} for functions related to checking effect existence and significance. } parameters/man/cluster_meta.Rd0000644000176200001440000000513614502257471016156 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cluster_meta.R \name{cluster_meta} \alias{cluster_meta} \title{Metaclustering} \usage{ cluster_meta(list_of_clusters, rownames = NULL, ...) } \arguments{ \item{list_of_clusters}{A list of vectors with the clustering assignments from various methods.} \item{rownames}{An optional vector of row.names for the matrix.} \item{...}{Currently not used.} } \value{ A matrix containing all the pairwise (between each observation) probabilities of being clustered together by the methods. } \description{ One of the core "issues" of statistical clustering is that, in many cases, different methods will give different results. The \strong{metaclustering} approach proposed by \emph{easystats} (that finds echoes in \emph{consensus clustering}; see Monti et al., 2003) consists of treating the unique clustering solutions as an ensemble, from which we can derive a probability matrix. This matrix contains, for each pair of observations, the probability of being in the same cluster. For instance, if the 6th and the 9th rows of a dataframe have been assigned to a similar cluster by 5 out of 10 clustering methods, then their probability of being grouped together is 0.5. } \details{ Metaclustering is based on the hypothesis that, as each clustering algorithm embodies a different prism by which it sees the data, running an infinite number of algorithms would result in the emergence of the "true" clusters. As the number of algorithms and parameters is finite, the probabilistic perspective is a useful proxy. This method is interesting where there are no obvious reasons to prefer one clustering method over another, as well as to investigate how robust some clusters are under different algorithms.
This metaclustering probability matrix can be transformed into a dissimilarity matrix (such as the one produced by \code{dist()}) and submitted, for instance, to hierarchical clustering (\code{hclust()}). See the example below. } \examples{ \donttest{ data <- iris[1:4] rez1 <- cluster_analysis(data, n = 2, method = "kmeans") rez2 <- cluster_analysis(data, n = 3, method = "kmeans") rez3 <- cluster_analysis(data, n = 6, method = "kmeans") list_of_clusters <- list(rez1, rez2, rez3) m <- cluster_meta(list_of_clusters) # Visualize matrix without reordering heatmap(m, Rowv = NA, Colv = NA, scale = "none") # Without reordering # Reordered heatmap heatmap(m, scale = "none") # Extract 3 clusters predict(m, n = 3) # Convert to dissimilarity d <- as.dist(abs(m - 1)) model <- hclust(d) plot(model, hang = -1) } } parameters/man/get_scores.Rd0000644000176200001440000000513515066721002015617 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils_pca_efa.R \name{get_scores} \alias{get_scores} \title{Get Scores from Principal Component or Factor Analysis (PCA/FA)} \usage{ get_scores(x, n_items = NULL) } \arguments{ \item{x}{An object returned by \code{\link[=principal_components]{principal_components()}} or \code{\link[=factor_analysis]{factor_analysis()}}.} \item{n_items}{Number of required (i.e. non-missing) items to build the sum score for an observation. If an observation has more missing values than \code{n_items} in all items of a (sub) scale, \code{NA} is returned for that observation; otherwise, the sum score of all (sub) items is calculated. If \code{NULL}, the value is chosen to match half of the number of columns in a data frame, i.e. no more than 50\% missing values are allowed.} } \value{ A data frame with subscales, which are average sum scores for all items from each component or factor. } \description{ \code{get_scores()} takes the \code{n_items} items that load most strongly on a component (selected either by a loading cutoff or by number) and then computes their average. This results in a sum score for each component from the PCA/FA, which is on the same scale as the original, single items that were used to compute the PCA/FA. } \details{ \code{get_scores()} takes the results from \code{\link[=principal_components]{principal_components()}} or \code{\link[=factor_analysis]{factor_analysis()}} and extracts the variables for each component found by the PCA/FA. Then, for each of these "subscales", row means are calculated (which equals adding up the single items and dividing by the number of items). This results in a sum score for each component from the PCA/FA, which is on the same scale as the original, single items that were used to compute the PCA/FA. } \examples{ \dontshow{if (insight::check_if_installed("psych", quietly = TRUE)) withAutoprint(\{ # examplesIf} pca <- principal_components(mtcars[, 1:7], n = 2, rotation = "varimax") # PCA extracted two components pca # assignment of items to each component closest_component(pca) # now we want to have sum scores for each component get_scores(pca) # compare to manually computed sum score for 2nd component, which # consists of items "hp" and "qsec" (mtcars$hp + mtcars$qsec) / 2 \dontshow{\}) # examplesIf} } \seealso{ Functions to carry out a PCA (\code{\link[=principal_components]{principal_components()}}) or a FA (\code{\link[=factor_analysis]{factor_analysis()}}). \code{\link[=factor_scores]{factor_scores()}} extracts factor scores from an FA object.
} parameters/man/dot-n_factors_bentler.Rd0000644000176200001440000000052213641634603017741 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_bentler} \alias{.n_factors_bentler} \title{Bentler and Yuan's Procedure} \usage{ .n_factors_bentler(eigen_values = NULL, model = "factors", nobs = NULL) } \description{ Bentler and Yuan's Procedure } \keyword{internal} parameters/man/n_factors.Rd0000644000176200001440000001414215066721002015436 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{n_factors} \alias{n_factors} \alias{n_components} \title{Number of components/factors to retain in PCA/FA} \usage{ n_factors( x, type = "FA", rotation = "varimax", algorithm = "default", package = c("nFactors", "psych"), correlation_matrix = NULL, safe = TRUE, n_max = NULL, ... ) n_components( x, type = "PCA", rotation = "varimax", algorithm = "default", package = c("nFactors", "psych"), correlation_matrix = NULL, safe = TRUE, ... ) } \arguments{ \item{x}{A data frame.} \item{type}{Can be \code{"FA"} or \code{"PCA"}, depending on what you want to do.} \item{rotation}{Only used for VSS (Very Simple Structure criterion, see \code{\link[psych:VSS]{psych::VSS()}}). The rotation to apply. Can be \code{"none"}, \code{"varimax"}, \code{"quartimax"}, \code{"bentlerT"}, \code{"equamax"}, \code{"varimin"}, \code{"geominT"} and \code{"bifactor"} for orthogonal rotations, and \code{"promax"}, \code{"oblimin"}, \code{"simplimax"}, \code{"bentlerQ"}, \code{"geominQ"}, \code{"biquartimin"} and \code{"cluster"} for oblique transformations.} \item{algorithm}{Factoring method used by VSS. Can be \code{"pa"} for Principal Axis Factor Analysis, \code{"minres"} for minimum residual (OLS) factoring, \code{"mle"} for Maximum Likelihood FA and \code{"pc"} for Principal Components. \code{"default"} will select \code{"minres"} if \code{type = "FA"} and \code{"pc"} if \code{type = "PCA"}.} \item{package}{Package from which respective methods are used. Can be \code{"all"} or a vector containing \code{"nFactors"}, \code{"psych"}, \code{"PCDimension"}, \code{"fit"} or \code{"EGAnet"}. Note that \code{"fit"} (which actually also relies on the \code{psych} package) and \code{"EGAnet"} can be very slow for bigger datasets. Thus, the default is \code{c("nFactors", "psych")}. You must have the respective packages installed for the methods to be used.} \item{correlation_matrix}{An optional correlation matrix that can be used (note that the data must still be passed as the first argument). If \code{NULL}, will compute it by running \code{cor()} on the passed data.} \item{safe}{If \code{TRUE}, the function will run all the procedures in try blocks, and will only return those that work and silently skip the ones that may fail.} \item{n_max}{If set to a value (e.g., \code{10}), will drop from the results all methods that suggest a higher number of components. The interpretation becomes 'from all the methods that suggested a number lower than n_max, the results are ...'.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame. } \description{ This function runs many existing procedures for determining how many factors to retain/extract from factor analysis (FA) or dimension reduction (PCA). It returns the number of factors based on the maximum consensus between methods. In case of ties, it will keep the simplest model and select the solution with the fewest factors.
} \details{ \code{n_components()} is actually an alias for \code{n_factors()}, with different defaults for the function arguments. } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\strong{see}-package}. \code{n_components()} is a convenient short-cut for \code{n_factors(type = "PCA")}. } \examples{ \dontshow{if (require("PCDimension", quietly = TRUE) && require("nFactors", quietly = TRUE) && require("EGAnet", quietly = TRUE) && require("psych", quietly = TRUE)) withAutoprint(\{ # examplesIf} library(parameters) n_factors(mtcars, type = "PCA") result <- n_factors(mtcars[1:5], type = "FA") as.data.frame(result) summary(result) \donttest{ # Setting package = 'all' will increase the number of methods (but is slow) n_factors(mtcars, type = "PCA", package = "all") n_factors(mtcars, type = "FA", algorithm = "mle", package = "all") } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Bartlett, M. S. (1950). Tests of significance in factor analysis. British Journal of statistical psychology, 3(2), 77-85. \item Bentler, P. M., & Yuan, K. H. (1996). Test of linear trend in eigenvalues of a covariance matrix with application to data analysis. British Journal of Mathematical and Statistical Psychology, 49(2), 299-312. \item Cattell, R. B. (1966). The scree test for the number of factors. Multivariate behavioral research, 1(2), 245-276. \item Finch, W. H. (2019). Using Fit Statistic Differences to Determine the Optimal Number of Factors to Retain in an Exploratory Factor Analysis. Educational and Psychological Measurement. \item Zoski, K. W., & Jurs, S. (1996). An objective counterpart to the visual scree test for factor analysis: The standard error scree. Educational and Psychological Measurement, 56(3), 443-451. \item Zoski, K., & Jurs, S. (1993). Using multiple regression to determine the number of factors to retain in factor analysis. Multiple Linear Regression Viewpoints, 20(1), 5-9. \item Nasser, F., Benson, J., & Wisenbaker, J. (2002). The performance of regression-based variations of the visual scree for determining the number of common factors. Educational and psychological measurement, 62(3), 397-419. \item Golino, H., Shi, D., Garrido, L. E., Christensen, A. P., Nieto, M. D., Sadana, R., & Thiyagarajan, J. A. (2018). Investigating the performance of Exploratory Graph Analysis and traditional techniques to identify the number of latent factors: A simulation and tutorial. \item Golino, H. F., & Epskamp, S. (2017). Exploratory graph analysis: A new approach for estimating the number of dimensions in psychological research. PloS one, 12(6), e0174035. \item Revelle, W., & Rocklin, T. (1979). Very simple structure: An alternative procedure for estimating the optimal number of interpretable factors. Multivariate Behavioral Research, 14(4), 403-414. \item Velicer, W. F. (1976). Determining the number of components from the matrix of partial correlations. Psychometrika, 41(3), 321-327. 
} } parameters/man/format_p_adjust.Rd0000644000176200001440000000110613774144072016647 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_p_adjust.R \name{format_p_adjust} \alias{format_p_adjust} \title{Format the name of the p-value adjustment methods} \usage{ format_p_adjust(method) } \arguments{ \item{method}{Name of the method.} } \value{ A string with the full surname(s) of the author(s), including year of publication, for the adjustment-method. } \description{ Format the name of the p-value adjustment methods. } \examples{ library(parameters) format_p_adjust("holm") format_p_adjust("bonferroni") } parameters/man/p_value_satterthwaite.Rd0000644000176200001440000000465414716604200020073 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_satterthwaite.R, R/dof_satterthwaite.R, % R/p_value_satterthwaite.R, R/standard_error_satterthwaite.R \name{ci_satterthwaite} \alias{ci_satterthwaite} \alias{dof_satterthwaite} \alias{p_value_satterthwaite} \alias{se_satterthwaite} \title{Satterthwaite approximation for SEs, CIs and p-values} \usage{ ci_satterthwaite(model, ci = 0.95, ...) dof_satterthwaite(model) p_value_satterthwaite(model, dof = NULL, ...) se_satterthwaite(model) } \arguments{ \item{model}{A statistical model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{...}{Additional arguments passed down to the underlying functions. E.g., arguments like \code{vcov} or \code{vcov_args} can be used to compute confidence intervals using a specific variance-covariance matrix for the standard errors.} \item{dof}{Degrees of Freedom.} } \value{ A data frame. } \description{ An approximate F-test based on the Satterthwaite (1946) approach. } \details{ Inferential statistics (like p-values, confidence intervals and standard errors) may be biased in mixed models when the number of clusters is small (even if the sample size of level-1 units is high). In such cases it is recommended to approximate a more accurate number of degrees of freedom for such inferential statistics. Unlike simpler approximation heuristics like the "m-l-1" rule (\code{dof_ml1}), the Satterthwaite approximation is also applicable in more complex multilevel designs. However, the "m-l-1" heuristic also applies to generalized mixed models, while approaches like Kenward-Roger or Satterthwaite are limited to linear mixed models only. } \examples{ \donttest{ if (require("lme4", quietly = TRUE)) { model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) p_value_satterthwaite(model) } } } \references{ Satterthwaite FE (1946) An approximate distribution of estimates of variance components. Biometrics Bulletin 2 (6):110–4. } \seealso{ \code{dof_satterthwaite()} and \code{se_satterthwaite()} are small helper-functions to calculate approximated degrees of freedom and standard errors for model parameters, based on the Satterthwaite (1946) approach. \code{\link[=dof_kenward]{dof_kenward()}} and \code{\link[=dof_ml1]{dof_ml1()}} approximate degrees of freedom based on Kenward-Roger's method or the "m-l-1" rule. 
}
parameters/man/model_parameters.brmsfit.Rd0000644000176200001440000005430515066721002020455 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_base.R, R/methods_brms.R
\name{model_parameters.data.frame}
\alias{model_parameters.data.frame}
\alias{model_parameters.brmsfit}
\title{Parameters from Bayesian Models}
\usage{
\method{model_parameters}{data.frame}(
  model,
  as_draws = FALSE,
  exponentiate = FALSE,
  verbose = TRUE,
  ...
)

\method{model_parameters}{brmsfit}(
  model,
  centrality = "median",
  dispersion = FALSE,
  ci = 0.95,
  ci_method = "eti",
  test = "pd",
  rope_range = "default",
  rope_ci = 0.95,
  bf_prior = NULL,
  diagnostic = c("ESS", "Rhat"),
  priors = FALSE,
  effects = "fixed",
  component = "all",
  exponentiate = FALSE,
  standardize = NULL,
  group_level = FALSE,
  keep = NULL,
  drop = NULL,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{Bayesian model (including SEM from \strong{blavaan}). May also
be a data frame with posterior samples, however, \code{as_draws} must be set
to \code{TRUE} (else, for data frames \code{NULL} is returned).}

\item{as_draws}{Logical, if \code{TRUE} and \code{model} is of class
\code{data.frame}, the data frame is treated as posterior samples and
handled similarly to Bayesian models. All arguments in \code{...} are passed
to \code{model_parameters.draws()}.}

\item{exponentiate}{Logical, indicating whether or not to exponentiate the
coefficients (and related confidence intervals). This is typical for
logistic regression, or more generally speaking, for models with log or
logit links. It is also recommended to use \code{exponentiate = TRUE} for
models with log-transformed response values. For models with a
log-transformed response variable, when \code{exponentiate = TRUE}, a
one-unit increase in the predictor is associated with multiplying the
outcome by that predictor's coefficient. \strong{Note:} Delta-method
standard errors are also computed (by multiplying the standard errors by the
transformed coefficients). This is to mimic behaviour of other software
packages, such as Stata, but these standard errors poorly estimate
uncertainty for the transformed coefficient. The transformed confidence
interval more clearly captures this uncertainty. For
\code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only
exponentiate coefficients from non-Gaussian families.}

\item{verbose}{Toggle warnings and messages.}

\item{...}{Arguments passed to or from other methods. For instance, when
\code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are
passed down to \code{bootstrap_model()}. Further non-documented arguments
are:
\itemize{
\item \code{digits}, \code{p_digits}, \code{ci_digits} and
\code{footer_digits} to set the number of digits for the output.
\code{groups} can be used to group coefficients. These arguments will be
passed to the print-method, or can directly be used in \code{print()}, see
documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}.
\item If \code{s_value = TRUE}, the p-value will be replaced by the S-value
in the output (cf. \emph{Rafi and Greenland 2020}).
\item \code{pd} adds an additional column with the \emph{probability of
direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}
for details). Furthermore, see 'Examples' for this function.
\item For developers, whose interest mainly is to get a "tidy" data frame of
model summaries, it is recommended to set \code{pretty_names = FALSE} to
speed up computation of the summary table.
}}

\item{centrality}{The point-estimates (centrality indices) to compute.
Character (vector) or list with one or more of these options:
\code{"median"}, \code{"mean"}, \code{"MAP"} (see
\code{\link[bayestestR:map_estimate]{map_estimate()}}), \code{"trimmed"}
(which is just \code{mean(x, trim = threshold)}), \code{"mode"} or
\code{"all"}.}

\item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion
related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and
\code{median}, respectively). Dispersion is not available for \code{"MAP"}
or \code{"mode"} centrality indices.}

\item{ci}{Credible Interval (CI) level. Default to \code{0.95} (\verb{95\%}).
See \code{\link[bayestestR:ci]{bayestestR::ci()}} for further details.}

\item{ci_method}{Method for computing degrees of freedom for confidence
intervals (CI) and the related p-values. The following options are allowed
(they vary depending on the model class): \code{"residual"},
\code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"},
\code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"},
\code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"},
\code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or
\code{"bcai"}. See section \emph{Confidence intervals and approximation of
degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}}
for further details. When \code{ci_method = NULL}, \code{"wald"} is used in
most cases.}

\item{test}{The indices of effect existence to compute. Character (vector)
or list with one or more of these options: \code{"p_direction"} (or
\code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"p_significance"} (or
\code{"ps"}), \code{"p_rope"}, \code{"equivalence_test"} (or
\code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"}
to compute all tests. For each "test", the corresponding \strong{bayestestR}
function is called (e.g. \code{\link[bayestestR:rope]{rope()}} or
\code{\link[bayestestR:p_direction]{p_direction()}}) and its results
included in the summary output.}

\item{rope_range}{ROPE's lower and higher bounds. Should be a vector of two
values (e.g., \code{c(-0.1, 0.1)}), \code{"default"} or a list of numeric
vectors of the same length as numbers of parameters. If \code{"default"},
the bounds are set to \code{x +- 0.1*SD(response)}.}

\item{rope_ci}{The Credible Interval (CI) probability, corresponding to the
proportion of HDI, to use for the percentage in ROPE.}

\item{bf_prior}{Distribution representing a prior for the computation of
Bayes factors / SI. Used if the input is a posterior, otherwise (in the
case of models) ignored.}

\item{diagnostic}{Diagnostic metrics to compute. Character (vector) or list
with one or more of these options: \code{"ESS"}, \code{"Rhat"},
\code{"MCSE"} or \code{"all"}.}

\item{priors}{Add the prior used for each parameter.}

\item{effects}{Should variables for fixed effects (\code{"fixed"}), random
effects (\code{"random"}) or both (\code{"all"}) be returned? Only applies
to mixed models. May be abbreviated. For models from the packages
\strong{brms} or \strong{rstanarm} there are additional options:
\itemize{
\item \code{"fixed"} returns fixed effects.
\item \code{"random_variance"} returns random effects parameters (variance
and correlation components, e.g. those parameters that start with
\code{sd_} or \code{cor_}).
\item \code{"grouplevel"} returns random effects group level estimates, i.e.
those parameters that start with \code{r_}.
\item \code{"random"} returns both \code{"random_variance"} and \code{"grouplevel"}. \item \code{"all"} returns fixed effects and random effects variances. \item \code{"full"} returns all parameters. }} \item{component}{Which type of parameters to return, such as parameters for the conditional model, the zero-inflation part of the model, the dispersion term, or other auxiliary parameters be returned? Applies to models with zero-inflation and/or dispersion formula, or if parameters such as \code{sigma} should be included. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms}, are returned (everything that are fixed or random effects - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, or \code{beta} (and other auxiliary parameters) are returned.} \item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[=standardize_parameters]{standardize_parameters()}}. \strong{Importantly}: \itemize{ \item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \strong{lm.beta}) or other software packages (like SPSS). to mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model. \item By default, the response (dependent) variable is also standardized, \emph{if applicable}. Set \code{include_response = FALSE} to avoid standardization of the response variable. See details in \code{\link[datawizard:standardize.default]{datawizard::standardize.default()}}. \item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be standardized. \item Robust estimation (i.e., \code{vcov} set to a value other than \code{NULL}) of standardized parameters only works when \code{standardize="refit"}. }} \item{group_level}{Logical, for multilevel models (i.e. models with random effects) and when \code{effects = "random"}, return the parameters for each group level from random effects only. If \code{group_level = FALSE} (the default), also information on SD and COR are returned. Note that this argument is superseded by the new options for the \code{effects} argument. \code{effects = "grouplevel"} should be used instead of \code{group_level = TRUE}.} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. 
If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{See \code{keep}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Model parameters from Bayesian models. This function internally calls \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}} to get the relevant information for the output. } \note{ When \code{standardize = "refit"}, columns \code{diagnostic}, \code{bf_prior} and \code{priors} refer to the \emph{original} \code{model}. If \code{model} is a data frame, arguments \code{diagnostic}, \code{bf_prior} and \code{priors} are ignored. There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\strong{see}-package}. } \section{Confidence intervals and approximation of degrees of freedom}{ There are different ways of approximating the degrees of freedom depending on different assumptions about the nature of the model and its sampling distribution. The \code{ci_method} argument modulates the method for computing degrees of freedom (df) that are used to calculate confidence intervals (CI) and the related p-values. Following options are allowed, depending on the model class: \strong{Classical methods:} Classical inference is generally based on the \strong{Wald method}. The Wald approach to inference computes a test statistic by dividing the parameter estimate by its standard error (Coefficient / SE), then comparing this statistic against a t- or normal distribution. This approach can be used to compute CIs and p-values. \code{"wald"}: \itemize{ \item Applies to \emph{non-Bayesian models}. For \emph{linear models}, CIs computed using the Wald method (SE and a \emph{t-distribution with residual df}); p-values computed using the Wald method with a \emph{t-distribution with residual df}. For other models, CIs computed using the Wald method (SE and a \emph{normal distribution}); p-values computed using the Wald method with a \emph{normal distribution}. } \code{"normal"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a normal distribution. } \code{"residual"} \itemize{ \item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a \emph{t-distribution with residual df} when possible. If the residual df for a model cannot be determined, a normal distribution is used instead. 
}

\strong{Methods for mixed models:}

Compared to fixed effects (or single-level) models, determining appropriate
df for Wald-based inference in mixed models is more difficult. See
\href{https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable}{the R GLMM FAQ}
for a discussion.

Several approximate methods for computing df are available, but you should
also consider using profile likelihood (\code{"profile"}) or bootstrap
(\code{"boot"}) CIs and p-values instead.

\code{"satterthwaite"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald
method (SE and a \emph{t-distribution with Satterthwaite df}); p-values
computed using the Wald method with a \emph{t-distribution with
Satterthwaite df}.
}

\code{"kenward"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald
method (\emph{Kenward-Roger SE} and a \emph{t-distribution with
Kenward-Roger df}); p-values computed using the Wald method with
\emph{Kenward-Roger SE and t-distribution with Kenward-Roger df}.
}

\code{"ml1"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald
method (SE and a \emph{t-distribution with m-l-1 approximated df}); p-values
computed using the Wald method with a \emph{t-distribution with m-l-1
approximated df}. See \code{\link[=ci_ml1]{ci_ml1()}}.
}

\code{"betwithin"}
\itemize{
\item Applies to \emph{linear mixed models} and \emph{generalized linear
mixed models}. CIs computed using the Wald method (SE and a
\emph{t-distribution with between-within df}); p-values computed using the
Wald method with a \emph{t-distribution with between-within df}. See
\code{\link[=ci_betwithin]{ci_betwithin()}}.
}

\strong{Likelihood-based methods:}

Likelihood-based inference is based on comparing the likelihood for the
maximum-likelihood estimate to the likelihood for models with one or more
parameter values changed (e.g., set to zero or a range of alternative
values). Likelihood ratios for the maximum-likelihood and alternative models
are compared to a \eqn{\chi}-squared distribution to compute CIs and
p-values.

\code{"profile"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{glm}, \code{polr},
\code{merMod} or \code{glmmTMB}. CIs computed by \emph{profiling the
likelihood curve for a parameter}, using linear interpolation to find where
likelihood ratio equals a critical value; p-values computed using the Wald
method with a \emph{normal-distribution} (note: this might change in a
future update!)
}

\code{"uniroot"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{glmmTMB}. CIs
computed by \emph{profiling the likelihood curve for a parameter}, using
root finding to find where likelihood ratio equals a critical value;
p-values computed using the Wald method with a \emph{normal-distribution}
(note: this might change in a future update!)
}

\strong{Methods for bootstrapped or Bayesian models:}

Bootstrap-based inference is based on \strong{resampling} and refitting the
model to the resampled datasets. The distribution of parameter estimates
across resampled datasets is used to approximate the parameter's sampling
distribution. Depending on the type of model, several different methods for
bootstrapping and constructing CIs and p-values from the bootstrap
distribution are available. For Bayesian models, inference is based on
drawing samples from the model posterior distribution.
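To make the preceding approximation options more concrete, here is a
minimal, illustrative sketch (the model and data are assumptions for this
example, not part of the original documentation; \strong{lme4} must be
installed):

\preformatted{
data(sleepstudy, package = "lme4")
model <- lme4::lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy)

model_parameters(model, ci_method = "satterthwaite") # Wald t, Satterthwaite df
model_parameters(model, ci_method = "kenward")       # Kenward-Roger SE and df
model_parameters(model, bootstrap = TRUE, ci_method = "hdi") # bootstrap + HDI
}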
\code{"quantile"} (or \code{"eti"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{equal tailed intervals} using the quantiles of the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:eti]{bayestestR::eti()}}. } \code{"hdi"} \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{highest density intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:hdi]{bayestestR::hdi()}}. } \code{"bci"} (or \code{"bcai"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{bias corrected and accelerated intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:bci]{bayestestR::bci()}}. } \code{"si"} \itemize{ \item Applies to \emph{Bayesian models} with proper priors. CIs computed as \emph{support intervals} comparing the posterior samples against the prior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:si]{bayestestR::si()}}. } \code{"boot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{merMod}. CIs computed using \emph{parametric bootstrapping} (simulating data from the fitted model); p-values computed using the Wald method with a \emph{normal-distribution)} (note: this might change in a future update!). } For all iteration-based methods other than \code{"boot"} (\code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, \code{"bcai"}), p-values are based on the probability of direction (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted into a p-value using \code{\link[bayestestR:pd_to_p]{bayestestR::pd_to_p()}}. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"dispersion"}: returns the dispersion model component. This is common for models with zero-inflation or that can model the dispersion parameter. \item \code{"instruments"}: for instrumental-variable or some fixed effects regression, returns the instruments. \item \code{"nonlinear"}: for non-linear models (like models of class \code{nlmerMod} or \code{nls}), returns staring estimates for the nonlinear parameters. \item \code{"correlation"}: for models with correlation-component, like \code{gls}, the variables used to describe the correlation structure are returned. } \strong{Special models} Some model classes also allow rather uncommon options. 
}

\examples{
\dontshow{if (require("rstanarm")) withAutoprint(\{ # examplesIf}
\donttest{
library(parameters)
model <- suppressWarnings(stan_glm(
  Sepal.Length ~ Petal.Length * Species,
  data = iris, iter = 500, refresh = 0
))
model_parameters(model)
}
\dontshow{\}) # examplesIf}
}
\seealso{
\code{\link[insight:standardize_names]{insight::standardize_names()}} to
rename columns into a consistent, standardized naming scheme.
}
parameters/man/random_parameters.Rd0000644000176200001440000000541514331167101017164 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/random_parameters.R
\name{random_parameters}
\alias{random_parameters}
\title{Summary information from random effects}
\usage{
random_parameters(model, component = "conditional")
}
\arguments{
\item{model}{A mixed effects model (including \code{stanreg} models).}

\item{component}{Should all parameters, parameters for the conditional
model, for the zero-inflation part of the model, or the dispersion model be
returned? Applies to models with zero-inflation and/or dispersion component.
\code{component} may be one of \code{"conditional"} (default), \code{"zi"},
\code{"zero-inflated"}, \code{"dispersion"} or \code{"all"}. May be
abbreviated.}
}
\value{
A data frame with random effects statistics for the variance components,
including number of levels per random effect group, as well as complete
observations in the model.
}
\description{
This function extracts the different variance components of a mixed model
and returns the result as a data frame.
}
\details{
The variance components are obtained from
\code{\link[insight:get_variance]{insight::get_variance()}} and are denoted
as follows:
\subsection{Within-group (or residual) variance}{
The residual variance, \ifelse{html}{\out{σ2ε}}{\eqn{\sigma^2_\epsilon}},
is the sum of the distribution-specific variance and the variance due to
additive dispersion. It indicates the \emph{within-group variance}.
}

\subsection{Between-group random intercept variance}{
The random intercept variance, or \emph{between-group} variance for the
intercept (\ifelse{html}{\out{τ00}}{\eqn{\tau_{00}}}), is obtained from
\code{VarCorr()}. It indicates how much groups or subjects differ from each
other.
}

\subsection{Between-group random slope variance}{
The random slope variance, or \emph{between-group} variance for the slopes
(\ifelse{html}{\out{τ11}}{\eqn{\tau_{11}}}) is obtained from
\code{VarCorr()}. This measure is only available for mixed models with
random slopes. It indicates how much groups or subjects differ from each
other according to their slopes.
}

\subsection{Random slope-intercept correlation}{
The random slope-intercept correlation
(\ifelse{html}{\out{ρ01}}{\eqn{\rho_{01}}}) is obtained from
\code{VarCorr()}. This measure is only available for mixed models with
random intercepts and slopes.

\strong{Note:} For the within-group and between-group variance, variance
and standard deviations (which are simply the square root of the variance)
are shown.
}
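A minimal sketch of restricting the output to one model component (the
zero-inflated mixed model is an assumption for this example, not taken from
the original text):

\preformatted{
# for models with a zero-inflation component (e.g. fitted with glmmTMB),
# return the variance components of the zero-inflation part only
random_parameters(model, component = "zi")
}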
}
\examples{
if (require("lme4")) {
  data(sleepstudy)
  model <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy)
  random_parameters(model)
}
}
parameters/man/parameters_type.Rd0000644000176200001440000000441114076243300016662 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameters_type.R
\name{parameters_type}
\alias{parameters_type}
\title{Type of model parameters}
\usage{
parameters_type(model, ...)
}
\arguments{
\item{model}{A statistical model.}

\item{...}{Arguments passed to or from other methods.}
}
\value{
A data frame.
}
\description{
In a regression model, the parameters do not all have the same meaning. For
instance, the intercept has to be interpreted as the theoretical outcome
value under some conditions (when predictors are set to 0), whereas other
coefficients are to be interpreted as amounts of change. Others, such as
interactions, represent changes in the effect of another parameter. The
\code{parameters_type} function attempts to retrieve information on the
meaning of parameters. It outputs a data frame of information for each
parameter, such as the \code{Type} (whether the parameter corresponds to a
factor or a numeric predictor, or whether it is a (regular) interaction or
a nested one), the \code{Link} (whether the parameter can be interpreted as
a mean value, the slope of an association or a difference between two
levels) and, in the case of interactions, which other parameter is impacted
by which parameter.
}
\examples{
library(parameters)

model <- lm(Sepal.Length ~ Petal.Length + Species, data = iris)
parameters_type(model)

model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris)
parameters_type(model)

model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris)
parameters_type(model)

# Interactions
model <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris)
parameters_type(model)

model <- lm(Sepal.Length ~ Sepal.Width * Species * Petal.Length, data = iris)
parameters_type(model)

model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris)
parameters_type(model)

model <- lm(Sepal.Length ~ Species / Sepal.Width, data = iris)
parameters_type(model)

# Complex interactions
data <- iris
data$fac2 <- ifelse(data$Sepal.Width > mean(data$Sepal.Width), "A", "B")
model <- lm(Sepal.Length ~ Species / fac2 / Petal.Length, data = data)
parameters_type(model)

model <- lm(Sepal.Length ~ Species / fac2 * Petal.Length, data = data)
parameters_type(model)
}
parameters/man/p_value_ml1.Rd0000644000176200001440000000641514716604200015671 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ci_ml1.R, R/dof_ml1.R, R/p_value_ml1.R
\name{ci_ml1}
\alias{ci_ml1}
\alias{dof_ml1}
\alias{p_value_ml1}
\title{"m-l-1" approximation for SEs, CIs and p-values}
\usage{
ci_ml1(model, ci = 0.95, ...)

dof_ml1(model)

p_value_ml1(model, dof = NULL, ...)
}
\arguments{
\item{model}{A mixed model.}

\item{ci}{Confidence Interval (CI) level. Default to \code{0.95}
(\verb{95\%}).}

\item{...}{Additional arguments passed down to the underlying functions.
E.g., arguments like \code{vcov} or \code{vcov_args} can be used to compute
confidence intervals using a specific variance-covariance matrix for the
standard errors.}

\item{dof}{Degrees of Freedom.}
}
\value{
A data frame.
}
\description{
Approximation of degrees of freedom based on an "m-l-1" heuristic as
suggested by Elff et al. (2019).
}
\details{
\subsection{Small Sample Cluster corrected Degrees of Freedom}{
Inferential statistics (like p-values, confidence intervals and standard
errors) may be biased in mixed models when the number of clusters is small
(even if the sample size of level-1 units is high). In such cases it is
recommended to approximate a more accurate number of degrees of freedom for
such inferential statistics (see \emph{Li and Redden 2015}). The
\emph{m-l-1} heuristic is such an approach that uses a t-distribution with
fewer degrees of freedom (\code{dof_ml1()}) to calculate p-values
(\code{p_value_ml1()}) and confidence intervals (\code{ci(method = "ml1")}).
}

\subsection{Degrees of Freedom for Longitudinal Designs (Repeated Measures)}{
In particular for repeated measure designs (longitudinal data analysis),
the \emph{m-l-1} heuristic is likely to be more accurate than simply using
the residual or infinite degrees of freedom, because \code{dof_ml1()}
returns different degrees of freedom for within-cluster and between-cluster
effects.
}

\subsection{Limitations of the "m-l-1" Heuristic}{
Note that the "m-l-1" heuristic is not applicable (or at least less
accurate) for complex multilevel designs, e.g. with cross-classified
clusters. In such cases, more accurate approaches like the Kenward-Roger
approximation (\code{dof_kenward()}) is recommended. However, the "m-l-1"
heuristic also applies to generalized mixed models, while approaches like
Kenward-Roger or Satterthwaite are limited to linear mixed models only.
}
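A minimal sketch of the related helper functions (assuming \strong{lme4} is
installed; the model is illustrative only, not taken from the original
text):

\preformatted{
data(sleepstudy, package = "lme4")
model <- lme4::lmer(Reaction ~ Days + (1 | Subject), data = sleepstudy)

dof_ml1(model) # "m-l-1" approximated degrees of freedom per parameter
ci_ml1(model)  # CIs from a t-distribution with these degrees of freedom
}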
}
\examples{
\donttest{
if (require("lme4")) {
  model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris)
  p_value_ml1(model)
}
}
}
\references{
\itemize{
\item Elff, M.; Heisig, J.P.; Schaeffer, M.; Shikano, S. (2019). Multilevel
Analysis with Few Clusters: Improving Likelihood-based Methods to Provide
Unbiased Estimates and Accurate Inference, British Journal of Political
Science.
\item Li, P., Redden, D. T. (2015). Comparing denominator degrees of freedom
approximations for the generalized linear mixed model in analyzing binary
outcome in small sample cluster-randomized trials. BMC Medical Research
Methodology, 15(1), 38. \doi{10.1186/s12874-015-0026-x}
}
}
\seealso{
\code{\link[=dof_ml1]{dof_ml1()}} is a small helper-function to calculate
approximated degrees of freedom of model parameters, based on the "m-l-1"
heuristic.
}
parameters/man/select_parameters.Rd0000644000176200001440000000466315066721002017171 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/select_parameters.R
\name{select_parameters}
\alias{select_parameters}
\alias{select_parameters.lm}
\alias{select_parameters.merMod}
\title{Automated selection of model parameters}
\usage{
select_parameters(model, ...)

\method{select_parameters}{lm}(model, direction = "both", steps = 1000, k = 2, ...)

\method{select_parameters}{merMod}(model, direction = "backward", steps = 1000, ...)
}
\arguments{
\item{model}{A statistical model (of class \code{lm}, \code{glm}, or
\code{merMod}).}

\item{...}{Arguments passed to or from other methods.}

\item{direction}{
the mode of stepwise search, can be one of \code{"both"}, \code{"backward"},
or \code{"forward"}, with a default of \code{"both"}. If the \code{scope}
argument is missing the default for \code{direction} is \code{"backward"}.
Values can be abbreviated.
}

\item{steps}{
the maximum number of steps to be considered. The default is 1000
(essentially as many as required). It is typically used to stop the process
early.
}

\item{k}{The multiple of the number of degrees of freedom used for the
penalty. Only \code{k = 2} gives the genuine AIC: \code{k = log(n)} is
sometimes referred to as BIC or SBC.}
}
\value{
The model refitted with optimal number of parameters.
}
\description{
This function performs an automated selection of the "best" parameters,
updating and returning the "best" model.
}
\section{Classical lm and glm}{

For frequentist GLMs, \code{select_parameters()} performs an AIC-based
stepwise selection.
}

\section{Mixed models}{

For mixed-effects models of class \code{merMod}, stepwise selection is
based on \code{\link[cAIC4:stepcAIC]{cAIC4::stepcAIC()}}. This step function
only searches for the "best" model based on the random-effects structure,
i.e. \code{select_parameters()} adds or excludes random effects until the
cAIC can't be improved further.
}

\examples{
\dontshow{if (requireNamespace("lme4")) withAutoprint(\{ # examplesIf}
model <- lm(mpg ~ ., data = mtcars)
select_parameters(model)

model <- lm(mpg ~ cyl * disp * hp * wt, data = mtcars)
select_parameters(model)
\donttest{
# lme4 -------------------------------------------
model <- lme4::lmer(
  Sepal.Width ~ Sepal.Length * Petal.Width * Petal.Length + (1 | Species),
  data = iris
)
select_parameters(model)
}
\dontshow{\}) # examplesIf}
}
parameters/man/model_parameters.principal.Rd0000644000176200001440000001765715066721002020775 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_lavaan.R, R/methods_psych.R
\name{model_parameters.lavaan}
\alias{model_parameters.lavaan}
\alias{model_parameters.principal}
\title{Parameters from PCA, FA, CFA, SEM}
\usage{
\method{model_parameters}{lavaan}(
  model,
  ci = 0.95,
  standardize = FALSE,
  component = c("regression", "correlation", "loading", "defined"),
  keep = NULL,
  drop = NULL,
  verbose = TRUE,
  ...
)

\method{model_parameters}{principal}(
  model,
  sort = FALSE,
  threshold = NULL,
  labels = NULL,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{Model object.}

\item{ci}{Confidence Interval (CI) level. Default to \code{0.95}
(\verb{95\%}).}

\item{standardize}{Return standardized parameters (standardized
coefficients). Can be \code{TRUE} (or \code{"all"} or \code{"std.all"}) for
standardized estimates based on both the variances of observed and latent
variables; \code{"latent"} (or \code{"std.lv"}) for standardized estimates
based on the variances of the latent variables only; or
\code{"no_exogenous"} (or \code{"std.nox"}) for standardized estimates
based on both the variances of observed and latent variables, but not the
variances of exogenous covariates. See \code{lavaan::standardizedsolution}
for details.}

\item{component}{What type of links to return.
Can be \code{"all"} or some of \code{c("regression", "correlation", "loading", "variance", "mean")}.} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{See \code{keep}.} \item{verbose}{Toggle warnings.} \item{...}{Arguments passed to or from other methods.} \item{sort}{Sort the loadings.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{labels}{A character vector containing labels to be added to the loadings data. Usually, the question related to the item.} } \value{ A data frame of indices or loadings. } \description{ Format structural models from the \strong{psych} or \strong{FactoMineR} packages. There is a \code{summary()} method for the returned output from \code{model_parameters()}, to show further information. See 'Examples'. } \details{ For the structural models obtained with \strong{psych}, the following indices are present: \itemize{ \item \strong{Complexity} (\cite{Hoffman's, 1978; Pettersson and Turkheimer, 2010}) represents the number of latent components needed to account for the observed variables. Whereas a perfect simple structure solution has a complexity of 1 in that each item would only load on one factor, a solution with evenly distributed items has a complexity greater than 1. \item \strong{Uniqueness} represents the variance that is 'unique' to the variable and not shared with other variables. It is equal to \verb{1 – communality} (variance that is shared with other variables). A uniqueness of \code{0.20} suggests that \verb{20\%} or that variable's variance is not shared with other variables in the overall factor model. The greater 'uniqueness' the lower the relevance of the variable in the factor model. \item \strong{MSA} represents the Kaiser-Meyer-Olkin Measure of Sampling Adequacy (\cite{Kaiser and Rice, 1974}) for each item. It indicates whether there is enough data for each factor give reliable results for the PCA. 
}
\note{
There is also a
\href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method}
for \code{lavaan} models implemented in the
\href{https://easystats.github.io/see/}{\strong{see}-package}.
}
\examples{
\dontshow{if (all(insight::check_if_installed(c("psych", "lavaan"), quietly = TRUE))) withAutoprint(\{ # examplesIf}
library(parameters)
\donttest{
# Principal Component Analysis (PCA) ---------
data(attitude)
pca <- psych::principal(attitude)
model_parameters(pca)
summary(model_parameters(pca))

pca <- psych::principal(attitude, nfactors = 3, rotate = "none")
model_parameters(pca, sort = TRUE, threshold = 0.2)

principal_components(attitude, n = 3, sort = TRUE, threshold = 0.2)

# Exploratory Factor Analysis (EFA) ---------
efa <- psych::fa(attitude, nfactors = 3)
model_parameters(efa,
  threshold = "max", sort = TRUE,
  labels = as.character(1:ncol(attitude))
)

# Omega ---------
data(mtcars)
omega <- psych::omega(mtcars, nfactors = 3, plot = FALSE)
params <- model_parameters(omega)
params
summary(params)
}

# lavaan -------------------------------------
# Confirmatory Factor Analysis (CFA) ---------
data(HolzingerSwineford1939, package = "lavaan")
structure <- " visual  =~ x1 + x2 + x3
               textual =~ x4 + x5 + x6
               speed   =~ x7 + x8 + x9 "
model <- lavaan::cfa(structure, data = HolzingerSwineford1939)
model_parameters(model)
model_parameters(model, standardize = TRUE)

# filter parameters
model_parameters(
  model,
  parameters = list(
    To = "^(?!visual)",
    From = "^(?!(x7|x8))"
  )
)

# Structural Equation Model (SEM) ------------
data(PoliticalDemocracy, package = "lavaan")
structure <- "
  # latent variable definitions
  ind60 =~ x1 + x2 + x3
  dem60 =~ y1 + a*y2 + b*y3 + c*y4
  dem65 =~ y5 + a*y6 + b*y7 + c*y8
  # regressions
  dem60 ~ ind60
  dem65 ~ ind60 + dem60
  # residual correlations
  y1 ~~ y5
  y2 ~~ y4 + y6
  y3 ~~ y7
  y4 ~~ y8
  y6 ~~ y8
"
model <- lavaan::sem(structure, data = PoliticalDemocracy)
model_parameters(model)
model_parameters(model, standardize = TRUE)
\dontshow{\}) # examplesIf}
}
\references{
\itemize{
\item Kaiser, H. F., and Rice, J. (1974). Little Jiffy, Mark IV.
Educational and Psychological Measurement, 34(1), 111-117.
\item Pettersson, E., and Turkheimer, E. (2010). Item selection,
evaluation, and simple structure in personality data. Journal of Research
in Personality, 44(4), 407-420.
\item Revelle, W. (2016). How To: Use the psych package for Factor Analysis
and data reduction.
\item Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate
statistics (6th ed.). Boston: Pearson Education.
\item Rosseel, Y. (2012). lavaan: An R Package for Structural Equation
Modeling. Journal of Statistical Software, 48(2), 1-36.
\item Merkle, E. C., and Rosseel, Y. (2018). blavaan: Bayesian Structural
Equation Models via Parameter Expansion. Journal of Statistical Software,
85(4), 1-30. http://www.jstatsoft.org/v85/i04/
}
}
parameters/man/model_parameters.aov.Rd0000644000176200001440000002406715066721002017576 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_aov.R
\name{model_parameters.aov}
\alias{model_parameters.aov}
\title{Parameters from ANOVAs}
\usage{
\method{model_parameters}{aov}(
  model,
  type = NULL,
  df_error = NULL,
  ci = NULL,
  alternative = NULL,
  p_adjust = NULL,
  test = NULL,
  power = FALSE,
  es_type = NULL,
  keep = NULL,
  drop = NULL,
  include_intercept = FALSE,
  table_wide = FALSE,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{Object of class \code{\link[=aov]{aov()}},
\code{\link[=anova]{anova()}}, \code{aovlist}, \code{Gam},
\code{\link[=manova]{manova()}}, \code{Anova.mlm}, \code{afex_aov} or
\code{maov}.}

\item{type}{Numeric, type of sums of squares. May be 1, 2 or 3. If 2 or 3,
ANOVA-tables using \code{car::Anova()} will be returned. (Ignored for
\code{afex_aov}.)}

\item{df_error}{Denominator degrees of freedom (or degrees of freedom of the
error estimate, i.e., the residuals). This is used to compute effect sizes
for ANOVA-tables from mixed models. See 'Examples'. (Ignored for
\code{afex_aov}.)}

\item{ci}{Confidence Interval (CI) level for effect sizes specified in
\code{es_type}. The default, \code{NULL}, will compute no confidence
intervals. \code{ci} should be a scalar between 0 and 1.}

\item{alternative}{A character string specifying the alternative hypothesis;
Controls the type of CI returned: \code{"two.sided"} (default, two-sided
CI), \code{"greater"} or \code{"less"} (one-sided CI). Partial matching is
allowed (e.g., \code{"g"}, \code{"l"}, \code{"two"}...). See section
\emph{One-Sided CIs} in the
\href{https://easystats.github.io/effectsize/}{effectsize_CIs vignette}.}

\item{p_adjust}{String value, if not \code{NULL}, indicates the method to
adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for
details. Further possible adjustment methods are \code{"tukey"},
\code{"scheffe"}, \code{"sidak"}, \code{"sup-t"}, and \code{"none"} to
explicitly disable adjustment for \code{emmGrid} objects (from
\strong{emmeans}). \code{"sup-t"} computes simultaneous confidence bands,
also called sup-t confidence band (Montiel Olea & Plagborg-Møller, 2019).}

\item{test}{String, indicating the type of test for \code{Anova.mlm} to be
returned. If \code{"multivariate"} (or \code{NULL}), returns the summary of
the multivariate test (that is also given by the \code{print}-method). If
\code{test = "univariate"}, returns the summary of the univariate test.}

\item{power}{Logical, if \code{TRUE}, adds a column with power for each
parameter.}

\item{es_type}{The effect size of interest. Note that possibly not all
effect sizes are applicable to the model object. See 'Details'. For Anova
models, can also be a character vector with multiple effect size names.}

\item{keep}{Character containing a regular expression pattern that
describes the parameters that should be included (for \code{keep}) or
excluded (for \code{drop}) in the returned data frame. \code{keep} may also
be a named list of regular expressions. All non-matching parameters will be
removed from the output. If \code{keep} is a character vector, every
parameter name in the \emph{"Parameter"} column that matches the regular
expression in \code{keep} will be selected from the returned data frame
(and vice versa, all parameter names matching \code{drop} will be
excluded). Furthermore, if \code{keep} has more than one element, these
will be merged with an \code{OR} operator into a regular expression pattern
like this: \code{"(one|two|three)"}. If \code{keep} is a named list of
regular expression patterns, the names of the list-element should equal the
column name where selection should be applied. This is useful for model
objects where \code{model_parameters()} returns multiple columns with
parameter components, like in
\code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}.
Note that the regular expression pattern should match the parameter names
as they are stored in the returned data frame, which can be different from
how they are printed. Inspect the \verb{$Parameter} column of the
parameters table to get the exact parameter names.}

\item{drop}{See \code{keep}.}

\item{include_intercept}{Logical, if \code{TRUE}, includes the intercept
(\code{(Intercept)}) in the anova table.}

\item{table_wide}{Logical that decides whether the ANOVA table should be in
wide format, i.e. should the numerator and denominator degrees of freedom
be in the same row. Default: \code{FALSE}.}

\item{verbose}{Toggle warnings and messages.}

\item{...}{Arguments passed to
\code{\link[effectsize:effectsize]{effectsize::effectsize()}}. For example,
to calculate \emph{partial} effect sizes types, use \code{partial = TRUE}.
For objects of class \code{htest} or \code{BFBayesFactor},
\code{adjust = TRUE} can be used to return bias-corrected effect sizes,
which is advisable for small samples and large tables. See also
\href{https://easystats.github.io/effectsize/reference/eta_squared.html}{\code{?effectsize::eta_squared}}
for arguments \code{partial} and \code{generalized};
\href{https://easystats.github.io/effectsize/reference/phi.html}{\code{?effectsize::phi}}
for \code{adjust}; and
\href{https://easystats.github.io/effectsize/reference/oddsratio.html}{\code{?effectsize::oddsratio}}
for \code{log}.}
}
\value{
A data frame of indices related to the model's parameters.
}
\description{
Parameters from ANOVAs
}
\details{
\itemize{
\item For an object of class \code{htest}, data is extracted via
\code{\link[insight:get_data]{insight::get_data()}}, and passed to the
relevant function according to:
\itemize{
\item A \strong{t-test} depending on \code{type}: \code{"cohens_d"}
(default), \code{"hedges_g"}, or one of \code{"p_superiority"},
\code{"u1"}, \code{"u2"}, \code{"u3"}, \code{"overlap"}.
\itemize{
\item For a \strong{Paired t-test}, depending on \code{type}:
\code{"rm_rm"}, \code{"rm_av"}, \code{"rm_b"}, \code{"rm_d"},
\code{"rm_z"}.
}
\item A \strong{Chi-squared test of independence} or \strong{Fisher's Exact
Test}, depending on \code{type}: \code{"cramers_v"} (default),
\code{"tschuprows_t"}, \code{"phi"}, \code{"cohens_w"},
\code{"pearsons_c"}, \code{"cohens_h"}, \code{"oddsratio"},
\code{"riskratio"}, \code{"arr"}, or \code{"nnt"}.
\item A \strong{Chi-squared test of goodness-of-fit}, depending on
\code{type}: \code{"fei"} (default) \code{"cohens_w"}, \code{"pearsons_c"}
\item A \strong{One-way ANOVA test}, depending on \code{type}:
\code{"eta"} (default), \code{"omega"} or \code{"epsilon"} -squared,
\code{"f"}, or \code{"f2"}.
\item A \strong{McNemar test} returns \emph{Cohen's g}.
\item A \strong{Wilcoxon test} depending on \code{type}: returns
"\code{rank_biserial}" correlation (default) or one of
\code{"p_superiority"}, \code{"vda"}, \code{"u2"}, \code{"u3"},
\code{"overlap"}.
\item A \strong{Kruskal-Wallis test} depending on \code{type}:
\code{"epsilon"} (default) or \code{"eta"}.
\item A \strong{Friedman test} returns \emph{Kendall's W}.
(Where applicable, \code{ci} and \code{alternative} are taken from the
\code{htest} if not otherwise provided.)
}
\item For an object of class \code{BFBayesFactor}, using
\code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}},
\itemize{
\item A \strong{t-test} depending on \code{type}: \code{"cohens_d"}
(default) or one of \code{"p_superiority"}, \code{"u1"}, \code{"u2"},
\code{"u3"}, \code{"overlap"}.
\item A \strong{correlation test} returns \emph{r}. \item A \strong{contingency table test}, depending on \code{type}: \code{"cramers_v"} (default), \code{"phi"}, \code{"tschuprows_t"}, \code{"cohens_w"}, \code{"pearsons_c"}, \code{"cohens_h"}, \code{"oddsratio"}, or \code{"riskratio"}, \code{"arr"}, or \code{"nnt"}. \item A \strong{proportion test} returns \emph{p}. } \item Objects of class \code{anova}, \code{aov}, \code{aovlist} or \code{afex_aov}, depending on \code{type}: \code{"eta"} (default), \code{"omega"} or \code{"epsilon"} -squared, \code{"f"}, or \code{"f2"}. \item Objects of class \code{datawizard_crosstab(s)} / \code{datawizard_table(s)} built with \code{\link[datawizard:data_tabulate]{datawizard::data_tabulate()}} - same as Chi-squared tests of independence / goodness-of-fit, respectively. \item Other objects are passed to \code{\link[parameters:standardize_parameters]{parameters::standardize_parameters()}}. } \strong{For statistical models it is recommended to directly use the listed functions, for the full range of options they provide.} } \note{ For ANOVA-tables from mixed models (i.e. \code{anova(lmer())}), only partial or adjusted effect sizes can be computed. Note that type 3 ANOVAs with interactions involved only give sensible and informative results when covariates are mean-centred and factors are coded with orthogonal contrasts (such as those produced by \code{contr.sum}, \code{contr.poly}, or \code{contr.helmert}, but \emph{not} by the default \code{contr.treatment}). } \examples{ \dontshow{if (requireNamespace("effectsize", quietly = TRUE)) withAutoprint(\{ # examplesIf} df <- iris df$Sepal.Big <- ifelse(df$Sepal.Width >= 3, "Yes", "No") model <- aov(Sepal.Length ~ Sepal.Big, data = df) model_parameters(model) model_parameters(model, es_type = c("omega", "eta"), ci = 0.9) model <- anova(lm(Sepal.Length ~ Sepal.Big, data = df)) model_parameters(model) model_parameters( model, es_type = c("omega", "eta", "epsilon"), alternative = "greater" ) model <- aov(Sepal.Length ~ Sepal.Big + Error(Species), data = df) model_parameters(model) \dontshow{\}) # examplesIf} \dontshow{if (requireNamespace("lme4", quietly = TRUE) && requireNamespace("effectsize", quietly = TRUE)) withAutoprint(\{ # examplesIf} \donttest{ df <- iris df$Sepal.Big <- ifelse(df$Sepal.Width >= 3, "Yes", "No") mm <- lme4::lmer(Sepal.Length ~ Sepal.Big + Petal.Width + (1 | Species), data = df) model <- anova(mm) # simple parameters table model_parameters(model) # parameters table including effect sizes model_parameters( model, es_type = "eta", ci = 0.9, df_error = dof_satterthwaite(mm)[2:3] ) } \dontshow{\}) # examplesIf} } parameters/man/print.compare_parameters.Rd0000644000176200001440000002776715066721002020505 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format.R, R/print.compare_parameters.R, % R/print_html.R, R/print_md.R \name{format.compare_parameters} \alias{format.compare_parameters} \alias{print.compare_parameters} \alias{print_html.compare_parameters} \alias{print_md.compare_parameters} \title{Print comparisons of model parameters} \usage{ \method{format}{compare_parameters}( x, split_components = TRUE, select = NULL, digits = 2, ci_digits = digits, p_digits = 3, ci_width = NULL, ci_brackets = NULL, zap_small = FALSE, format = NULL, groups = NULL, ... 
)

\method{print}{compare_parameters}(
  x,
  split_components = TRUE,
  caption = NULL,
  subtitle = NULL,
  footer = NULL,
  digits = 2,
  ci_digits = digits,
  p_digits = 3,
  zap_small = FALSE,
  groups = NULL,
  column_width = NULL,
  ci_brackets = c("[", "]"),
  select = NULL,
  ...
)

\method{print_html}{compare_parameters}(
  x,
  caption = NULL,
  subtitle = NULL,
  footer = NULL,
  digits = 2,
  ci_digits = digits,
  p_digits = 3,
  zap_small = FALSE,
  groups = NULL,
  select = NULL,
  ci_brackets = c("(", ")"),
  font_size = "100\%",
  line_padding = 4,
  column_labels = NULL,
  ...
)

\method{print_md}{compare_parameters}(
  x,
  digits = 2,
  ci_digits = digits,
  p_digits = 3,
  caption = NULL,
  subtitle = NULL,
  footer = NULL,
  select = NULL,
  split_components = TRUE,
  ci_brackets = c("(", ")"),
  zap_small = FALSE,
  groups = NULL,
  ...
)
}
\arguments{
\item{x}{An object returned by
\code{\link[=compare_parameters]{compare_parameters()}}.}

\item{split_components}{Logical, if \code{TRUE} (default), For models with
multiple components (zero-inflation, smooth terms, ...), each component is
printed in a separate table. If \code{FALSE}, model parameters are printed
in a single table and a \code{Component} column is added to the output.}

\item{select}{Determines which columns, and which layout for the columns,
are printed. There are three options for this argument:
\itemize{
\item \strong{Selecting columns by name or index}

\code{select} can be a character vector (or numeric index) of column names
that should be printed, where columns are extracted from the data frame
returned by \code{model_parameters()} and related functions.

There are two pre-defined options for selecting columns:
\code{select = "minimal"} prints coefficients, confidence intervals and
p-values, while \code{select = "short"} prints coefficients, standard
errors and p-values.
\item \strong{A string expression with layout pattern}

\code{select} is a string with "tokens" enclosed in braces. These tokens
will be replaced by their associated columns, where the selected columns
will be collapsed into one column. Following tokens are replaced by the
related coefficients or statistics: \code{{estimate}}, \code{{se}},
\code{{ci}} (or \code{{ci_low}} and \code{{ci_high}}), \code{{p}} and
\code{{stars}}. The token \code{{ci}} will be replaced by
\verb{\{ci_low\}, \{ci_high\}}. Example: \code{select = "{estimate}{stars} ({ci})"}

It is possible to create multiple columns as well. A \code{|} separates
values into new cells/columns. Example:
\code{select = "{estimate} ({ci})|{p}"}.

If \code{format = "html"}, a \verb{<br>}
inserts a line break inside a cell. See 'Examples'.
\item \strong{A string indicating a pre-defined layout}

\code{select} can be one of the following string values, to create one of
the following pre-defined column layouts:
\itemize{
\item \code{"ci"}: Estimates and confidence intervals, no asterisks for
p-values. This is equivalent to \code{select = "{estimate} ({ci})"}.
\item \code{"se"}: Estimates and standard errors, no asterisks for
p-values. This is equivalent to \code{select = "{estimate} ({se})"}.
\item \code{"ci_p"}: Estimates, confidence intervals and asterisks for
p-values. This is equivalent to \code{select = "{estimate}{stars} ({ci})"}.
\item \code{"se_p"}: Estimates, standard errors and asterisks for p-values.
This is equivalent to \code{select = "{estimate}{stars} ({se})"}.
\item \code{"ci_p2"}: Estimates, confidence intervals and numeric p-values,
in two columns. This is equivalent to
\code{select = "{estimate} ({ci})|{p}"}.
\item \code{"se_p2"}: Estimate, standard errors and numeric p-values, in
two columns. This is equivalent to
\code{select = "{estimate} ({se})|{p}"}.
}
}

For \code{model_parameters()}, glue-like syntax is still experimental in
the case of more complex models (like mixed models) and may not return
expected results.}

\item{digits, ci_digits, p_digits}{Number of digits for rounding or
significant figures. May also be \code{"signif"} to return significant
figures or \code{"scientific"} to return scientific notation. Control the
number of digits by adding the value as suffix, e.g.
\code{digits = "scientific4"} to have scientific notation with 4 decimal
places, or \code{digits = "signif5"} for 5 significant figures (see also
\code{\link[=signif]{signif()}}).}

\item{ci_width}{Minimum width of the returned string for confidence
intervals. If not \code{NULL} and width is larger than the string's length,
leading whitespaces are added to the string. If \code{width="auto"}, width
will be set to the length of the longest string.}

\item{ci_brackets}{Logical, if \code{TRUE} (default), CI-values are
encompassed in square brackets (else in parentheses).}

\item{zap_small}{Logical, if \code{TRUE}, small values are rounded after
\code{digits} decimal places. If \code{FALSE}, values with more decimal
places than \code{digits} are printed in scientific notation.}

\item{format}{Name of output-format, as string. If \code{NULL} (or
\code{"text"}), returned output is used for basic printing. Can be one of
\code{NULL} (the default) resp. \code{"text"} for plain text,
\code{"markdown"} (or \code{"md"}) for markdown and \code{"html"} for HTML
output. A special option is \code{"tt"}, which creates a
\code{\link[tinytable:tt]{tinytable::tt()}} object, where the output format
is dependent on the context where the table is used, i.e. it can be
markdown format when \code{export_table()} is used in markdown files, or
LaTeX format when creating PDFs etc.}

\item{groups}{Named list, can be used to group parameters in the printed
output. List elements may either be character vectors that match the name
of those parameters that belong to one group, or list elements can be row
numbers of those parameter rows that should belong to one group. The names
of the list elements will be used as group names, which will be inserted
as "header row". A possible use case might be to emphasize focal predictors
and control variables, see 'Examples'.
Parameters will be re-ordered according to the order used in \code{groups}, while all non-matching parameters will be added to the end.} \item{...}{Arguments passed down to \code{\link[=format.parameters_model]{format.parameters_model()}}, \code{\link[insight:format_table]{insight::format_table()}} and \code{\link[insight:export_table]{insight::export_table()}}} \item{caption}{Table caption as string. If \code{NULL}, depending on the model, either a default caption or no table caption is printed. Use \code{caption = ""} to suppress the table caption.} \item{subtitle}{Table title (same as caption) and subtitle, as strings. If \code{NULL}, no title or subtitle is printed, unless it is stored as attributes (\code{table_title}, or its alias \code{table_caption}, and \code{table_subtitle}). If \code{x} is a list of data frames, \code{caption} may be a list of table captions, one for each table.} \item{footer}{Can either be \code{FALSE} or an empty string (i.e. \code{""}) to suppress the footer, \code{NULL} to print the default footer, or a string. The latter will combine the string value with the default footer.} \item{column_width}{Width of table columns. Can be either \code{NULL}, a named numeric vector, or \code{"fixed"}. If \code{NULL}, the width for each table column is adjusted to the minimum required width. If a named numeric vector, value names are matched against column names, and for each match, the specified width is used. If \code{"fixed"}, and table is split into multiple components, columns across all table components are adjusted to have the same width.} \item{font_size}{For HTML tables, the font size.} \item{line_padding}{For HTML tables, the distance (in pixel) between lines.} \item{column_labels}{Labels of columns for HTML tables. If \code{NULL}, automatic column names are generated. See 'Examples'.} } \value{ Invisibly returns the original input object. } \description{ A \code{print()}-method for objects from \code{\link[=compare_parameters]{compare_parameters()}}. } \section{Global Options to Customize Messages and Tables when Printing}{ The \code{verbose} argument can be used to display or silence messages and warnings for the different functions in the \strong{parameters} package. However, some messages providing additional information can be displayed or suppressed using \code{options()}: \itemize{ \item \code{parameters_info}: \code{options(parameters_info = TRUE)} will override the \code{include_info} argument in \code{model_parameters()} and always show the model summary for non-mixed models. \item \code{parameters_mixed_info}: \code{options(parameters_mixed_info = TRUE)} will override the \code{include_info} argument in \code{model_parameters()} for mixed models, and will then always show the model summary. \item \code{parameters_cimethod}: \code{options(parameters_cimethod = TRUE)} will show the additional information about the approximation method used to calculate confidence intervals and p-values. Set to \code{FALSE} to hide this message when printing \code{model_parameters()} objects. \item \code{parameters_exponentiate}: \code{options(parameters_exponentiate = TRUE)} will show the additional information on how to interpret coefficients of models with log-transformed response variables or with log-/logit-links when the \code{exponentiate} argument in \code{model_parameters()} is not \code{TRUE}. Set this option to \code{FALSE} to hide this message when printing \code{model_parameters()} objects. 
} There are further options that can be used to modify the default behaviour for printed outputs: \itemize{ \item \code{parameters_labels}: \code{options(parameters_labels = TRUE)} will use variable and value labels for pretty names, if data is labelled. If no labels are available, default pretty names are used. \item \code{parameters_interaction}: \verb{options(parameters_interaction = <character>)} will replace the interaction mark (by default, \code{*}) with the related character. \item \code{parameters_select}: \verb{options(parameters_select = <value>)} will set the default for the \code{select} argument. See argument's documentation for available options. \item \code{easystats_table_width}: \verb{options(easystats_table_width = <number>)} will set the default width for tables in text-format, i.e. for most of the outputs printed to console. If not specified, tables will be adjusted to the current available width, e.g. of the console (or any other source for textual output, like markdown files). The argument \code{table_width} can also be used in most \code{print()} methods to specify the table width as desired. \item \code{insight_use_symbols}: \code{options(insight_use_symbols = TRUE)} will try to print unicode-chars for symbols as column names, wherever possible (e.g., \ifelse{html}{\out{ω}}{\eqn{\omega}} instead of \code{Omega}). } } \examples{ \dontshow{if (require("gt", quietly = TRUE)) withAutoprint(\{ # examplesIf} \donttest{ data(iris) lm1 <- lm(Sepal.Length ~ Species, data = iris) lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) # custom style result <- compare_parameters(lm1, lm2, select = "{estimate}{stars} ({se})") print(result) # custom style, in HTML result <- compare_parameters(lm1, lm2, select = "{estimate}<br>({se})|{p}")
({se})|{p}") print_html(result) } \dontshow{\}) # examplesIf} } parameters/man/dot-n_factors_sescree.Rd0000644000176200001440000000061513641634603017742 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_sescree} \alias{.n_factors_sescree} \title{Standard Error Scree and Coefficient of Determination Procedures} \usage{ .n_factors_sescree(eigen_values = NULL, model = "factors") } \description{ Standard Error Scree and Coefficient of Determination Procedures } \keyword{internal} parameters/man/model_parameters.compare.loo.Rd0000644000176200001440000000402615066721002021220 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_loo.R \name{model_parameters.compare.loo} \alias{model_parameters.compare.loo} \title{Bayesian Model Comparison} \usage{ \method{model_parameters}{compare.loo}(model, include_IC = TRUE, include_ENP = FALSE, ...) } \arguments{ \item{model}{An object of class \link[brms:loo_compare.brmsfit]{brms::loo_compare}.} \item{include_IC}{Whether to include the information criteria (IC).} \item{include_ENP}{Whether to include the effective number of parameters (ENP).} \item{...}{Additional arguments (not used for now).} } \value{ Objects of \code{parameters_model}. } \description{ Make a table of Bayesian model comparisons using the \code{loo} package. } \details{ The rule of thumb is that the models are "very similar" if |elpd_diff| (the absolute value of elpd_diff) is less than 4 (Sivula, Magnusson and Vehtari, 2020). If superior to 4, then one can use the SE to obtain a standardized difference (Z-diff) and interpret it as such, assuming that the difference is normally distributed. The corresponding p-value is then calculated as \code{2 * pnorm(-abs(Z-diff))}. However, note that if the raw ELPD difference is small (less than 4), it doesn't make much sense to rely on its standardized value: it is not very useful to conclude that a model is much better than another if both models make very similar predictions. } \examples{ \dontshow{if (all(insight::check_if_installed(c("brms", "RcppEigen", "BH"), quietly = TRUE))) withAutoprint(\{ # examplesIf} \donttest{ library(brms) m1 <- brms::brm(mpg ~ qsec, data = mtcars) m2 <- brms::brm(mpg ~ qsec + drat, data = mtcars) m3 <- brms::brm(mpg ~ qsec + drat + wt, data = mtcars) x <- suppressWarnings(brms::loo_compare( brms::add_criterion(m1, "loo"), brms::add_criterion(m2, "loo"), brms::add_criterion(m3, "loo"), model_names = c("m1", "m2", "m3") )) model_parameters(x) model_parameters(x, include_IC = FALSE, include_ENP = TRUE) } \dontshow{\}) # examplesIf} } parameters/man/equivalence_test.lm.Rd0000644000176200001440000004213715066721002017434 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/equivalence_test.R \name{equivalence_test.lm} \alias{equivalence_test.lm} \title{Equivalence test} \usage{ \method{equivalence_test}{lm}( x, range = "default", ci = 0.95, rule = "classic", effects = "fixed", vcov = NULL, vcov_args = NULL, verbose = TRUE, ... ) } \arguments{ \item{x}{A statistical model.} \item{range}{The range of practical equivalence of an effect. May be \code{"default"}, to automatically define this range based on properties of the model's data.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{rule}{Character, indicating the rules when testing for practical equivalence. Can be \code{"bayes"}, \code{"classic"} or \code{"cet"}. 
See 'Details'.} \item{effects}{Should parameters for fixed effects (\code{"fixed"}), random effects (\code{"random"}), or both fixed and random effects (\code{"all"}) be returned? By default, the variance components for random effects are returned. If group-level effects are requested, \code{"grouplevel"} returns the group-level random effects (BLUPs), while \code{"random_total"} returns the overall (sum of fixed and random) effects (similar to what \code{coef()} returns). Using \code{"grouplevel"} is equivalent to setting \code{group_level = TRUE}. The \code{effects} argument only applies to mixed models. If the calculation of random effects parameters takes too long, you may use \code{effects = "fixed"}.} \item{vcov}{Variance-covariance matrix used to compute uncertainty estimates (e.g., for robust standard errors). This argument accepts a covariance matrix, a function which returns a covariance matrix, or a string which identifies the function to be used to compute the covariance matrix. \itemize{ \item A covariance matrix \item A function which returns a covariance matrix (e.g., \code{stats::vcov()}) \item A string which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Cluster-robust: \code{"CR"}, \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, \code{"CR3"}. See \code{?clubSandwich::vcovCR} \item Bootstrap: \code{"BS"}, \code{"xy"}, \code{"residual"}, \code{"wild"}, \code{"mammen"}, \code{"fractional"}, \code{"jackknife"}, \code{"norm"}, \code{"webb"}. See \code{?sandwich::vcovBS} \item Other \code{sandwich} package functions: \code{"HAC"}, \code{"PC"}, \code{"CL"}, \code{"OPG"}, \code{"PL"}. } }} \item{vcov_args}{List of arguments to be passed to the function identified by the \code{vcov} argument. This function is typically supplied by the \strong{sandwich} or \strong{clubSandwich} packages. Please refer to their documentation (e.g., \code{?sandwich::vcovHAC}) to see the list of available arguments. If no estimation type (argument \code{type}) is given, the default type for \code{"HC"} equals the default from the \strong{sandwich} package; for type \code{"CR"}, the default is set to \code{"CR3"}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame. } \description{ Compute the (conditional) equivalence test for frequentist models. } \details{ In classical null hypothesis significance testing (NHST) within a frequentist framework, it is not possible to accept the null hypothesis, H0 - unlike in Bayesian statistics, where such probability statements are possible. "[...] one can only reject the null hypothesis if the test statistic falls into the critical region(s), or fail to reject this hypothesis. In the latter case, all we can say is that no significant effect was observed, but one cannot conclude that the null hypothesis is true." (\emph{Pernet 2017}). One way to address this issue without Bayesian methods is \emph{Equivalence Testing}, as implemented in \code{equivalence_test()}. While you can either reject the null hypothesis or claim an inconclusive result in NHST, the equivalence test - according to \emph{Pernet} - adds a third category, \emph{"accept"}.
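As a minimal sketch of how the \code{rule} argument (described below) changes the decision, one can compare all three rules on the model from the 'Examples' section:

\preformatted{data(qol_cancer)
m <- lm(QoL ~ time + age + education, data = qol_cancer)

# TOST rule (default), HDI+ROPE rule, and conditional equivalence testing
equivalence_test(m, rule = "classic")
equivalence_test(m, rule = "bayes")
equivalence_test(m, rule = "cet")
}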
Roughly speaking, the idea behind equivalence testing in a frequentist framework is to check whether an estimate and its uncertainty (i.e. confidence interval) fall within a region of "practical equivalence". Depending on the rule for this test (see below), statistical significance does not necessarily indicate whether the null hypothesis can be rejected or not, i.e. the classical interpretation of the p-value may differ from the results returned from the equivalence test. \subsection{Calculation of equivalence testing}{ \itemize{ \item "bayes" - Bayesian rule (Kruschke 2018) This rule follows the "HDI+ROPE decision rule" (\emph{Kruschke, 2014, 2018}) used for the \code{\link[bayestestR:equivalence_test]{Bayesian counterpart()}}. This means, if the confidence intervals are completely outside the ROPE, the "null hypothesis" for this parameter is "rejected". If the ROPE completely covers the CI, the null hypothesis is accepted. Else, it's undecided whether to accept or reject the null hypothesis. Desirable results are low proportions inside the ROPE (the closer to zero the better). \item "classic" - The TOST rule (Lakens 2017) This rule follows the "TOST rule", i.e. a two one-sided test procedure (\emph{Lakens 2017}). Following this rule... \itemize{ \item practical equivalence is assumed (i.e. H0 \emph{"accepted"}) when the narrow confidence intervals are completely inside the ROPE, no matter if the effect is statistically significant or not; \item practical equivalence (i.e. H0) is \emph{rejected}, when the coefficient is statistically significant, both when the narrow confidence intervals (i.e. \code{1-2*alpha}) include or exclude the ROPE boundaries, but the narrow confidence intervals are \emph{not fully covered} by the ROPE; \item else the decision whether to accept or reject practical equivalence is undecided (i.e. when effects are \emph{not} statistically significant \emph{and} the narrow confidence intervals overlap the ROPE). } \item "cet" - Conditional Equivalence Testing (Campbell/Gustafson 2018) The Conditional Equivalence Testing as described by \emph{Campbell and Gustafson 2018}. According to this rule, practical equivalence is rejected when the coefficient is statistically significant. When the effect is \emph{not} significant and the narrow confidence intervals are completely inside the ROPE, we accept (i.e. assume) practical equivalence, else it is undecided. } } \subsection{Levels of Confidence Intervals used for Equivalence Testing}{ For \code{rule = "classic"}, "narrow" confidence intervals are used for equivalence testing. "Narrow" means the interval is not 1 - alpha, but 1 - 2 * alpha. Thus, if \code{ci = .95}, alpha is assumed to be 0.05 and internally a ci-level of 0.90 is used. \code{rule = "cet"} uses both regular and narrow confidence intervals, while \code{rule = "bayes"} only uses the regular intervals. } \subsection{p-Values}{ The equivalence p-value is the area of the (cumulative) confidence distribution that is outside the region of equivalence. It can be interpreted as the p-value for \emph{rejecting} the alternative hypothesis and \emph{accepting} the "null hypothesis" (i.e. assuming practical equivalence). That is, a high p-value means we reject the assumption of practical equivalence and accept the alternative hypothesis.
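For instance, the equivalence p-values can be inspected directly in the returned data frame (a minimal sketch; the column name \code{p} is assumed here):

\preformatted{m <- lm(QoL ~ time + age + education, data = qol_cancer)
out <- equivalence_test(m)

# high values reject practical equivalence, i.e. favor the alternative
out$p
}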
} \subsection{Second Generation p-Value (SGPV)}{ Second generation p-values (SGPV) were proposed as a statistic that represents \emph{the proportion of data-supported hypotheses that are also null hypotheses} \emph{(Blume et al. 2018, Lakens and Delacre 2020)}. It represents the proportion of the \emph{full} confidence interval range (assuming a normally or t-distributed, equal-tailed interval, based on the model) that is inside the ROPE. The SGPV ranges from zero to one. Higher values indicate that the effect is more likely to be practically equivalent ("not of interest"). Note that the assumed interval, which is used to calculate the SGPV, is an estimation of the \emph{full interval} based on the chosen confidence level. For example, if the 95\% confidence interval of a coefficient ranges from -1 to 1, the underlying \emph{full (normally or t-distributed) interval} approximately ranges from -1.9 to 1.9, see also the following code: \if{html}{\out{<div class="sourceCode r">
}}\preformatted{# simulate full normal distribution
out <- bayestestR::distribution_normal(10000, 0, 0.5)

# range of "full" distribution
range(out)

# range of 95\% CI
round(quantile(out, probs = c(0.025, 0.975)), 2)
}\if{html}{\out{</div>
}} This ensures that the SGPV always refers to the general compatible parameter space of coefficients, independent of the confidence interval chosen for testing practical equivalence. Therefore, the SGPV of the \emph{full interval} is similar to the ROPE coverage of Bayesian equivalence tests, see the following code: \if{html}{\out{<div class="sourceCode r">
}}\preformatted{library(bayestestR)
library(brms)

m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars)
m2 <- brm(mpg ~ gear + wt + cyl + hp, data = mtcars)

# SGPV for frequentist models
equivalence_test(m)

# similar to ROPE coverage of Bayesian models
equivalence_test(m2)

# similar to ROPE coverage of simulated draws / bootstrap samples
equivalence_test(simulate_model(m))
}\if{html}{\out{</div>
}} } \subsection{ROPE range}{ Some attention is required for finding suitable values for the ROPE limits (argument \code{range}). See 'Details' in \code{\link[bayestestR:rope_range]{bayestestR::rope_range()}} for further information. } } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\strong{see}-package}. } \section{Statistical inference - how to quantify evidence}{ There is no standardized approach to drawing conclusions based on the available data and statistical models. A frequently chosen but also much criticized approach is to evaluate results based on their statistical significance (\emph{Amrhein et al. 2017}). A more sophisticated way would be to test whether estimated effects exceed the "smallest effect size of interest", to avoid even the smallest effects being considered relevant simply because they are statistically significant, but clinically or practically irrelevant (\emph{Lakens et al. 2018, Lakens 2024}). A rather unconventional approach, which is nevertheless advocated by various authors, is to interpret results from classical regression models either in terms of probabilities, similar to the usual approach in Bayesian statistics (\emph{Schweder 2018; Schweder and Hjort 2003; Vos 2022}) or in terms of relative measure of "evidence" or "compatibility" with the data (\emph{Greenland et al. 2022; Rafi and Greenland 2020}), which nevertheless comes close to a probabilistic interpretation. A more detailed discussion of this topic is found in the documentation of \code{\link[=p_function]{p_function()}}. The \strong{parameters} package provides several options or functions to aid statistical inference. These are, for example: \itemize{ \item \code{\link[=equivalence_test.lm]{equivalence_test()}}, to compute the (conditional) equivalence test for frequentist models \item \code{\link[=p_significance.lm]{p_significance()}}, to compute the probability of \emph{practical significance}, which can be conceptualized as a unidirectional equivalence test \item \code{\link[=p_function]{p_function()}}, or \emph{consonance function}, to compute p-values and compatibility (confidence) intervals for statistical models \item the \code{pd} argument (setting \code{pd = TRUE}) in \code{model_parameters()} includes a column with the \emph{probability of direction}, i.e. the probability that a parameter is strictly positive or negative. See \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details. If plotting is desired, the \code{\link[=p_direction.lm]{p_direction()}} function can be used, together with \code{plot()}. \item the \code{s_value} argument (setting \code{s_value = TRUE}) in \code{model_parameters()} replaces the p-values with their related \emph{S}-values (\emph{Rafi and Greenland 2020}) \item finally, it is possible to generate distributions of model coefficients by generating bootstrap-samples (setting \code{bootstrap = TRUE}) or simulating draws from model coefficients using \code{\link[=simulate_model]{simulate_model()}}. These samples can then be treated as "posterior samples" and used in many functions from the \strong{bayestestR} package. } Most of the above shown options or functions derive from methods originally implemented for Bayesian models (\emph{Makowski et al. 2019}). 
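A compact sketch of some of these options, reusing the model from the 'Examples' (the shown arguments are documented above):

\preformatted{m <- lm(QoL ~ time + age + education, data = qol_cancer)

# add the probability of direction and replace p-values by S-values
model_parameters(m, pd = TRUE, s_value = TRUE)

# treat simulated coefficients as "posterior samples"
draws <- simulate_model(m)
bayestestR::p_direction(draws)
}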
However, assuming that model assumptions are met (which means, the model fits well to the data, the correct model is chosen that reflects the data generating process (distributional model family) etc.), it seems appropriate to interpret results from classical frequentist models in a "Bayesian way" (more details: documentation in \code{\link[=p_function]{p_function()}}). } \examples{ \dontshow{if (requireNamespace("sandwich")) withAutoprint(\{ # examplesIf} data(qol_cancer) model <- lm(QoL ~ time + age + education, data = qol_cancer) # default rule equivalence_test(model) # using heteroscedasticity-robust standard errors equivalence_test(model, vcov = "HC3") # conditional equivalence test equivalence_test(model, rule = "cet") # plot method if (require("see", quietly = TRUE)) { result <- equivalence_test(model) plot(result) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is flat (p > 0.05): Significance thresholds and the crisis of unreplicable research. PeerJ, 5, e3544. \doi{10.7717/peerj.3544} \item Blume, J. D., D'Agostino McGowan, L., Dupont, W. D., & Greevy, R. A. (2018). Second-generation p-values: Improved rigor, reproducibility, & transparency in statistical analyses. PLOS ONE, 13(3), e0188299. https://doi.org/10.1371/journal.pone.0188299 \item Campbell, H., & Gustafson, P. (2018). Conditional equivalence testing: An alternative remedy for publication bias. PLOS ONE, 13(4), e0195145. doi: 10.1371/journal.pone.0195145 \item Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference, Emphasize Unconditional Compatibility Descriptions of Statistics. (2022) https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022) \item Kruschke, J. K. (2014). Doing Bayesian data analysis: A tutorial with R, JAGS, and Stan. Academic Press \item Kruschke, J. K. (2018). Rejecting or accepting parameter values in Bayesian estimation. Advances in Methods and Practices in Psychological Science, 1(2), 270-280. doi: 10.1177/2515245918771304 \item Lakens, D. (2017). Equivalence Tests: A Practical Primer for t Tests, Correlations, and Meta-Analyses. Social Psychological and Personality Science, 8(4), 355–362. doi: 10.1177/1948550617697177 \item Lakens, D. (2024). Improving Your Statistical Inferences (Version v1.5.1). Retrieved from https://lakens.github.io/statistical_inferences/. \doi{10.5281/ZENODO.6409077} \item Lakens, D., and Delacre, M. (2020). Equivalence Testing and the Second Generation P-Value. Meta-Psychology, 4. https://doi.org/10.15626/MP.2018.933 \item Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence Testing for Psychological Research: A Tutorial. Advances in Methods and Practices in Psychological Science, 1(2), 259–269. \item Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767} \item Pernet, C. (2017). Null hypothesis significance testing: A guide to commonly misunderstood concepts and recommendations for good practice. F1000Research, 4, 621. doi: 10.12688/f1000research.6963.5 \item Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical science: replace confidence and significance by compatibility and surprise. BMC Medical Research Methodology (2020) 20:244. \item Schweder T. Confidence is epistemic probability for empirical science. Journal of Statistical Planning and Inference (2018) 195:116–125. 
\doi{10.1016/j.jspi.2017.09.016} \item Schweder T, Hjort NL. Frequentist analogues of priors and posteriors. In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory Data Confrontation in Economics, pp. 285-217. Princeton University Press, Princeton, NJ, 2003 \item Vos P, Holbert D. Frequentist statistical inference without repeated sampling. Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x} } } \seealso{ For more details, see \code{\link[bayestestR:equivalence_test]{bayestestR::equivalence_test()}}. Further readings can be found in the references. See also \code{\link[=p_significance]{p_significance()}} for a unidirectional equivalence test. } parameters/man/cluster_centers.Rd0000644000176200001440000000201714412513617016667 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cluster_centers.R \name{cluster_centers} \alias{cluster_centers} \title{Find the cluster centers in your data} \usage{ cluster_centers(data, clusters, fun = mean, ...) } \arguments{ \item{data}{A data.frame.} \item{clusters}{A vector with clusters assignments (must be same length as rows in data).} \item{fun}{What function to use, \code{mean} by default.} \item{...}{Other arguments to be passed to or from other functions.} } \value{ A dataframe containing the cluster centers. Attributes include performance statistics and distance between each observation and its respective cluster centre. } \description{ For each cluster, computes the mean (or other indices) of the variables. Can be used to retrieve the centers of clusters. Also returns the within Sum of Squares. } \examples{ k <- kmeans(iris[1:4], 3) cluster_centers(iris[1:4], clusters = k$cluster) cluster_centers(iris[1:4], clusters = k$cluster, fun = median) } parameters/man/p_value_kenward.Rd0000644000176200001440000000461415073732442016641 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_kenward.R, R/dof_kenward.R, % R/p_value_kenward.R, R/standard_error_kenward.R \name{ci_kenward} \alias{ci_kenward} \alias{dof_kenward} \alias{p_value_kenward} \alias{se_kenward} \title{Kenward-Roger approximation for SEs, CIs and p-values} \usage{ ci_kenward(model, ci = 0.95, ...) dof_kenward(model) p_value_kenward(model, dof = NULL) se_kenward(model, ...) } \arguments{ \item{model}{A statistical model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{...}{Additional arguments passed down to the underlying functions. E.g., arguments like \code{vcov} or \code{vcov_args} can be used to compute confidence intervals using a specific variance-covariance matrix for the standard errors.} \item{dof}{Degrees of Freedom.} } \value{ A data frame. } \description{ An approximate F-test based on the Kenward-Roger (1997) approach. } \details{ Inferential statistics (like p-values, confidence intervals and standard errors) may be biased in mixed models when the number of clusters is small (even if the sample size of level-1 units is high). In such cases it is recommended to approximate a more accurate number of degrees of freedom for such inferential statistics. Unlike simpler approximation heuristics like the "m-l-1" rule (\code{dof_ml1}), the Kenward-Roger approximation is also applicable in more complex multilevel designs, e.g. with cross-classified clusters. However, the "m-l-1" heuristic also applies to generalized mixed models, while approaches like Kenward-Roger or Satterthwaite are limited to linear mixed models only. 
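In practice, the approximation is most conveniently requested through \code{model_parameters()} instead of calling the helper functions directly (a minimal sketch, assuming the \strong{lme4} and \strong{pbkrtest} packages are installed):

\preformatted{model <- lme4::lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris)

# Kenward-Roger approximation for SEs, CIs and p-values
model_parameters(model, ci_method = "kenward")
}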
} \examples{ \donttest{ if (require("lme4", quietly = TRUE)) { model <- lmer(Petal.Length ~ Sepal.Length + (1 | Species), data = iris) p_value_kenward(model) } } } \references{ Kenward, M. G., & Roger, J. H. (1997). Small sample inference for fixed effects from restricted maximum likelihood. Biometrics, 983-997. } \seealso{ \code{dof_kenward()} and \code{se_kenward()} are small helper-functions to calculate approximated degrees of freedom and standard errors for model parameters, based on the Kenward-Roger (1997) approach. \code{\link[=dof_satterthwaite]{dof_satterthwaite()}} and \code{\link[=dof_ml1]{dof_ml1()}} approximate degrees of freedom based on Satterthwaite's method or the "m-l-1" rule. } parameters/man/cluster_discrimination.Rd0000644000176200001440000000262315066721002020236 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cluster_discrimination.R \name{cluster_discrimination} \alias{cluster_discrimination} \title{Compute a linear discriminant analysis on classified cluster groups} \usage{ cluster_discrimination(x, cluster_groups = NULL, ...) } \arguments{ \item{x}{A data frame.} \item{cluster_groups}{Group classification of the cluster analysis, which can be retrieved from the \code{\link[=cluster_analysis]{cluster_analysis()}} function.} \item{...}{Other arguments to be passed to or from other methods.} } \description{ Computes linear discriminant analysis (LDA) on classified cluster groups, and determines the goodness of classification for each cluster group. See \code{MASS::lda()} for details. } \examples{ \dontshow{if (requireNamespace("MASS", quietly = TRUE)) withAutoprint(\{ # examplesIf} # Retrieve group classification from hierarchical cluster analysis clustering <- cluster_analysis(iris[, 1:4], n = 3) # Goodness of group classification cluster_discrimination(clustering) \dontshow{\}) # examplesIf} } \seealso{ \code{\link[=n_clusters]{n_clusters()}} to determine the number of clusters to extract, \code{\link[=cluster_analysis]{cluster_analysis()}} to compute a cluster analysis and \code{\link[performance:check_clusterstructure]{performance::check_clusterstructure()}} to check suitability of data for clustering. } parameters/man/p_calibrate.Rd0000644000176200001440000000322014334452253015726 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_calibrate.R \name{p_calibrate} \alias{p_calibrate} \alias{p_calibrate.default} \title{Calculate calibrated p-values.} \usage{ p_calibrate(x, ...) \method{p_calibrate}{default}(x, type = "frequentist", verbose = TRUE, ...) } \arguments{ \item{x}{A numeric vector of p-values, or a regression model object.} \item{...}{Currently not used.} \item{type}{Type of calibration. Can be \code{"frequentist"} or \code{"bayesian"}. See 'Details'.} \item{verbose}{Toggle warnings.} } \value{ A data frame with p-values and calibrated p-values. } \description{ Compute calibrated p-values that can be interpreted probabilistically, i.e. as the posterior probability of H0 (given that H0 and H1 have equal prior probabilities). } \details{ The Bayesian calibration, i.e. when \code{type = "bayesian"}, can be interpreted as the lower bound of the Bayes factor for H0 to H1, based on the data. The full Bayes factor would then require multiplying by the prior odds of H0 to H1. The frequentist calibration also has a Bayesian interpretation; it is the posterior probability of H0, assuming that H0 and H1 have equal prior probabilities of 0.5 each (\emph{Sellke et al. 2001}).
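The frequentist calibration can be written out directly (a sketch of the formula from \emph{Sellke et al. 2001}, valid for p-values not larger than \code{1/e}, see below):

\preformatted{p <- 0.05

# lower bound on the Bayes factor for H0 vs. H1
bf_bound <- -exp(1) * p * log(p)

# posterior probability of H0, assuming equal prior probabilities
1 / (1 + 1 / bf_bound)
# ~ 0.29
}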
The calibration only works for p-values lower than or equal to \code{1/e}. } \examples{ model <- lm(mpg ~ wt + as.factor(gear) + am, data = mtcars) p_calibrate(model, verbose = FALSE) } \references{ Thomas Sellke, M. J Bayarri and James O Berger (2001) Calibration of p Values for Testing Precise Null Hypotheses, The American Statistician, 55:1, 62-71, \doi{10.1198/000313001300339950} } parameters/man/format_order.Rd0000644000176200001440000000131114246070503016137 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/format_order.R \name{format_order} \alias{format_order} \title{Order (first, second, ...) formatting} \usage{ format_order(order, textual = TRUE, ...) } \arguments{ \item{order}{value or vector of orders.} \item{textual}{Return number as words. If \code{FALSE}, will run \code{\link[insight:format_value]{insight::format_value()}}.} \item{...}{Arguments to be passed to \code{\link[insight:format_value]{insight::format_value()}} if \code{textual} is \code{FALSE}.} } \value{ A formatted string. } \description{ Format order. } \examples{ format_order(2) format_order(8) format_order(25, textual = FALSE) } parameters/man/dominance_analysis.Rd0000644000176200001440000001603215066721002017320 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dominance_analysis.R \name{dominance_analysis} \alias{dominance_analysis} \title{Dominance Analysis} \usage{ dominance_analysis( model, sets = NULL, all = NULL, conditional = TRUE, complete = TRUE, quote_args = NULL, contrasts = model$contrasts, ... ) } \arguments{ \item{model}{A model object supported by \code{performance::r2()}. See 'Details'.} \item{sets}{A (named) list of formula objects with no left hand side/response. If the list has names, the name provided each element will be used as the label for the set. Unnamed list elements will be provided a set number name based on its position among the sets as entered. Predictors in each formula are bound together as a set in the dominance analysis and dominance statistics and designations are computed for the predictors together. Predictors in \code{sets} must be present in the model submitted to the \code{model} argument and cannot be in the \code{all} argument.} \item{all}{A formula with no left hand side/response. Predictors in the formula are included in each subset in the dominance analysis and the R2 value associated with them is subtracted from the overall value. Predictors in \code{all} must be present in the model submitted to the \code{model} argument and cannot be in the \code{sets} argument.} \item{conditional}{Logical. If \code{FALSE} then conditional dominance matrix is not computed. If conditional dominance is not desired as an importance criterion, avoiding computing the conditional dominance matrix can save computation time.} \item{complete}{Logical. If \code{FALSE} then complete dominance matrix is not computed. If complete dominance is not desired as an importance criterion, avoiding computing complete dominance designations can save computation time.} \item{quote_args}{A character vector of arguments in the model submitted to \code{model} to \code{quote()} prior to submitting to the dominance analysis. This is necessary for data masked arguments (e.g., \code{weights}) to prevent them from being evaluated before being applied to the model and causing an error.} \item{contrasts}{A named list of \code{\link{contrasts}} used by the model object. 
This list is required in order for the correct mapping of parameters to predictors in the output when the model creates indicator codes for factor variables using \code{\link[insight:get_modelmatrix]{insight::get_modelmatrix()}}. By default, the \code{contrast} element from the model object submitted is used. If the model object does not have a \code{contrast} element, the user can supply this named list.} \item{...}{Currently not used.} } \value{ Object of class \code{"parameters_da"}. An object of class \code{"parameters_da"} is a list of \code{data.frame}s composed of the following elements: \describe{ \item{\code{General}}{A \code{data.frame} which associates dominance statistics with model parameters. The variables in this \code{data.frame} include: \describe{ \item{\code{Parameter}}{Parameter names.} \item{\code{General_Dominance}}{Vector of general dominance statistics. The R2 values ascribed to variables in the \code{all} argument are also reported here, though they are not general dominance statistics.} \item{\code{Percent}}{Vector of general dominance statistics normalized to sum to 1.} \item{\code{Ranks}}{Vector of ranks applied to the general dominance statistics.} \item{\code{Subset}}{Names of the subset to which the parameter belongs in the dominance analysis. Each other \code{data.frame} returned will refer to these subset names.}}} \item{\code{Conditional}}{A \code{data.frame} of conditional dominance statistics. Each observation represents a subset and each variable represents the average increment to R2 with a specific number of subsets in the model. \code{NULL} if \code{conditional} argument is \code{FALSE}.} \item{\code{Complete}}{A \code{data.frame} of complete dominance designations. The subsets in the observations are compared to the subsets referenced in each variable. Whether the subset in each variable dominates the subset in each observation is represented in the logical value. \code{NULL} if \code{complete} argument is \code{FALSE}.} } } \description{ Computes Dominance Analysis Statistics and Designations } \details{ Computes two decompositions of the model's R2 and returns a matrix of designations from which predictor relative importance determinations can be obtained. Note in the output that the "constant" subset is associated with a component of the model that does not directly contribute to the R2, such as an intercept. The "all" subset is apportioned a component of the fit statistic but is not considered a part of the dominance analysis and therefore does not receive a rank, conditional dominance statistics, or complete dominance designations. The input model is parsed using \code{insight::find_predictors()}; it does not yet support interactions, transformations, or offsets applied in the R formula, and will fail with an error if any such terms are detected. The model submitted must accept a formula object as a \code{formula} argument. In addition, the model object must accept the data on which the model is estimated as a \code{data} argument. Formulas submitted using object references (i.e., \code{lm(mtcars$mpg ~ mtcars$vs)}) and functions that accept data as a non-\code{data} argument (e.g., \code{survey::svyglm()} uses \code{design}) will fail with an error. Models that return \code{TRUE} for the \code{insight::model_info()} function's values "is_bayesian", "is_mixed", "is_gam", "is_multivariate", "is_zero_inflated", or "is_hurdle" are currently not supported. When \code{performance::r2()} returns multiple values, only the first is used by default.
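The \code{sets} and \code{all} arguments described above can be used as follows (a sketch, reusing the logit model from the 'Examples'):

\preformatted{model <- glm(vs ~ cyl + carb + mpg, data = mtcars, family = binomial())

# bind cyl and carb together as one named set
dominance_analysis(model, sets = list(engine = ~ cyl + carb))

# alternatively, adjust every subset for mpg
dominance_analysis(model, all = ~ mpg)
}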
} \examples{ \dontshow{if (require("domir") && require("performance")) withAutoprint(\{ # examplesIf} data(mtcars) # Dominance Analysis with Logit Regression model <- glm(vs ~ cyl + carb + mpg, data = mtcars, family = binomial()) performance::r2(model) dominance_analysis(model) # Dominance Analysis with Weighted Logit Regression model_wt <- glm(vs ~ cyl + carb + mpg, data = mtcars, weights = wt, family = quasibinomial() ) dominance_analysis(model_wt, quote_args = "weights") \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Azen, R., & Budescu, D. V. (2003). The dominance analysis approach for comparing predictors in multiple regression. Psychological Methods, 8(2), 129-148. doi:10.1037/1082-989X.8.2.129 \item Budescu, D. V. (1993). Dominance analysis: A new approach to the problem of relative importance of predictors in multiple regression. Psychological Bulletin, 114(3), 542-551. doi:10.1037/0033-2909.114.3.542 \item Groemping, U. (2007). Estimators of relative importance in linear regression based on variance decomposition. The American Statistician, 61(2), 139-147. doi:10.1198/000313007X188252 } } \seealso{ \code{\link[domir:domin]{domir::domin()}} } \author{ Joseph Luchman } parameters/man/fish.Rd0000644000176200001440000000036613754272263014430 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/datasets.R \docType{data} \name{fish} \alias{fish} \title{Sample data set} \description{ A sample data set, used in tests and some examples. } \keyword{data} parameters/man/degrees_of_freedom.Rd0000644000176200001440000000613115066721002017262 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/dof.R \name{degrees_of_freedom} \alias{degrees_of_freedom} \alias{dof} \title{Degrees of Freedom (DoF)} \usage{ degrees_of_freedom(model, method = "analytical", ...) dof(model, method = "analytical", ...) } \arguments{ \item{model}{A statistical model.} \item{method}{Type of approximation for the degrees of freedom. Can be one of the following: \itemize{ \item \code{"residual"} (aka \code{"analytical"}) returns the residual degrees of freedom, which usually is what \code{\link[stats:df.residual]{stats::df.residual()}} returns. If a model object has no method to extract residual degrees of freedom, these are calculated as \code{n-p}, i.e. the number of observations minus the number of estimated parameters. If residual degrees of freedom cannot be extracted by either approach, returns \code{Inf}. \item \code{"wald"} returns residual (aka analytical) degrees of freedom for models with t-statistic, \code{1} for models with Chi-squared statistic, and \code{Inf} for all other models. Also returns \code{Inf} if residual degrees of freedom cannot be extracted. \item \code{"normal"} always returns \code{Inf}. \item \code{"model"} returns model-based degrees of freedom, i.e. the number of (estimated) parameters. \item For mixed models, can also be \code{"ml1"} (or \code{"m-l-1"}, approximation of degrees of freedom based on a "m-l-1" heuristic as suggested by \emph{Elff et al. 2019}) or \code{"between-within"} (or \code{"betwithin"}). \item For mixed models of class \code{merMod}, \code{type} can also be \code{"satterthwaite"} or \code{"kenward-roger"} (or \code{"kenward"}). See 'Details'. 
} Usually, when degrees of freedom are required to calculate p-values or confidence intervals, \code{method = "wald"} is likely to be the best choice in most cases.} \item{...}{Currently not used.} } \description{ Estimate or extract degrees of freedom of model parameters. } \note{ In many cases, \code{degrees_of_freedom()} returns the same as \code{df.residual()}, or \code{n-k} (number of observations minus number of parameters). However, \code{degrees_of_freedom()} refers to the model's \emph{parameters} degrees of freedom of the distribution for the related test statistic. Thus, for models with z-statistic, results from \code{degrees_of_freedom()} and \code{df.residual()} differ. Furthermore, for other approximation methods like \code{"kenward"} or \code{"satterthwaite"}, each model parameter can have a different degree of freedom. } \examples{ \dontshow{if (require("lme4", quietly = TRUE)) withAutoprint(\{ # examplesIf} model <- lm(Sepal.Length ~ Petal.Length * Species, data = iris) dof(model) model <- glm(vs ~ mpg * cyl, data = mtcars, family = "binomial") dof(model) \donttest{ model <- lmer(Sepal.Length ~ Petal.Length + (1 | Species), data = iris) dof(model) if (require("rstanarm", quietly = TRUE)) { model <- stan_glm( Sepal.Length ~ Petal.Length * Species, data = iris, chains = 2, refresh = 0 ) dof(model) } } \dontshow{\}) # examplesIf} } parameters/man/principal_components.Rd0000644000176200001440000003516715066721002017710 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/factor_analysis.R, R/principal_components.R, % R/print_html.R, R/utils_pca_efa.R \name{factor_analysis} \alias{factor_analysis} \alias{factor_analysis.data.frame} \alias{factor_analysis.matrix} \alias{principal_components} \alias{rotated_data} \alias{principal_components.data.frame} \alias{print_html.parameters_efa} \alias{predict.parameters_efa} \alias{print.parameters_efa} \alias{sort.parameters_efa} \alias{closest_component} \title{Principal Component Analysis (PCA) and Factor Analysis (FA)} \usage{ factor_analysis(x, ...) \method{factor_analysis}{data.frame}( x, n = "auto", rotation = "oblimin", factor_method = "minres", sort = FALSE, threshold = NULL, standardize = FALSE, ... ) \method{factor_analysis}{matrix}( x, n = "auto", rotation = "oblimin", factor_method = "minres", n_obs = NULL, sort = FALSE, threshold = NULL, standardize = FALSE, ... ) principal_components(x, ...) rotated_data(x, verbose = TRUE) \method{principal_components}{data.frame}( x, n = "auto", rotation = "none", sparse = FALSE, sort = FALSE, threshold = NULL, standardize = TRUE, ... ) \method{print_html}{parameters_efa}(x, digits = 2, sort = FALSE, threshold = NULL, labels = NULL, ...) \method{predict}{parameters_efa}( object, newdata = NULL, names = NULL, keep_na = TRUE, verbose = TRUE, ... ) \method{print}{parameters_efa}(x, digits = 2, sort = FALSE, threshold = NULL, labels = NULL, ...) \method{sort}{parameters_efa}(x, ...) closest_component(x) } \arguments{ \item{x}{A data frame or a statistical model. For \code{closest_component()}, the output of the \code{principal_components()} function.} \item{...}{Arguments passed to or from other methods.} \item{n}{Number of components to extract. If \code{n="all"}, then \code{n} is set as the number of variables minus 1 (\code{ncol(x)-1}). If \code{n="auto"} (default) or \code{n=NULL}, the number of components is selected through \code{\link[=n_factors]{n_factors()}} or \code{\link[=n_components]{n_components()}}, respectively.
Else, if \code{n} is a number, \code{n} components are extracted. If \code{n} exceeds number of variables in the data, it is automatically set to the maximum number (i.e. \code{ncol(x)}). In \code{\link[=reduce_parameters]{reduce_parameters()}}, can also be \code{"max"}, in which case it will select all the components that are maximally pseudo-loaded (i.e., correlated) by at least one variable.} \item{rotation}{If not \code{"none"}, the PCA / FA will be computed using the \strong{psych} package. Possible options include \code{"varimax"}, \code{"quartimax"}, \code{"promax"}, \code{"oblimin"}, \code{"simplimax"}, or \code{"cluster"} (and more). See \code{\link[psych:fa]{psych::fa()}} for details. The default is \code{"none"} for PCA, and \code{"oblimin"} for FA.} \item{factor_method}{The factoring method to be used. Passed to the \code{fm} argument in \code{psych::fa()}. Defaults to \code{"minres"} (minimum residual). Other options include \code{"uls"}, \code{"ols"}, \code{"wls"}, \code{"gls"}, \code{"ml"}, \code{"minchi"}, \code{"minrank"}, \code{"old.min"}, and \code{"alpha"}. See \code{?psych::fa} for details.} \item{sort}{Sort the loadings.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{standardize}{A logical value indicating whether the variables should be standardized (centered and scaled) to have unit variance before the analysis (in general, such scaling is advisable). \strong{Note:} This defaults to \code{TRUE} for PCA, but to \code{FALSE} for FA (because \code{factor_analysis()} computes a correlation matrix and uses that r-matrix for the factor analysis by default - therefore, standardization of the raw variables is unnecessary, and even undesirable when using \code{cor = "poly"}).} \item{n_obs}{An integer or a matrix. \itemize{ \item \strong{Integer:} Number of observations in the original data set if \code{x} is a correlation matrix. Required to compute correct fit indices. \item \strong{Matrix:} A matrix where each cell \verb{[i, j]} specifies the number of pairwise complete observations used to compute the correlation between variable \code{i} and variable \code{j} in the input \code{x}. It is crucial when \code{x} is a correlation matrix (rather than raw data), especially if that matrix was derived from a dataset containing missing values using pairwise deletion. Providing a matrix allows \code{psych::fa()} to accurately calculate statistical measures, such as chi-square fit statistics, by accounting for the varying sample sizes that contribute to each individual correlation coefficient. }} \item{verbose}{Toggle warnings.} \item{sparse}{Whether to compute sparse PCA (SPCA, using \code{\link[sparsepca:spca]{sparsepca::spca()}}). SPCA attempts to find sparse loadings (with few nonzero values), which improves interpretability and avoids overfitting. Can be \code{TRUE} or \code{"robust"} (see \code{\link[sparsepca:robspca]{sparsepca::robspca()}}).} \item{digits}{Argument for \code{print()}, indicates the number of digits (rounding) to be used.} \item{labels}{Argument for \code{print()}, character vector of same length as columns in \code{x}. 
If provided, adds an additional column with the labels.} \item{object}{An object of class \code{parameters_pca}, \code{parameters_efa} or \code{psych_efa}.} \item{newdata}{An optional data frame in which to look for variables with which to predict. If omitted, the fitted values are used.} \item{names}{Optional character vector to name columns of the returned data frame.} \item{keep_na}{Logical, if \code{TRUE}, predictions also return observations with missing values from the original data, hence the number of rows of predicted data and original data is equal.} } \value{ A data frame of loadings. For \code{factor_analysis()}, this data frame is also of class \code{parameters_efa()}. Objects from \code{principal_components()} are of class \code{parameters_pca()}. } \description{ The functions \code{principal_components()} and \code{factor_analysis()} can be used to perform a principal component analysis (PCA) or a factor analysis (FA). They return the loadings as a data frame, and various methods and functions are available to access / display other information (see the 'Details' section). } \details{ \subsection{Methods and Utilities}{ \itemize{ \item \code{\link[=n_components]{n_components()}} and \code{\link[=n_factors]{n_factors()}} automatically estimate the optimal number of dimensions to retain. \item \code{\link[performance:check_factorstructure]{performance::check_factorstructure()}} checks the suitability of the data for factor analysis using the sphericity (see \code{\link[performance:check_factorstructure]{performance::check_sphericity_bartlett()}}) and the KMO (see \code{\link[performance:check_factorstructure]{performance::check_kmo()}}) measure. \item \code{\link[performance:check_itemscale]{performance::check_itemscale()}} computes various measures of internal consistencies applied to the (sub)scales (i.e., components) extracted from the PCA. \item Running \code{summary()} returns information related to each component/factor, such as the explained variance and the eigenvalues. \item Running \code{\link[=get_scores]{get_scores()}} computes scores for each subscale. \item \code{\link[=factor_scores]{factor_scores()}} extracts the factor scores from objects returned by \code{\link[psych:fa]{psych::fa()}}, \code{\link[=factor_analysis]{factor_analysis()}}, or \code{\link[psych:omega]{psych::omega()}}. \item Running \code{\link[=closest_component]{closest_component()}} will return a numeric vector with the assigned component index for each column from the original data frame. \item Running \code{\link[=rotated_data]{rotated_data()}} will return the rotated data, including missing values, so it matches the original data frame. \item \code{performance::item_omega()} is a convenient wrapper around \code{psych::omega()}, which provides some additional methods to work seamlessly within the \emph{easystats} framework. \item \code{\link[performance:check_normality]{performance::check_normality()}} checks residuals from objects returned by \code{\link[psych:fa]{psych::fa()}}, \code{\link[=factor_analysis]{factor_analysis()}}, \code{performance::item_omega()}, or \code{\link[psych:omega]{psych::omega()}} for normality. \item \code{\link[performance:model_performance]{performance::model_performance()}} returns fit-indices for objects returned by \code{\link[psych:fa]{psych::fa()}}, \code{\link[=factor_analysis]{factor_analysis()}}, or \code{\link[psych:omega]{psych::omega()}}.
\item Running \href{https://easystats.github.io/see/articles/parameters.html#principal-component-analysis}{\code{plot()}} visually displays the loadings (which requires the \href{https://easystats.github.io/see/}{\strong{see}-package} to work). } } \subsection{Complexity}{ Complexity represents the number of latent components needed to account for the observed variables. Whereas a perfect simple structure solution has a complexity of 1 in that each item would only load on one factor, a solution with evenly distributed items has a complexity greater than 1 (\emph{Hofmann, 1978; Pettersson and Turkheimer, 2010}). } \subsection{Uniqueness}{ Uniqueness represents the variance that is 'unique' to the variable and not shared with other variables. It is equal to \code{1 - communality} (variance that is shared with other variables). A uniqueness of \code{0.20} suggests that \verb{20\%} of that variable's variance is not shared with other variables in the overall factor model. The greater the 'uniqueness', the lower the relevance of the variable in the factor model. } \subsection{MSA}{ MSA represents the Kaiser-Meyer-Olkin Measure of Sampling Adequacy (\emph{Kaiser and Rice, 1974}) for each item. It indicates whether there is enough data for each factor to give reliable results for the PCA. The value should be > 0.6, and desirable values are > 0.8 (\emph{Tabachnick and Fidell, 2013}). } \subsection{PCA or FA?}{ There is a simplified rule of thumb that may help to decide whether to run a factor analysis or a principal component analysis: \itemize{ \item Run \emph{factor analysis} if you assume or wish to test a theoretical model of \emph{latent factors} causing observed variables. \item Run \emph{principal component analysis} if you want to simply \emph{reduce} your correlated observed variables to a smaller set of important independent composite variables. } (Source: \href{https://stats.stackexchange.com/q/1576/54740}{CrossValidated}) } \subsection{Computing Item Scores}{ Use \code{\link[=get_scores]{get_scores()}} to compute scores for the "subscales" represented by the extracted principal components or factors. \code{get_scores()} takes the results from \code{principal_components()} or \code{factor_analysis()} and extracts the variables for each component found by the PCA. Then, for each of these "subscales", raw means are calculated (which equals adding up the single items and dividing by the number of items). This results in a sum score for each component from the PCA, which is on the same scale as the original, single items that were used to compute the PCA. One can also use \code{predict()} to back-predict scores for each component, to which one can provide \code{newdata} or a vector of \code{names} for the components. } \subsection{Explained Variance and Eigenvalues}{ Use \code{summary()} to get the eigenvalues and the explained variance for each extracted component. The eigenvectors and eigenvalues represent the "core" of a PCA: The eigenvectors (the principal components) determine the directions of the new feature space, and the eigenvalues determine their magnitude. In other words, the eigenvalues explain the variance of the data along the new feature axes.
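For example, the eigenvalues and explained variance, as well as sum scores for the extracted components, can be obtained as follows (a minimal sketch, mirroring the 'Examples' below):

\preformatted{pca <- principal_components(mtcars[, 1:5], n = 2)

# eigenvalues and explained variance per component
summary(pca)

# raw mean scores for the "subscales"
get_scores(pca)
}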
} } \examples{ \dontshow{if (require("nFactors", quietly = TRUE) && require("sparsepca", quietly = TRUE) && require("psych", quietly = TRUE)) withAutoprint(\{ # examplesIf} library(parameters) \donttest{ # Principal Component Analysis (PCA) ------------------- principal_components(mtcars[, 1:7], n = "all", threshold = 0.2) # Automated number of components principal_components(mtcars[, 1:4], n = "auto") # labels can be useful if variable names are not self-explanatory print( principal_components(mtcars[, 1:4], n = "auto"), labels = c( "Miles/(US) gallon", "Number of cylinders", "Displacement (cu.in.)", "Gross horsepower" ) ) # Sparse PCA principal_components(mtcars[, 1:7], n = 4, sparse = TRUE) principal_components(mtcars[, 1:7], n = 4, sparse = "robust") # Rotated PCA principal_components(mtcars[, 1:7], n = 2, rotation = "oblimin", threshold = "max", sort = TRUE ) principal_components(mtcars[, 1:7], n = 2, threshold = 2, sort = TRUE) pca <- principal_components(mtcars[, 1:5], n = 2, rotation = "varimax") pca # Print loadings summary(pca) # Print information about the factors predict(pca, names = c("Component1", "Component2")) # Back-predict scores # which variables from the original data belong to which extracted component? closest_component(pca) } # Factor Analysis (FA) ------------------------ factor_analysis(mtcars[, 1:7], n = "all", threshold = 0.2, rotation = "Promax") factor_analysis(mtcars[, 1:7], n = 2, threshold = "max", sort = TRUE) factor_analysis(mtcars[, 1:7], n = 2, rotation = "none", threshold = 2, sort = TRUE) efa <- factor_analysis(mtcars[, 1:5], n = 2) summary(efa) predict(efa, verbose = FALSE) \donttest{ # Automated number of components factor_analysis(mtcars[, 1:4], n = "auto") } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Kaiser, H.F. and Rice. J. (1974). Little jiffy, mark iv. Educational and Psychological Measurement, 34(1):111–117 \item Hofmann, R. (1978). Complexity and simplicity as objective indices descriptive of factor solutions. Multivariate Behavioral Research, 13:2, 247-250, \doi{10.1207/s15327906mbr1302_9} \item Pettersson, E., & Turkheimer, E. (2010). Item selection, evaluation, and simple structure in personality data. Journal of research in personality, 44(4), 407-420, \doi{10.1016/j.jrp.2010.03.002} \item Tabachnick, B. G., and Fidell, L. S. (2013). Using multivariate statistics (6th ed.). Boston: Pearson Education. } } parameters/man/dot-n_factors_mreg.Rd0000644000176200001440000000047613641634603017250 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_mreg} \alias{.n_factors_mreg} \title{Multiple Regression Procedure} \usage{ .n_factors_mreg(eigen_values = NULL, model = "factors") } \description{ Multiple Regression Procedure } \keyword{internal} parameters/man/model_parameters.befa.Rd0000644000176200001440000000524115004371714017702 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_BayesFM.R \name{model_parameters.befa} \alias{model_parameters.befa} \title{Parameters from Bayesian Exploratory Factor Analysis} \usage{ \method{model_parameters}{befa}( model, sort = FALSE, centrality = "median", dispersion = FALSE, ci = 0.95, ci_method = "eti", test = NULL, verbose = TRUE, ... ) } \arguments{ \item{model}{Bayesian EFA created by the \code{BayesFM::befa}.} \item{sort}{Sort the loadings.} \item{centrality}{The point-estimates (centrality indices) to compute. 
Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} (see \code{\link[bayestestR:map_estimate]{map_estimate()}}), \code{"trimmed"} (which is just \code{mean(x, trim = threshold)}), \code{"mode"} or \code{"all"}.}

\item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively). Dispersion is not available for \code{"MAP"} or \code{"mode"} centrality indices.}

\item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{0.95} (\verb{95\%}).}

\item{ci_method}{The type of index used for Credible Interval. Can be \code{"ETI"} (default, see \code{\link[bayestestR:eti]{eti()}}), \code{"HDI"} (see \code{\link[bayestestR:hdi]{hdi()}}), \code{"BCI"} (see \code{\link[bayestestR:bci]{bci()}}), \code{"SPI"} (see \code{\link[bayestestR:spi]{spi()}}), or \code{"SI"} (see \code{\link[bayestestR:si]{si()}}).}

\item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"p_significance"} (or \code{"ps"}), \code{"p_rope"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \strong{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{rope()}} or \code{\link[bayestestR:p_direction]{p_direction()}}) and its results included in the summary output.}

\item{verbose}{Toggle warnings.}

\item{...}{Arguments passed to or from other methods.}
}
\value{
A data frame of loadings.
}
\description{
Format Bayesian Exploratory Factor Analysis objects from the BayesFM package.
}
\examples{
library(parameters)
\donttest{
if (require("BayesFM")) {
  efa <- BayesFM::befa(mtcars, iter = 1000)
  results <- model_parameters(efa, sort = TRUE, verbose = FALSE)
  results
  efa_to_cfa(results, verbose = FALSE)
}
}
}
parameters/man/format_df_adjust.Rd0000644000176200001440000000134714075246500017002 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_df_adjust.R
\name{format_df_adjust}
\alias{format_df_adjust}
\title{Format the name of the degrees-of-freedom adjustment methods}
\usage{
format_df_adjust(
  method,
  approx_string = "-approximated",
  dof_string = " degrees of freedom"
)
}
\arguments{
\item{method}{Name of the method.}

\item{approx_string, dof_string}{Suffix added to the name of the method in the returned string.}
}
\value{
A formatted string.
}
\description{
Format the name of the degrees-of-freedom adjustment methods.
}
\examples{
library(parameters)

format_df_adjust("kenward")
format_df_adjust("kenward", approx_string = "", dof_string = " DoF")
}
parameters/man/model_parameters.rma.Rd0000644000176200001440000001575715013124472017572 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_metafor.R
\name{model_parameters.rma}
\alias{model_parameters.rma}
\title{Parameters from Meta-Analysis}
\usage{
\method{model_parameters}{rma}(
  model,
  ci = 0.95,
  bootstrap = FALSE,
  iterations = 1000,
  standardize = NULL,
  exponentiate = FALSE,
  include_studies = TRUE,
  keep = NULL,
  drop = NULL,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{Model object.}

\item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).}

\item{bootstrap}{Should estimates be based on a bootstrapped model?
If \code{TRUE}, then arguments of \link[=model_parameters.brmsfit]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).}

\item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.}

\item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[=standardize_parameters]{standardize_parameters()}}. \strong{Importantly}:
\itemize{
\item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \strong{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model.
\item By default, the response (dependent) variable is also standardized, \emph{if applicable}. Set \code{include_response = FALSE} to avoid standardization of the response variable. See details in \code{\link[datawizard:standardize.default]{datawizard::standardize.default()}}.
\item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be standardized.
\item Robust estimation (i.e., \code{vcov} set to a value other than \code{NULL}) of standardized parameters only works when \code{standardize="refit"}.
}}

\item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. It is also recommended to use \code{exponentiate = TRUE} for models with log-transformed response values. For models with a log-transformed response variable, when \code{exponentiate = TRUE}, a one-unit increase in the predictor is associated with multiplying the outcome by that predictor's coefficient. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.}

\item{include_studies}{Logical, if \code{TRUE} (default), includes parameters for all studies. Else, only parameters for overall-effects are shown.}

\item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}.
If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{See \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}. Further non-documented arguments are: \itemize{ \item \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{groups} can be used to group coefficients. These arguments will be passed to the print-method, or can directly be used in \code{print()}, see documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}. \item If \code{s_value = TRUE}, the p-value will be replaced by the S-value in the output (cf. \emph{Rafi and Greenland 2020}). \item \code{pd} adds an additional column with the \emph{probability of direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details). Furthermore, see 'Examples' for this function. \item For developers, whose interest mainly is to get a "tidy" data frame of model summaries, it is recommended to set \code{pretty_names = FALSE} to speed up computation of the summary table. }} } \value{ A data frame of indices related to the model's parameters. } \description{ Extract and compute indices and measures to describe parameters of meta-analysis models. } \examples{ library(parameters) mydat <<- data.frame( effectsize = c(-0.393, 0.675, 0.282, -1.398), stderr = c(0.317, 0.317, 0.13, 0.36) ) if (require("metafor", quietly = TRUE)) { model <- rma(yi = effectsize, sei = stderr, method = "REML", data = mydat) model_parameters(model) } \donttest{ # with subgroups if (require("metafor", quietly = TRUE)) { data(dat.bcg) dat <- escalc( measure = "RR", ai = tpos, bi = tneg, ci = cpos, di = cneg, data = dat.bcg ) dat$alloc <- ifelse(dat$alloc == "random", "random", "other") d <<- dat model <- rma(yi, vi, mods = ~alloc, data = d, digits = 3, slab = author) model_parameters(model) } if (require("metaBMA", quietly = TRUE)) { data(towels) m <- suppressWarnings(meta_random(logOR, SE, study, data = towels)) model_parameters(m) } } } parameters/man/ci.default.Rd0000644000176200001440000003441115066721002015477 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/2_ci.R \name{ci.default} \alias{ci.default} \title{Confidence Intervals (CI)} \usage{ \method{ci}{default}( x, ci = 0.95, dof = NULL, method = NULL, iterations = 500, component = "all", vcov = NULL, vcov_args = NULL, verbose = TRUE, ... ) } \arguments{ \item{x}{A statistical model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{dof}{Number of degrees of freedom to be used when calculating confidence intervals. 
If \code{NULL} (default), the degrees of freedom are retrieved by calling \code{\link[insight:get_df]{insight::get_df()}} with approximation method defined in \code{method}. If not \code{NULL}, use this argument to override the default degrees of freedom used to compute confidence intervals.}

\item{method}{Method for computing degrees of freedom for confidence intervals (CI) and the related p-values. The following options are allowed (which vary depending on the model class): \code{"residual"}, \code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"}, \code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"}, \code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or \code{"bcai"}. See section \emph{Confidence intervals and approximation of degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}} for further details.}

\item{iterations}{The number of bootstrap replicates. Only applies to models of class \code{merMod} when \code{method = "boot"}.}

\item{component}{Model component for which parameters should be shown. See the documentation for your object's class in \code{\link[=model_parameters]{model_parameters()}} or \code{\link[=p_value]{p_value()}} for further details, or see section \emph{Model components}.}

\item{vcov}{Variance-covariance matrix used to compute uncertainty estimates (e.g., for robust standard errors). This argument accepts a covariance matrix, a function which returns a covariance matrix, or a string which identifies the function to be used to compute the covariance matrix.
\itemize{
\item A covariance matrix
\item A function which returns a covariance matrix (e.g., \code{stats::vcov()})
\item A string which indicates the kind of uncertainty estimates to return.
\itemize{
\item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC}
\item Cluster-robust: \code{"CR"}, \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, \code{"CR3"}. See \code{?clubSandwich::vcovCR}
\item Bootstrap: \code{"BS"}, \code{"xy"}, \code{"residual"}, \code{"wild"}, \code{"mammen"}, \code{"fractional"}, \code{"jackknife"}, \code{"norm"}, \code{"webb"}. See \code{?sandwich::vcovBS}
\item Other \code{sandwich} package functions: \code{"HAC"}, \code{"PC"}, \code{"CL"}, \code{"OPG"}, \code{"PL"}.
}
}}

\item{vcov_args}{List of arguments to be passed to the function identified by the \code{vcov} argument. This function is typically supplied by the \strong{sandwich} or \strong{clubSandwich} packages. Please refer to their documentation (e.g., \code{?sandwich::vcovHAC}) to see the list of available arguments. If no estimation type (argument \code{type}) is given, the default type for \code{"HC"} equals the default from the \strong{sandwich} package; for type \code{"CR"}, the default is set to \code{"CR3"}.}

\item{verbose}{Toggle warnings and messages.}

\item{...}{Additional arguments passed down to the underlying functions. E.g., arguments like \code{vcov} or \code{vcov_args} can be used to compute confidence intervals using a specific variance-covariance matrix for the standard errors.}
}
\value{
A data frame containing the CI bounds.
}
\description{
\code{ci()} attempts to return confidence intervals of model parameters.
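As a minimal illustration of the Wald approach described in the section below, the default confidence intervals of a linear model can be reconstructed by hand (a sketch using base R only; that the result matches \code{ci(m)} is an assumption that holds for models using residual degrees of freedom):

\preformatted{m <- lm(mpg ~ wt + cyl, data = mtcars)
est <- coef(m)
se <- sqrt(diag(vcov(m)))
dof <- df.residual(m)
# Wald CI: estimate +/- t-quantile * SE
cbind(CI_low = est + qt(0.025, dof) * se,
      CI_high = est + qt(0.975, dof) * se)
# should match ci(m) for this model}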
}
\section{Confidence intervals and approximation of degrees of freedom}{

There are different ways of approximating the degrees of freedom depending on different assumptions about the nature of the model and its sampling distribution. The \code{ci_method} argument modulates the method for computing degrees of freedom (df) that are used to calculate confidence intervals (CI) and the related p-values. The following options are allowed, depending on the model class:

\strong{Classical methods:}

Classical inference is generally based on the \strong{Wald method}. The Wald approach to inference computes a test statistic by dividing the parameter estimate by its standard error (Coefficient / SE), then comparing this statistic against a t- or normal distribution. This approach can be used to compute CIs and p-values.

\code{"wald"}:
\itemize{
\item Applies to \emph{non-Bayesian models}. For \emph{linear models}, CIs computed using the Wald method (SE and a \emph{t-distribution with residual df}); p-values computed using the Wald method with a \emph{t-distribution with residual df}. For other models, CIs computed using the Wald method (SE and a \emph{normal distribution}); p-values computed using the Wald method with a \emph{normal distribution}.
}

\code{"normal"}
\itemize{
\item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a normal distribution.
}

\code{"residual"}
\itemize{
\item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a \emph{t-distribution with residual df} when possible. If the residual df for a model cannot be determined, a normal distribution is used instead.
}

\strong{Methods for mixed models:}

Compared to fixed effects (or single-level) models, determining appropriate df for Wald-based inference in mixed models is more difficult. See \href{https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable}{the R GLMM FAQ} for a discussion. Several approximate methods for computing df are available, but you should also consider using profile likelihood (\code{"profile"}) or bootstrap (\code{"boot"}) CIs and p-values instead.

\code{"satterthwaite"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with Satterthwaite df}); p-values computed using the Wald method with a \emph{t-distribution with Satterthwaite df}.
}

\code{"kenward"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (\emph{Kenward-Roger SE} and a \emph{t-distribution with Kenward-Roger df}); p-values computed using the Wald method with \emph{Kenward-Roger SE and t-distribution with Kenward-Roger df}.
}

\code{"ml1"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with m-l-1 approximated df}); p-values computed using the Wald method with a \emph{t-distribution with m-l-1 approximated df}. See \code{\link[=ci_ml1]{ci_ml1()}}.
}

\code{"betwithin"}
\itemize{
\item Applies to \emph{linear mixed models} and \emph{generalized linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with between-within df}); p-values computed using the Wald method with a \emph{t-distribution with between-within df}. See \code{\link[=ci_betwithin]{ci_betwithin()}}.
}

\strong{Likelihood-based methods:}

Likelihood-based inference is based on comparing the likelihood for the maximum-likelihood estimate to the likelihood for models with one or more parameter values changed (e.g., set to zero or a range of alternative values). Likelihood ratios for the maximum-likelihood and alternative models are compared to a \eqn{\chi}-squared distribution to compute CIs and p-values.

\code{"profile"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{glm}, \code{polr}, \code{merMod} or \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using linear interpolation to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal distribution} (note: this might change in a future update!)
}

\code{"uniroot"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using root finding to find where likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal distribution} (note: this might change in a future update!)
}

\strong{Methods for bootstrapped or Bayesian models:}

Bootstrap-based inference is based on \strong{resampling} and refitting the model to the resampled datasets. The distribution of parameter estimates across resampled datasets is used to approximate the parameter's sampling distribution. Depending on the type of model, several different methods for bootstrapping and constructing CIs and p-values from the bootstrap distribution are available. For Bayesian models, inference is based on drawing samples from the model posterior distribution.

\code{"quantile"} (or \code{"eti"})
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{equal tailed intervals} using the quantiles of the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:eti]{bayestestR::eti()}}.
}

\code{"hdi"}
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{highest density intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:hdi]{bayestestR::hdi()}}.
}

\code{"bci"} (or \code{"bcai"})
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{bias corrected and accelerated intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:bci]{bayestestR::bci()}}.
}

\code{"si"}
\itemize{
\item Applies to \emph{Bayesian models} with proper priors. CIs computed as \emph{support intervals} comparing the posterior samples against the prior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:si]{bayestestR::si()}}.
}

\code{"boot"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{merMod}. CIs computed using \emph{parametric bootstrapping} (simulating data from the fitted model); p-values computed using the Wald method with a \emph{normal distribution} (note: this might change in a future update!).
}

For all iteration-based methods other than \code{"boot"} (\code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, \code{"bcai"}), p-values are based on the probability of direction (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted into a p-value using \code{\link[bayestestR:pd_to_p]{bayestestR::pd_to_p()}}.
}

\section{Model components}{

Possible values for the \code{component} argument depend on the model class. The following are valid options:
\itemize{
\item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component.
\item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component.
\item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms).
\item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component.
\item \code{"dispersion"}: returns the dispersion model component. This is common for models with zero-inflation or that can model the dispersion parameter.
\item \code{"instruments"}: for instrumental-variable or some fixed effects regression, returns the instruments.
\item \code{"nonlinear"}: for non-linear models (like models of class \code{nlmerMod} or \code{nls}), returns starting estimates for the nonlinear parameters.
\item \code{"correlation"}: for models with a correlation component, like \code{gls}, the variables used to describe the correlation structure are returned.
}

\strong{Special models}

Some model classes also allow rather uncommon options. These are:
\itemize{
\item \strong{mhurdle}: \code{"infrequent_purchase"}, \code{"ip"}, and \code{"auxiliary"}
\item \strong{BGGM}: \code{"correlation"} and \code{"intercept"}
\item \strong{BFBayesFactor}, \strong{glmx}: \code{"extra"}
\item \strong{averaging}: \code{"conditional"} and \code{"full"}
\item \strong{mjoint}: \code{"survival"}
\item \strong{mfx}: \code{"precision"}, \code{"marginal"}
\item \strong{betareg}, \strong{DirichletRegModel}: \code{"precision"}
\item \strong{mvord}: \code{"thresholds"} and \code{"correlation"}
\item \strong{clm2}: \code{"scale"}
\item \strong{selection}: \code{"selection"}, \code{"outcome"}, and \code{"auxiliary"}
\item \strong{lavaan}: One or more of \code{"regression"}, \code{"correlation"}, \code{"loading"}, \code{"variance"}, \code{"defined"}, or \code{"mean"}. Can also be \code{"all"} to include all components.
}

For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here.
} \examples{ \dontshow{if (require("glmmTMB") && requireNamespace("sandwich")) withAutoprint(\{ # examplesIf} data(qol_cancer) model <- lm(QoL ~ time + age + education, data = qol_cancer) # regular confidence intervals ci(model) # using heteroscedasticity-robust standard errors ci(model, vcov = "HC3") \donttest{ library(parameters) data(Salamanders, package = "glmmTMB") model <- glmmTMB::glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) ci(model) ci(model, component = "zi") } \dontshow{\}) # examplesIf} } parameters/man/standard_error.Rd0000644000176200001440000001003115066721002016462 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/4_standard_error.R, R/methods_base.R \name{standard_error} \alias{standard_error} \alias{standard_error.default} \alias{standard_error.factor} \title{Standard Errors} \usage{ standard_error(model, ...) \method{standard_error}{default}( model, effects = "fixed", component = "all", vcov = NULL, vcov_args = NULL, verbose = TRUE, ... ) \method{standard_error}{factor}(model, force = FALSE, verbose = TRUE, ...) } \arguments{ \item{model}{A model.} \item{...}{Arguments passed to or from other methods.} \item{effects}{Should standard errors for fixed effects (\code{"fixed"}), random effects (\code{"random"}), or both (\code{"all"}) be returned? Only applies to mixed models. May be abbreviated. When standard errors for random effects are requested, for each grouping factor a list of standard errors (per group level) for random intercepts and slopes is returned.} \item{component}{Model component for which standard errors should be shown. See the documentation for your object's class in \code{\link[=model_parameters]{model_parameters()}} or \code{\link[=p_value]{p_value()}} for further details.} \item{vcov}{Variance-covariance matrix used to compute uncertainty estimates (e.g., for robust standard errors). This argument accepts a covariance matrix, a function which returns a covariance matrix, or a string which identifies the function to be used to compute the covariance matrix. \itemize{ \item A covariance matrix \item A function which returns a covariance matrix (e.g., \code{stats::vcov()}) \item A string which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Cluster-robust: \code{"CR"}, \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, \code{"CR3"}. See \code{?clubSandwich::vcovCR} \item Bootstrap: \code{"BS"}, \code{"xy"}, \code{"residual"}, \code{"wild"}, \code{"mammen"}, \code{"fractional"}, \code{"jackknife"}, \code{"norm"}, \code{"webb"}. See \code{?sandwich::vcovBS} \item Other \code{sandwich} package functions: \code{"HAC"}, \code{"PC"}, \code{"CL"}, \code{"OPG"}, \code{"PL"}. } }} \item{vcov_args}{List of arguments to be passed to the function identified by the \code{vcov} argument. This function is typically supplied by the \strong{sandwich} or \strong{clubSandwich} packages. Please refer to their documentation (e.g., \code{?sandwich::vcovHAC}) to see the list of available arguments. 
If no estimation type (argument \code{type}) is given, the default type for \code{"HC"} equals the default from the \strong{sandwich} package; for type \code{"CR"}, the default is set to \code{"CR3"}.}

\item{verbose}{Toggle warnings and messages.}

\item{force}{Logical, if \code{TRUE}, factors are converted to numerical values to calculate the standard error, with the lowest level being the value \code{1} (unless the factor has numeric levels, which are converted to the corresponding numeric value). By default, \code{NA} is returned for factors or character vectors.}
}
\value{
A data frame with at least two columns: the parameter names and the standard errors. Depending on the model, may also include columns for model components etc.
}
\description{
\code{standard_error()} attempts to return standard errors of model parameters.
}
\note{
For Bayesian models (from \strong{rstanarm} or \strong{brms}), the standard error is the SD of the posterior samples.
}
\examples{
\dontshow{if (require("sandwich") && require("clubSandwich")) withAutoprint(\{ # examplesIf}
model <- lm(Petal.Length ~ Sepal.Length * Species, data = iris)
standard_error(model)

# robust standard errors
standard_error(model, vcov = "HC3")

# cluster-robust standard errors
standard_error(model,
  vcov = "vcovCL",
  vcov_args = list(cluster = iris$Species)
)
\dontshow{\}) # examplesIf}
}
parameters/man/standardize_info.Rd0000644000176200001440000000442015066721002017001 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/standardize_info.R
\name{standardize_info}
\alias{standardize_info}
\alias{standardise_info}
\alias{standardize_info.default}
\title{Get Standardization Information}
\usage{
standardize_info(model, ...)

\method{standardize_info}{default}(
  model,
  robust = FALSE,
  two_sd = FALSE,
  include_pseudo = FALSE,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{A statistical model.}

\item{...}{Arguments passed to or from other methods.}

\item{robust}{Logical, if \code{TRUE}, centering is done by subtracting the median from the variables and dividing it by the median absolute deviation (MAD). If \code{FALSE}, variables are standardized by subtracting the mean and dividing it by the standard deviation (SD).}

\item{two_sd}{If \code{TRUE}, the variables are scaled by two times the deviation (SD or MAD depending on \code{robust}). This method can be useful to obtain model coefficients of continuous parameters comparable to coefficients related to binary predictors, when applied to \strong{the predictors} (not the outcome) (Gelman, 2008).}

\item{include_pseudo}{(For (G)LMMs) Should Pseudo-standardized information be included?}

\item{verbose}{Toggle warnings and messages on or off.}
}
\value{
A data frame with information on each parameter (see \code{\link[=parameters_type]{parameters_type()}}), and various standardization coefficients for the post-hoc methods (see \code{\link[=standardize_parameters]{standardize_parameters()}}) for the predictor and the response.
}
\description{
This function extracts information, such as the deviations (SD or MAD) from parent variables, that are necessary for post-hoc standardization of parameters. This function gives a window on how standardized parameters are obtained, i.e., by what they are divided. For example, the "basic" method of standardization multiplies each coefficient by the deviation of its predictor and divides it by the deviation of the response.
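A minimal sketch of this arithmetic for a single continuous predictor (using base R only; that the result matches \code{standardize_parameters()} is an assumption that holds for simple linear models):

\preformatted{m <- lm(mpg ~ wt, data = mtcars)
standardize_info(m)
# "basic" method: coefficient * SD(predictor) / SD(response)
coef(m)["wt"] * sd(mtcars$wt) / sd(mtcars$mpg)
# compare with standardize_parameters(m, method = "basic")}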
}
\examples{
\dontshow{if (insight::check_if_installed("datawizard", quietly = TRUE)) withAutoprint(\{ # examplesIf}
model <- lm(mpg ~ ., data = mtcars)
standardize_info(model)
standardize_info(model, robust = TRUE)
standardize_info(model, two_sd = TRUE)
\dontshow{\}) # examplesIf}
}
\seealso{
Other standardize:
\code{\link{standardize_parameters}()}
}
\concept{standardize}
parameters/man/compare_parameters.Rd0000644000176200001440000003202515066721002017331 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compare_parameters.R
\name{compare_parameters}
\alias{compare_parameters}
\alias{compare_models}
\title{Compare model parameters of multiple models}
\usage{
compare_parameters(
  ...,
  ci = 0.95,
  effects = "fixed",
  component = "conditional",
  standardize = NULL,
  exponentiate = FALSE,
  ci_method = "wald",
  p_adjust = NULL,
  select = NULL,
  column_names = NULL,
  pretty_names = TRUE,
  coefficient_names = NULL,
  keep = NULL,
  drop = NULL,
  include_reference = FALSE,
  groups = NULL,
  verbose = TRUE
)

compare_models(
  ...,
  ci = 0.95,
  effects = "fixed",
  component = "conditional",
  standardize = NULL,
  exponentiate = FALSE,
  ci_method = "wald",
  p_adjust = NULL,
  select = NULL,
  column_names = NULL,
  pretty_names = TRUE,
  coefficient_names = NULL,
  keep = NULL,
  drop = NULL,
  include_reference = FALSE,
  groups = NULL,
  verbose = TRUE
)
}
\arguments{
\item{...}{One or more regression model objects, or objects returned by \code{model_parameters()}. Regression models may be of different model types. Model objects may be passed comma separated, or as a list. If model objects are passed with names or the list has named elements, these names will be used as column names.}

\item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).}

\item{effects}{Should parameters for fixed effects (\code{"fixed"}), random effects (\code{"random"}), or both fixed and random effects (\code{"all"}) be returned? By default, the variance components for random effects are returned. If group-level effects are requested, \code{"grouplevel"} returns the group-level random effects (BLUPs), while \code{"random_total"} returns the overall (sum of fixed and random) effects (similar to what \code{coef()} returns). Using \code{"grouplevel"} is equivalent to setting \code{group_level = TRUE}. The \code{effects} argument only applies to mixed models. If the calculation of random effects parameters takes too long, you may use \code{effects = "fixed"}.}

\item{component}{Model component for which parameters should be shown. See documentation for related model class in \code{\link[=model_parameters]{model_parameters()}}.}

\item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[=standardize_parameters]{standardize_parameters()}}. \strong{Importantly}:
\itemize{
\item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \strong{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model.
\item By default, the response (dependent) variable is also standardized, \emph{if applicable}.
Set \code{include_response = FALSE} to avoid standardization of the response variable. See details in \code{\link[datawizard:standardize.default]{datawizard::standardize.default()}}.
\item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be standardized.
\item Robust estimation (i.e., \code{vcov} set to a value other than \code{NULL}) of standardized parameters only works when \code{standardize="refit"}.
}}

\item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. It is also recommended to use \code{exponentiate = TRUE} for models with log-transformed response values. For models with a log-transformed response variable, when \code{exponentiate = TRUE}, a one-unit increase in the predictor is associated with multiplying the outcome by that predictor's coefficient. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.}

\item{ci_method}{Method for computing degrees of freedom for p-values and confidence intervals (CI). See documentation for related model class in \code{\link[=model_parameters]{model_parameters()}}.}

\item{p_adjust}{String value, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"}, \code{"sup-t"}, and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \strong{emmeans}). \code{"sup-t"} computes simultaneous confidence bands, also called sup-t confidence band (Montiel Olea & Plagborg-Møller, 2019).}

\item{select}{Determines which columns, and in which layout, they are printed. There are three options for this argument:
\itemize{
\item \strong{Selecting columns by name or index}

\code{select} can be a character vector (or numeric index) of column names that should be printed, where columns are extracted from the data frame returned by \code{model_parameters()} and related functions. There are two pre-defined options for selecting columns: \code{select = "minimal"} prints coefficients, confidence intervals and p-values, while \code{select = "short"} prints coefficients, standard errors and p-values.
\item \strong{A string expression with layout pattern}

\code{select} is a string with "tokens" enclosed in braces. These tokens will be replaced by their associated columns, where the selected columns will be collapsed into one column. Following tokens are replaced by the related coefficients or statistics: \code{{estimate}}, \code{{se}}, \code{{ci}} (or \code{{ci_low}} and \code{{ci_high}}), \code{{p}} and \code{{stars}}. The token \code{{ci}} will be replaced by \verb{\{ci_low\}, \{ci_high\}}. Example: \code{select = "{estimate}{stars} ({ci})"}

It is possible to create multiple columns as well. A \code{|} separates values into new cells/columns. Example: \code{select = "{estimate} ({ci})|{p}"}.

If \code{format = "html"}, a \verb{<br>} inserts a line break inside a cell. See 'Examples'.
\item \strong{A string indicating a pre-defined layout}

\code{select} can be one of the following string values, to create one of the following pre-defined column layouts:
\itemize{
\item \code{"ci"}: Estimates and confidence intervals, no asterisks for p-values. This is equivalent to \code{select = "{estimate} ({ci})"}.
\item \code{"se"}: Estimates and standard errors, no asterisks for p-values. This is equivalent to \code{select = "{estimate} ({se})"}.
\item \code{"ci_p"}: Estimates, confidence intervals and asterisks for p-values. This is equivalent to \code{select = "{estimate}{stars} ({ci})"}.
\item \code{"se_p"}: Estimates, standard errors and asterisks for p-values. This is equivalent to \code{select = "{estimate}{stars} ({se})"}.
\item \code{"ci_p2"}: Estimates, confidence intervals and numeric p-values, in two columns. This is equivalent to \code{select = "{estimate} ({ci})|{p}"}.
\item \code{"se_p2"}: Estimates, standard errors and numeric p-values, in two columns. This is equivalent to \code{select = "{estimate} ({se})|{p}"}.
}
}

For \code{model_parameters()}, glue-like syntax is still experimental in the case of more complex models (like mixed models) and may not return expected results.}

\item{column_names}{Character vector with strings that should be used as column headers. Must be of same length as number of models in \code{...}.}

\item{pretty_names}{Can be \code{TRUE}, which will return "pretty" (i.e. more human readable) parameter names. Or \code{"labels"}, in which case value and variable labels will be used as parameters names. The latter only works for "labelled" data, i.e. if the data used to fit the model had \code{"label"} and \code{"labels"} attributes. See also section \emph{Global Options to Customize Messages when Printing}.}

\item{coefficient_names}{Character vector with strings that should be used as column headers for the coefficient column. Must be of same length as number of models in \code{...}, or length 1. If length 1, this name will be used for all coefficient columns. If \code{NULL}, the name for the coefficient column will be detected automatically (as in \code{model_parameters()}).}

\item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}.

If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed.
Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.}

\item{drop}{See \code{keep}.}

\item{include_reference}{Logical, if \code{TRUE}, the reference level of factors will be added to the parameters table. This is only relevant for models with categorical predictors. The coefficient for the reference level is always \code{0} (except when \code{exponentiate = TRUE}, then the coefficient will be \code{1}), so this is just for completeness.}

\item{groups}{Named list, can be used to group parameters in the printed output. List elements may either be character vectors that match the name of those parameters that belong to one group, or list elements can be row numbers of those parameter rows that should belong to one group. The names of the list elements will be used as group names, which will be inserted as "header row". A possible use case might be to emphasize focal predictors and control variables, see 'Examples'. Parameters will be re-ordered according to the order used in \code{groups}, while all non-matching parameters will be added to the end.}

\item{verbose}{Toggle warnings and messages.}
}
\value{
A data frame of indices related to the model's parameters.
}
\description{
Compute and extract model parameters of multiple regression models. See \code{\link[=model_parameters]{model_parameters()}} for further details.
}
\details{
This function is in an early stage and does not yet cope with more complex models, and probably does not yet properly render all model components. It should also be noted that when including models with interaction terms, not only do the values of the parameters change, but so does their meaning (from main effects, to simple slopes), thereby making such comparisons hard. Therefore, you should not use this function to compare models with interaction terms with models without interaction terms.
}
\examples{
\dontshow{if (require("gt", quietly = TRUE)) withAutoprint(\{ # examplesIf}
data(iris)
lm1 <- lm(Sepal.Length ~ Species, data = iris)
lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris)
compare_parameters(lm1, lm2)

# custom style
compare_parameters(lm1, lm2, select = "{estimate}{stars} ({se})")

\donttest{
# custom style, in HTML
result <- compare_parameters(lm1, lm2, select = "{estimate}<br>({se})|{p}")
print_html(result)
}

data(mtcars)
m1 <- lm(mpg ~ wt, data = mtcars)
m2 <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial")
compare_parameters(m1, m2)
\donttest{
# exponentiate coefficients, but not for lm
compare_parameters(m1, m2, exponentiate = "nongaussian")

# change column names
compare_parameters("linear model" = m1, "logistic reg." = m2)
compare_parameters(m1, m2, column_names = c("linear model", "logistic reg."))

# or as list
compare_parameters(list(m1, m2))
compare_parameters(list("linear model" = m1, "logistic reg." = m2))
}
\dontshow{\}) # examplesIf}
}
parameters/man/format_parameters.Rd0000644000176200001440000000443014716604200017173 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/format_parameters.R
\name{format_parameters}
\alias{format_parameters}
\alias{format_parameters.default}
\title{Parameter names formatting}
\usage{
format_parameters(model, ...)

\method{format_parameters}{default}(model, brackets = c("[", "]"), ...)
}
\arguments{
\item{model}{A statistical model.}

\item{...}{Currently not used.}

\item{brackets}{A character vector of length two, indicating the opening and closing brackets.}
}
\value{
A (named) character vector with formatted parameter names. The value names refer to the original names of the coefficients.
}
\description{
This function formats the names of model parameters (coefficients) to make them more human-readable.
}
\section{Interpretation of Interaction Terms}{

Note that the \emph{interpretation} of interaction terms depends on many characteristics of the model. The number of parameters, and overall performance of the model, can differ \emph{or not} between \code{a * b}, \code{a : b}, and \code{a / b}, suggesting that sometimes interaction terms give different parameterizations of the same model, but other times it gives completely different models (depending on \code{a} or \code{b} being factors or covariates, included as main effects or not, etc.). Their interpretation depends on the full context of the model, which should not be inferred from the parameters table alone - rather, we recommend to use packages that calculate estimated marginal means or marginal effects, such as \CRANpkg{modelbased}, \CRANpkg{emmeans}, \CRANpkg{ggeffects}, or \CRANpkg{marginaleffects}. To raise awareness for this issue, you may use \code{print(..., show_formula = TRUE)} to add the model-specification to the output of the \code{\link[=print.parameters_model]{print()}} method for \code{model_parameters()}.
}

\examples{
model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris)
format_parameters(model)

model <- lm(Sepal.Length ~ Petal.Length + (Species / Sepal.Width), data = iris)
format_parameters(model)

model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2), data = iris)
format_parameters(model)

model <- lm(Sepal.Length ~ Species + poly(Sepal.Width, 2, raw = TRUE), data = iris)
format_parameters(model)
}
parameters/man/convert_efa_to_cfa.Rd0000644000176200001440000000327315066721002017271 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert_efa_to_cfa.R
\name{convert_efa_to_cfa}
\alias{convert_efa_to_cfa}
\alias{convert_efa_to_cfa.fa}
\alias{efa_to_cfa}
\title{Conversion between EFA results and CFA structure}
\usage{
convert_efa_to_cfa(model, ...)

\method{convert_efa_to_cfa}{fa}(
  model,
  threshold = "max",
  names = NULL,
  max_per_dimension = NULL,
  ...
)

efa_to_cfa(model, ...)
} \arguments{ \item{model}{An EFA model (e.g., a \code{psych::fa} object).} \item{...}{Arguments passed to or from other methods.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{names}{Vector containing dimension names.} \item{max_per_dimension}{Maximum number of variables to keep per dimension.} } \value{ Converted index. } \description{ Enables a conversion between Exploratory Factor Analysis (EFA) and Confirmatory Factor Analysis (CFA) \code{lavaan}-ready structure. } \examples{ \dontshow{if (require("psych") && require("lavaan")) withAutoprint(\{ # examplesIf} \donttest{ library(parameters) data(attitude) efa <- psych::fa(attitude, nfactors = 3) model1 <- efa_to_cfa(efa) model2 <- efa_to_cfa(efa, threshold = 0.3) model3 <- efa_to_cfa(efa, max_per_dimension = 2) suppressWarnings(anova( lavaan::cfa(model1, data = attitude), lavaan::cfa(model2, data = attitude), lavaan::cfa(model3, data = attitude) )) } \dontshow{\}) # examplesIf} } parameters/man/reshape_loadings.Rd0000644000176200001440000000247515030725674017007 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reshape_loadings.R \name{reshape_loadings} \alias{reshape_loadings} \alias{reshape_loadings.parameters_efa} \alias{reshape_loadings.data.frame} \title{Reshape loadings between wide/long formats} \usage{ reshape_loadings(x, ...) \method{reshape_loadings}{parameters_efa}(x, threshold = NULL, ...) \method{reshape_loadings}{data.frame}(x, threshold = NULL, loadings_columns = NULL, ...) } \arguments{ \item{x}{A data frame or a statistical model. For \code{closest_component()}, the output of the \code{principal_components()} function.} \item{...}{Arguments passed to or from other methods.} \item{threshold}{A value between 0 and 1 indicates which (absolute) values from the loadings should be removed. An integer higher than 1 indicates the n strongest loadings to retain. Can also be \code{"max"}, in which case it will only display the maximum loading per variable (the most simple structure).} \item{loadings_columns}{Vector indicating the columns corresponding to loadings.} } \description{ Reshape loadings between wide/long formats. } \examples{ if (require("psych")) { pca <- model_parameters(psych::fa(attitude, nfactors = 3)) loadings <- reshape_loadings(pca) loadings reshape_loadings(loadings) } } parameters/man/p_value.Rd0000644000176200001440000003455615066721002015126 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/3_p_value.R, R/methods_emmeans.R \name{p_value} \alias{p_value} \alias{p_value.default} \alias{p_value.emmGrid} \title{p-values} \usage{ p_value(model, ...) \method{p_value}{default}( model, dof = NULL, method = NULL, component = "all", vcov = NULL, vcov_args = NULL, verbose = TRUE, ... ) \method{p_value}{emmGrid}(model, ci = 0.95, adjust = "none", ...) } \arguments{ \item{model}{A statistical model.} \item{...}{Additional arguments} \item{dof}{Number of degrees of freedom to be used when calculating confidence intervals. If \code{NULL} (default), the degrees of freedom are retrieved by calling \code{\link[insight:get_df]{insight::get_df()}} with approximation method defined in \code{method}. 
If not \code{NULL}, use this argument to override the default degrees of freedom used to compute confidence intervals.}

\item{method}{Method for computing degrees of freedom for confidence intervals (CI) and the related p-values. The following options are allowed (which vary depending on the model class): \code{"residual"}, \code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"}, \code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"}, \code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or \code{"bcai"}. See section \emph{Confidence intervals and approximation of degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}} for further details.}

\item{component}{Model component for which parameters should be shown. See the documentation for your object's class in \code{\link[=model_parameters]{model_parameters()}} or \code{\link[=p_value]{p_value()}} for further details, or see section \emph{Model components}.}

\item{vcov}{Variance-covariance matrix used to compute uncertainty estimates (e.g., for robust standard errors). This argument accepts a covariance matrix, a function which returns a covariance matrix, or a string which identifies the function to be used to compute the covariance matrix.
\itemize{
\item A covariance matrix
\item A function which returns a covariance matrix (e.g., \code{stats::vcov()})
\item A string which indicates the kind of uncertainty estimates to return.
\itemize{
\item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC}
\item Cluster-robust: \code{"CR"}, \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, \code{"CR3"}. See \code{?clubSandwich::vcovCR}
\item Bootstrap: \code{"BS"}, \code{"xy"}, \code{"residual"}, \code{"wild"}, \code{"mammen"}, \code{"fractional"}, \code{"jackknife"}, \code{"norm"}, \code{"webb"}. See \code{?sandwich::vcovBS}
\item Other \code{sandwich} package functions: \code{"HAC"}, \code{"PC"}, \code{"CL"}, \code{"OPG"}, \code{"PL"}.
}
}}

\item{vcov_args}{List of arguments to be passed to the function identified by the \code{vcov} argument. This function is typically supplied by the \strong{sandwich} or \strong{clubSandwich} packages. Please refer to their documentation (e.g., \code{?sandwich::vcovHAC}) to see the list of available arguments. If no estimation type (argument \code{type}) is given, the default type for \code{"HC"} equals the default from the \strong{sandwich} package; for type \code{"CR"}, the default is set to \code{"CR3"}.}

\item{verbose}{Toggle warnings and messages.}

\item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).}

\item{adjust}{Character value naming the method used to adjust p-values or confidence intervals. See \code{?emmeans::summary.emmGrid} for details.}
}
\value{
A data frame with at least two columns: the parameter names and the p-values. Depending on the model, may also include columns for model components etc.
}
\description{
This function attempts to return, or compute, p-values of a model's parameters.
}
\details{
For Bayesian models, the p-values correspond to the \emph{probability of direction} (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted to a p-value using \code{bayestestR::convert_pd_to_p()}.
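Similarly, the Wald p-values of a linear model can be reconstructed by hand (a sketch using base R only; that the result matches \code{p_value(m)} is an assumption that holds for models using residual degrees of freedom):

\preformatted{m <- lm(mpg ~ wt + cyl, data = mtcars)
stat <- coef(m) / sqrt(diag(vcov(m))) # t-statistic
2 * pt(abs(stat), df.residual(m), lower.tail = FALSE)
# compare with p_value(m)}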
}
\section{Confidence intervals and approximation of degrees of freedom}{

There are different ways of approximating the degrees of freedom depending on different assumptions about the nature of the model and its sampling distribution. The \code{ci_method} argument modulates the method for computing degrees of freedom (df) that are used to calculate confidence intervals (CI) and the related p-values. The following options are allowed, depending on the model class:

\strong{Classical methods:}

Classical inference is generally based on the \strong{Wald method}. The Wald approach to inference computes a test statistic by dividing the parameter estimate by its standard error (Coefficient / SE), then comparing this statistic against a t- or normal distribution. This approach can be used to compute CIs and p-values.

\code{"wald"}:
\itemize{
\item Applies to \emph{non-Bayesian models}. For \emph{linear models}, CIs computed using the Wald method (SE and a \emph{t-distribution with residual df}); p-values computed using the Wald method with a \emph{t-distribution with residual df}. For other models, CIs computed using the Wald method (SE and a \emph{normal distribution}); p-values computed using the Wald method with a \emph{normal distribution}.
}

\code{"normal"}
\itemize{
\item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a normal distribution.
}

\code{"residual"}
\itemize{
\item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a \emph{t-distribution with residual df} when possible. If the residual df for a model cannot be determined, a normal distribution is used instead.
}

\strong{Methods for mixed models:}

Compared to fixed effects (or single-level) models, determining appropriate df for Wald-based inference in mixed models is more difficult. See \href{https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable}{the R GLMM FAQ} for a discussion. Several approximate methods for computing df are available, but you should also consider using profile likelihood (\code{"profile"}) or bootstrap (\code{"boot"}) CIs and p-values instead.

\code{"satterthwaite"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with Satterthwaite df}); p-values computed using the Wald method with a \emph{t-distribution with Satterthwaite df}.
}

\code{"kenward"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (\emph{Kenward-Roger SE} and a \emph{t-distribution with Kenward-Roger df}); p-values computed using the Wald method with \emph{Kenward-Roger SE and t-distribution with Kenward-Roger df}.
}

\code{"ml1"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with m-l-1 approximated df}); p-values computed using the Wald method with a \emph{t-distribution with m-l-1 approximated df}. See \code{\link[=ci_ml1]{ci_ml1()}}.
}

\code{"betwithin"}
\itemize{
\item Applies to \emph{linear mixed models} and \emph{generalized linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with between-within df}); p-values computed using the Wald method with a \emph{t-distribution with between-within df}. See \code{\link[=ci_betwithin]{ci_betwithin()}}.
} \strong{Likelihood-based methods:} Likelihood-based inference is based on comparing the likelihood for the maximum-likelihood estimate to the likelihood for models with one or more parameter values changed (e.g., set to zero or a range of alternative values). Likelihood ratios for the maximum-likelihood and alternative models are compared to a \eqn{\chi}-squared distribution to compute CIs and p-values. \code{"profile"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glm}, \code{polr}, \code{merMod} or \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using linear interpolation to find where the likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal distribution} (note: this might change in a future update!) } \code{"uniroot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using root finding to find where the likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal distribution} (note: this might change in a future update!) } \strong{Methods for bootstrapped or Bayesian models:} Bootstrap-based inference is based on \strong{resampling} and refitting the model to the resampled datasets. The distribution of parameter estimates across resampled datasets is used to approximate the parameter's sampling distribution. Depending on the type of model, several different methods for bootstrapping and constructing CIs and p-values from the bootstrap distribution are available. For Bayesian models, inference is based on drawing samples from the model posterior distribution. \code{"quantile"} (or \code{"eti"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{equal tailed intervals} using the quantiles of the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:eti]{bayestestR::eti()}}. } \code{"hdi"} \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{highest density intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:hdi]{bayestestR::hdi()}}. } \code{"bci"} (or \code{"bcai"}) \itemize{ \item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{bias corrected and accelerated intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:bci]{bayestestR::bci()}}. } \code{"si"} \itemize{ \item Applies to \emph{Bayesian models} with proper priors. CIs computed as \emph{support intervals} comparing the posterior samples against the prior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:si]{bayestestR::si()}}. } \code{"boot"} \itemize{ \item Applies to \emph{non-Bayesian models} of class \code{merMod}. CIs computed using \emph{parametric bootstrapping} (simulating data from the fitted model); p-values computed using the Wald method with a \emph{normal distribution} (note: this might change in a future update!).
} For all iteration-based methods other than \code{"boot"} (\code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, \code{"bcai"}), p-values are based on the probability of direction (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted into a p-value using \code{\link[bayestestR:pd_to_p]{bayestestR::pd_to_p()}}. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. The following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"dispersion"}: returns the dispersion model component. This is common for models with zero-inflation or that can model the dispersion parameter. \item \code{"instruments"}: for instrumental-variable or some fixed effects regression, returns the instruments. \item \code{"nonlinear"}: for non-linear models (like models of class \code{nlmerMod} or \code{nls}), returns starting estimates for the nonlinear parameters. \item \code{"correlation"}: for models with a correlation component, like \code{gls}, the variables used to describe the correlation structure are returned. } \strong{Special models} Some model classes also allow rather uncommon options. These are: \itemize{ \item \strong{mhurdle}: \code{"infrequent_purchase"}, \code{"ip"}, and \code{"auxiliary"} \item \strong{BGGM}: \code{"correlation"} and \code{"intercept"} \item \strong{BFBayesFactor}, \strong{glmx}: \code{"extra"} \item \strong{averaging}: \code{"conditional"} and \code{"full"} \item \strong{mjoint}: \code{"survival"} \item \strong{mfx}: \code{"precision"}, \code{"marginal"} \item \strong{betareg}, \strong{DirichletRegModel}: \code{"precision"} \item \strong{mvord}: \code{"thresholds"} and \code{"correlation"} \item \strong{clm2}: \code{"scale"} \item \strong{selection}: \code{"selection"}, \code{"outcome"}, and \code{"auxiliary"} \item \strong{lavaan}: One or more of \code{"regression"}, \code{"correlation"}, \code{"loading"}, \code{"variance"}, \code{"defined"}, or \code{"mean"}. Can also be \code{"all"} to include all components. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here.
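For instance, a minimal sketch for a model with a zero-inflation component (assuming the \strong{glmmTMB} package and its \code{Salamanders} data are available):

\preformatted{
data(Salamanders, package = "glmmTMB")
m <- glmmTMB::glmmTMB(
  count ~ mined + (1 | site),
  ziformula = ~mined,
  family = poisson(),
  data = Salamanders
)

# p-values for the conditional (count) component only
p_value(m, component = "conditional")

# p-values for the zero-inflation component only
p_value(m, component = "zero_inflated")
}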
} \examples{ \dontshow{if (require("pscl", quietly = TRUE)) withAutoprint(\{ # examplesIf} data(iris) model <- lm(Petal.Length ~ Sepal.Length + Species, data = iris) p_value(model) data("bioChemists", package = "pscl") model <- pscl::zeroinfl( art ~ fem + mar + kid5 | kid5 + phd, data = bioChemists ) p_value(model) p_value(model, component = "zi") \dontshow{\}) # examplesIf} } parameters/man/standardize_parameters.Rd0000644000176200001440000002654715066721002020227 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/standardize_parameters.R, % R/standardize_posteriors.R \name{standardize_parameters} \alias{standardize_parameters} \alias{standardise_parameters} \alias{standardize_posteriors} \alias{standardise_posteriors} \title{Parameters standardization} \usage{ standardize_parameters( model, method = "refit", ci = 0.95, robust = FALSE, two_sd = FALSE, include_response = TRUE, verbose = TRUE, ... ) standardize_posteriors( model, method = "refit", robust = FALSE, two_sd = FALSE, include_response = TRUE, verbose = TRUE, ... ) } \arguments{ \item{model}{A statistical model.} \item{method}{The method used for standardizing the parameters. Can be \code{"refit"} (default), \code{"posthoc"}, \code{"smart"}, \code{"basic"}, \code{"pseudo"} or \code{"sdy"}. See Details'.} \item{ci}{Confidence Interval (CI) level} \item{robust}{Logical, if \code{TRUE}, centering is done by subtracting the median from the variables and dividing it by the median absolute deviation (MAD). If \code{FALSE}, variables are standardized by subtracting the mean and dividing it by the standard deviation (SD).} \item{two_sd}{If \code{TRUE}, the variables are scaled by two times the deviation (SD or MAD depending on \code{robust}). This method can be useful to obtain model coefficients of continuous parameters comparable to coefficients related to binary predictors, when applied to \strong{the predictors} (not the outcome) (Gelman, 2008).} \item{include_response}{If \code{TRUE} (default), the response value will also be standardized. If \code{FALSE}, only the predictors will be standardized. For GLMs the response value will never be standardized (see \emph{Generalized Linear Models} section).} \item{verbose}{Toggle warnings and messages on or off.} \item{...}{For \code{standardize_parameters()}, arguments passed to \code{\link[=model_parameters]{model_parameters()}}, such as: \itemize{ \item \code{ci_method}, \code{centrality} for Mixed models and Bayesian models... \item \code{exponentiate}, ... \item etc. }} } \value{ A data frame with the standardized parameters (\verb{Std_*}, depending on the model type) and their CIs (\code{CI_low} and \code{CI_high}). Where applicable, standard errors (SEs) are returned as an attribute (\code{attr(x, "standard_error")}). } \description{ Compute standardized model parameters (coefficients). } \details{ \subsection{Standardization Methods}{ \itemize{ \item \strong{refit}: This method is based on a complete model re-fit with a standardized version of the data. Hence, this method is equal to standardizing the variables before fitting the model. It is the "purest" and the most accurate (Neter et al., 1989), but it is also the most computationally costly and long (especially for heavy models such as Bayesian models). This method is particularly recommended for complex models that include interactions or transformations (e.g., polynomial or spline terms). 
The \code{robust} (defaults to \code{FALSE}) argument enables a robust standardization of data, i.e., based on the \code{median} and \code{MAD} instead of the \code{mean} and \code{SD}. \strong{See \code{\link[datawizard:standardize]{datawizard::standardize()}} for more details.} \itemize{ \item \strong{Note} that \code{standardize_parameters(method = "refit")} may not return the same results as fitting a model on data that has been standardized with \code{standardize()}; \code{standardize_parameters()} uses the data used by the model fitting function, which might not be the same data if there are missing values. See the \code{remove_na} argument in \code{standardize()}. } \item \strong{posthoc}: Post-hoc standardization of the parameters, aiming at emulating the results obtained by "refit" without refitting the model. The coefficients are divided by the standard deviation (or MAD if \code{robust}) of the outcome (which becomes their expression 'unit'). Then, the coefficients related to numeric variables are additionally multiplied by the standard deviation (or MAD if \code{robust}) of the related terms, so that they correspond to changes of 1 SD of the predictor (e.g., "A change in 1 SD of \code{x} is related to a change of 0.24 of the SD of \code{y}"). This does not apply to binary variables or factors, so the coefficients are still related to changes in levels. This method is not accurate and tends to give aberrant results when interactions are specified. \item \strong{basic}: This method is similar to \code{method = "posthoc"}, but treats all variables as continuous: it also scales the coefficients by the standard deviation of the model matrix columns for factor levels (transformed to integers) or binary predictors. Although inappropriate for these cases, this method is the one implemented by default in other software packages, such as \code{\link[lm.beta:lm.beta]{lm.beta::lm.beta()}}. \item \strong{smart} (Standardization of Model's parameters with Adjustment, Reconnaissance and Transformation - \emph{experimental}): Similar to \code{method = "posthoc"} in that it does not involve model refitting. The difference is that the SD (or MAD if \code{robust}) of the response is computed on the relevant section of the data. For instance, if a factor with 3 levels A (the intercept), B and C is entered as a predictor, the effect corresponding to B vs. A will be scaled by the variance of the response at the intercept only. As a result, the coefficients for effects of factors are similar to Glass' delta. \item \strong{pseudo} (\emph{for 2-level (G)LMMs only}): In this (post-hoc) method, the response and the predictor are standardized based on the level of prediction (levels are detected with \code{\link[performance:check_group_variation]{performance::check_group_variation()}}): Predictors are standardized based on their SD at the level of prediction (see also \code{\link[datawizard:demean]{datawizard::demean()}}); The outcome (in linear LMMs) is standardized based on a fitted random-intercept-model, where \code{sqrt(random-intercept-variance)} is used for level 2 predictors, and \code{sqrt(residual-variance)} is used for level 1 predictors (Hoffman 2015, page 342). A warning is given when a within-group variable is found to also have between-group variance. \item \strong{sdy} (\emph{for logistic regression models only}): This y-standardization is useful when comparing coefficients of logistic regression models across models for the same sample.
Unobserved heterogeneity varies across models with different independent variables, and thus, odds ratios for the same predictor across different models cannot be compared directly. The y-standardization makes coefficients "comparable across models by dividing them with the estimated standard deviation of the latent variable for each model" (Mood 2010). Thus, whenever one has multiple logistic regression models that are fit to the same data and share certain predictors (e.g. nested models), it can be useful to use this standardization approach to make log-odds or odds ratios comparable. } } \subsection{Transformed Variables}{ When the model's formula contains transformations (e.g. \code{y ~ exp(X)}) \code{method = "refit"} will give different results compared to \code{method = "basic"} (\code{"posthoc"} and \code{"smart"} do not support such transformations): While \code{"refit"} standardizes the data \emph{prior} to the transformation (e.g. equivalent to \code{exp(scale(X))}), the \code{"basic"} method standardizes the transformed data (e.g. equivalent to \code{scale(exp(X))}). \cr\cr See the \emph{Transformed Variables} section in \code{\link[datawizard:standardize.default]{datawizard::standardize.default()}} for more details on how different transformations are dealt with when \code{method = "refit"}. } \subsection{Confidence Intervals}{ The returned confidence intervals are re-scaled versions of the unstandardized confidence intervals, and not "true" confidence intervals of the standardized coefficients (cf. Jones & Waller, 2015). } \subsection{Generalized Linear Models}{ Standardization for generalized linear models (GLM, GLMM, etc) is done only with respect to the predictors (while the outcome remains as-is, unstandardized) - maintaining the interpretability of the coefficients (e.g., in a binomial model: the exponent of the standardized parameter is the OR of a change of 1 SD in the predictor, etc.) } \subsection{Dealing with Factors}{ \code{standardize(model)} or \code{standardize_parameters(model, method = "refit")} do \emph{not} standardize categorical predictors (i.e. factors) / their dummy-variables, which may be a different behaviour compared to other R packages (such as \pkg{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize_parameters(model, method = "basic")} to obtain post-hoc standardized parameters, or standardize the data with \code{datawizard::standardize(data, force = TRUE)} \emph{before} fitting the model.
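A minimal sketch contrasting the two behaviours described above:

\preformatted{
data(mtcars)
model <- lm(mpg ~ factor(cyl) + wt, data = mtcars)

# "refit": continuous predictors are standardized,
# factor dummy variables are left untouched
standardize_parameters(model, method = "refit")

# "basic": dummy variables are scaled as well,
# mimicking lm.beta or SPSS
standardize_parameters(model, method = "basic")
}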
} } \examples{ model <- lm(len ~ supp * dose, data = ToothGrowth) standardize_parameters(model, method = "refit") \donttest{ standardize_parameters(model, method = "posthoc") standardize_parameters(model, method = "smart") standardize_parameters(model, method = "basic") # Robust and 2 SD standardize_parameters(model, robust = TRUE) standardize_parameters(model, two_sd = TRUE) model <- glm(am ~ cyl * mpg, data = mtcars, family = "binomial") standardize_parameters(model, method = "refit") standardize_parameters(model, method = "posthoc") standardize_parameters(model, method = "basic", exponentiate = TRUE) } \dontshow{if (require("lme4", quietly = TRUE)) withAutoprint(\{ # examplesIf} \donttest{ m <- lme4::lmer(mpg ~ cyl + am + vs + (1 | cyl), mtcars) standardize_parameters(m, method = "pseudo", ci_method = "satterthwaite") } \dontshow{\}) # examplesIf} \dontshow{if (require("rstanarm", quietly = TRUE)) withAutoprint(\{ # examplesIf} \donttest{ model <- rstanarm::stan_glm(rating ~ critical + privileges, data = attitude, refresh = 0) standardize_posteriors(model, method = "refit", verbose = FALSE) standardize_posteriors(model, method = "posthoc", verbose = FALSE) standardize_posteriors(model, method = "smart", verbose = FALSE) head(standardize_posteriors(model, method = "basic", verbose = FALSE)) } \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Hoffman, L. (2015). Longitudinal analysis: Modeling within-person fluctuation and change. Routledge. \item Jones, J. A., & Waller, N. G. (2015). The normal-theory and asymptotic distribution-free (ADF) covariance matrix of standardized regression coefficients: theoretical extensions and finite sample behavior. Psychometrika, 80(2), 365-378. \item Neter, J., Wasserman, W., & Kutner, M. H. (1989). Applied linear regression models. \item Gelman, A. (2008). Scaling regression inputs by dividing by two standard deviations. Statistics in medicine, 27(15), 2865-2873. \item Mood C. Logistic Regression: Why We Cannot Do What We Think We Can Do, and What We Can Do About It. European Sociological Review (2010) 26:67–82. } } \seealso{ See also \href{https://easystats.github.io/parameters/articles/standardize_parameters_effsize.html}{package vignette}. Other standardize: \code{\link{standardize_info}()} } \concept{effect size indices} \concept{standardize} parameters/man/sort_parameters.Rd0000644000176200001440000000232414227755134016704 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/sort_parameters.R \name{sort_parameters} \alias{sort_parameters} \alias{sort_parameters.default} \title{Sort parameters by coefficient values} \usage{ sort_parameters(x, ...) \method{sort_parameters}{default}(x, sort = "none", column = "Coefficient", ...) } \arguments{ \item{x}{A data frame or a \code{parameters_model} object.} \item{...}{Arguments passed to or from other methods.} \item{sort}{If \code{"none"} (default) do not sort, \code{"ascending"} sort by increasing coefficient value, or \code{"descending"} sort by decreasing coefficient value.} \item{column}{The column containing model parameter estimates. This will be \code{"Coefficient"} (default) in \emph{easystats} packages, \code{"estimate"} in \emph{broom} package, etc.} } \value{ A sorted data frame or original object. 
} \description{ Sort parameters by coefficient values } \examples{ # creating object to sort (can also be a regular data frame) mod <- model_parameters(stats::lm(wt ~ am * cyl, data = mtcars)) # original output mod # sorted outputs sort_parameters(mod, sort = "ascending") sort_parameters(mod, sort = "descending") } parameters/man/dot-factor_to_dummy.Rd0000644000176200001440000000050613641634603017445 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.factor_to_dummy} \alias{.factor_to_dummy} \title{Safe transformation from factor/character to numeric} \usage{ .factor_to_dummy(x) } \description{ Safe transformation from factor/character to numeric } \keyword{internal} parameters/man/pool_parameters.Rd0000644000176200001440000001337215066721002016660 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/pool_parameters.R \name{pool_parameters} \alias{pool_parameters} \title{Pool Model Parameters} \usage{ pool_parameters( x, exponentiate = FALSE, effects = "fixed", component = "all", verbose = TRUE, ... ) } \arguments{ \item{x}{A list of \code{parameters_model} objects, as returned by \code{\link[=model_parameters]{model_parameters()}}, or a list of model objects that are supported by \code{model_parameters()}.} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. It is also recommended to use \code{exponentiate = TRUE} for models with log-transformed response values. For models with a log-transformed response variable, when \code{exponentiate = TRUE}, a one-unit increase in the predictor is associated with multiplying the outcome by that predictor's coefficient. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{effects}{Should parameters for fixed effects (\code{"fixed"}), random effects (\code{"random"}), or both fixed and random effects (\code{"all"}) be returned? By default, the variance components for random effects are returned. If group-level effects are requested, \code{"grouplevel"} returns the group-level random effects (BLUPs), while \code{"random_total"} returns the overall (sum of fixed and random) effects (similar to what \code{coef()} returns). Using \code{"grouplevel"} is equivalent to setting \code{group_level = TRUE}. The \code{effects} argument only applies to mixed models. If the calculation of random effects parameters takes too long, you may use \code{effects = "fixed"}.} \item{component}{Which type of parameters should be returned, such as parameters for the conditional model, the zero-inflation part of the model, the dispersion term, or other auxiliary parameters? Applies to models with zero-inflation and/or dispersion formula, or if parameters such as \code{sigma} should be included. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model.
There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms} are returned (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters). For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, or \code{beta} (and other auxiliary parameters) are returned.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed down to \code{model_parameters()}, if \code{x} is a list of model-objects. Can be used, for instance, to specify arguments like \code{ci} or \code{ci_method} etc.} } \value{ A data frame of indices related to the model's parameters. } \description{ This function "pools" (i.e. combines) model parameters in a similar fashion to \code{mice::pool()}. However, this function pools parameters from \code{parameters_model} objects, as returned by \code{\link[=model_parameters]{model_parameters()}}. } \details{ Averaging of parameters follows Rubin's rules (\emph{Rubin, 1987, p. 76}). The pooled degrees of freedom are based on the Barnard-Rubin adjustment for small samples (\emph{Barnard and Rubin, 1999}). } \note{ Models with multiple components (for instance, models with zero-inflation, where predictors appear in the count and zero-inflation part, or models with a dispersion component) may fail in rare situations. In this case, compute the pooled parameters for components separately, using the \code{component} argument. Some model objects do not return standard errors (e.g. objects of class \code{htest}). For these models, neither pooled confidence intervals nor p-values are returned. } \examples{ \dontshow{if (require("mice") && require("datawizard")) withAutoprint(\{ # examplesIf} # example for multiply imputed datasets data("nhanes2", package = "mice") imp <- mice::mice(nhanes2, printFlag = FALSE) models <- lapply(1:5, function(i) { lm(bmi ~ age + hyp + chl, data = mice::complete(imp, action = i)) }) pool_parameters(models) # should be identical to: m <- with(data = imp, exp = lm(bmi ~ age + hyp + chl)) summary(mice::pool(m)) # For glm, mice uses residual df, while `pool_parameters()` uses `Inf` nhanes2$hyp <- datawizard::slide(as.numeric(nhanes2$hyp)) imp <- mice::mice(nhanes2, printFlag = FALSE) models <- lapply(1:5, function(i) { glm(hyp ~ age + chl, family = binomial, data = mice::complete(imp, action = i)) }) m <- with(data = imp, exp = glm(hyp ~ age + chl, family = binomial)) # residual df summary(mice::pool(m))$df # df = Inf pool_parameters(models)$df_error # use residual df instead pool_parameters(models, ci_method = "residual")$df_error \dontshow{\}) # examplesIf} } \references{ Barnard, J. and Rubin, D.B. (1999). Small sample degrees of freedom with multiple imputation. Biometrika, 86, 948-955. Rubin, D.B. (1987). Multiple Imputation for Nonresponse in Surveys. New York: John Wiley and Sons. } parameters/man/bootstrap_parameters.Rd0000644000176200001440000001075315066721002017724 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bootstrap_parameters.R \name{bootstrap_parameters} \alias{bootstrap_parameters} \alias{bootstrap_parameters.default} \title{Parameters bootstrapping} \usage{ bootstrap_parameters(model, ...)
\method{bootstrap_parameters}{default}( model, iterations = 1000, centrality = "median", ci = 0.95, ci_method = "quantile", test = "p-value", ... ) } \arguments{ \item{model}{Statistical model.} \item{...}{Arguments passed to other methods, like \code{\link[=bootstrap_model]{bootstrap_model()}} or \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}}.} \item{iterations}{The number of draws to simulate/bootstrap.} \item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} (see \code{\link[bayestestR:map_estimate]{map_estimate()}}), \code{"trimmed"} (which is just \code{mean(x, trim = threshold)}), \code{"mode"} or \code{"all"}.} \item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Defaults to \code{0.95} (\verb{95\%}).} \item{ci_method}{The type of index used for Credible Interval. Can be \code{"ETI"} (default, see \code{\link[bayestestR:eti]{eti()}}), \code{"HDI"} (see \code{\link[bayestestR:hdi]{hdi()}}), \code{"BCI"} (see \code{\link[bayestestR:bci]{bci()}}), \code{"SPI"} (see \code{\link[bayestestR:spi]{spi()}}), or \code{"SI"} (see \code{\link[bayestestR:si]{si()}}).} \item{test}{The indices to compute. Character (vector) with one or more of these options: \code{"p-value"} (or \code{"p"}), \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \strong{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{bayestestR::rope()}} or \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}) and its results included in the summary output.} } \value{ A data frame summarizing the bootstrapped parameters. } \description{ Compute bootstrapped parameters and their related indices such as Confidence Intervals (CI) and p-values. } \details{ This function first calls \code{\link[=bootstrap_model]{bootstrap_model()}} to generate bootstrapped coefficients. The resulting replicates for each coefficient are treated as a "distribution", and are passed to \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}} to calculate the related indices defined in the \code{"test"} argument. Note that the p-values returned here are estimated under the assumption of \emph{translation equivariance}: that the shape of the sampling distribution is unaffected by whether the null is true or not. If this assumption does not hold, p-values can be biased, and it is suggested to use proper permutation tests to obtain non-parametric p-values. } \section{Using with \strong{emmeans}}{ The output can be passed directly to the various functions from the \strong{emmeans} package, to obtain bootstrapped estimates, contrasts, simple slopes, etc. and their confidence intervals. These can then be passed to \code{model_parameters()} to obtain standard errors, p-values, etc. (see example). Note that the p-values returned here are estimated under the assumption of \emph{translation equivariance}: that the shape of the sampling distribution is unaffected by whether the null is true or not. If this assumption does not hold, p-values can be biased, and it is suggested to use proper permutation tests to obtain non-parametric p-values.
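Beyond the default p-values, several of the indices listed under the \code{test} argument can be requested at once. A rough sketch (assuming the \strong{boot} package is installed for bootstrapping):

\preformatted{
set.seed(123)
model <- lm(Sepal.Length ~ Petal.Width, data = iris)

# p-value, probability of direction and ROPE, all computed
# from the same bootstrap distribution
bootstrap_parameters(model, iterations = 500, test = c("p", "pd", "rope"))
}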
} \examples{ \dontshow{if (require("boot", quietly = TRUE) && require("emmeans", quietly = TRUE)) withAutoprint(\{ # examplesIf} \donttest{ set.seed(2) model <- lm(Sepal.Length ~ Species * Petal.Width, data = iris) b <- bootstrap_parameters(model) print(b) # different type of bootstrapping set.seed(2) b <- bootstrap_parameters(model, type = "balanced") print(b) est <- emmeans::emmeans(b, trt.vs.ctrl ~ Species) print(model_parameters(est)) } \dontshow{\}) # examplesIf} } \references{ Davison, A. C., & Hinkley, D. V. (1997). Bootstrap methods and their application (Vol. 1). Cambridge university press. } \seealso{ \code{\link[=bootstrap_model]{bootstrap_model()}}, \code{\link[=simulate_parameters]{simulate_parameters()}}, \code{\link[=simulate_model]{simulate_model()}} } parameters/man/n_clusters.Rd0000644000176200001440000001702115066721002015640 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_clusters.R, R/n_clusters_easystats.R \name{n_clusters} \alias{n_clusters} \alias{n_clusters_elbow} \alias{n_clusters_gap} \alias{n_clusters_silhouette} \alias{n_clusters_dbscan} \alias{n_clusters_hclust} \title{Find number of clusters in your data} \usage{ n_clusters( x, standardize = TRUE, include_factors = FALSE, package = c("easystats", "NbClust", "mclust"), fast = TRUE, nbclust_method = "kmeans", n_max = 10, ... ) n_clusters_elbow( x, standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, ... ) n_clusters_gap( x, standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, gap_method = "firstSEmax", ... ) n_clusters_silhouette( x, standardize = TRUE, include_factors = FALSE, clustering_function = stats::kmeans, n_max = 10, ... ) n_clusters_dbscan( x, standardize = TRUE, include_factors = FALSE, method = c("kNN", "SS"), min_size = 0.1, eps_n = 50, eps_range = c(0.1, 3), ... ) n_clusters_hclust( x, standardize = TRUE, include_factors = FALSE, distance_method = "correlation", hclust_method = "average", ci = 0.95, iterations = 100, ... ) } \arguments{ \item{x}{A data frame.} \item{standardize}{Standardize the dataframe before clustering (default).} \item{include_factors}{Logical, if \code{TRUE}, factors are converted to numerical values in order to be included in the data for determining the number of clusters. By default, factors are removed, because most methods that determine the number of clusters need numeric input only.} \item{package}{Package from which methods are to be called to determine the number of clusters. Can be \code{"all"} or a vector containing \code{"easystats"}, \code{"NbClust"}, \code{"mclust"}, and \code{"M3C"}.} \item{fast}{If \code{FALSE}, will compute 4 more indices (sets \code{index = "allong"} in \code{NbClust}). This has been deactivated by default as it is computationally heavy.} \item{nbclust_method}{The clustering method (passed to \code{NbClust::NbClust()} as \code{method}).} \item{n_max}{Maximal number of clusters to test.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}. Further non-documented arguments are: \itemize{ \item \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{groups} can be used to group coefficients. 
These arguments will be passed to the print-method, or can directly be used in \code{print()}, see documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}. \item If \code{s_value = TRUE}, the p-value will be replaced by the S-value in the output (cf. \emph{Rafi and Greenland 2020}). \item \code{pd} adds an additional column with the \emph{probability of direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details). Furthermore, see 'Examples' for this function. \item For developers, whose interest mainly is to get a "tidy" data frame of model summaries, it is recommended to set \code{pretty_names = FALSE} to speed up computation of the summary table. }} \item{clustering_function, gap_method}{Other arguments passed to other functions. \code{clustering_function} is used by \code{fviz_nbclust()} and can be \code{kmeans}, \code{cluster::pam}, \code{cluster::clara}, \code{cluster::fanny}, and more. \code{gap_method} is used by \code{cluster::maxSE} to extract the optimal number of clusters (see its \code{method} argument).} \item{method, min_size, eps_n, eps_range}{Arguments for the DBSCAN algorithm.} \item{distance_method}{The distance method (passed to \code{\link[=dist]{dist()}}). Used by algorithms relying on the distance matrix, such as \code{hclust} or \code{dbscan}.} \item{hclust_method}{The hierarchical clustering method (passed to \code{\link[=hclust]{hclust()}}).} \item{ci}{Confidence Interval (CI) level. Defaults to \code{0.95} (\verb{95\%}).} \item{iterations}{The number of bootstrap replicates. This only applies to bootstrapped frequentist models.} } \description{ Similarly to \code{\link[=n_factors]{n_factors()}} for factor / principal component analysis, \code{n_clusters()} is the main function to find out the optimal number of clusters present in the data based on the maximum consensus of a large number of methods. Essentially, there exist many methods to determine the optimal number of clusters, each with its own benefits and limitations. The main \code{n_clusters} function runs all of them and finds the number of clusters that is suggested by the majority of methods (in case of ties, it will select the most parsimonious solution with fewer clusters). Note that we also implement some specific, commonly used methods, like the Elbow or the Gap method, with their own visualization functionalities. See the examples below for more details. } \note{ There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\strong{see}-package}.
} \examples{ \donttest{ library(parameters) # The main 'n_clusters' function =============================== if (require("mclust", quietly = TRUE) && require("NbClust", quietly = TRUE) && require("cluster", quietly = TRUE) && require("see", quietly = TRUE)) { n <- n_clusters(iris[, 1:4], package = c("NbClust", "mclust")) # package can be "all" n summary(n) as.data.frame(n) # Duration is the time elapsed for each method in seconds plot(n) # The following runs all the methods but is significantly slower # n_clusters(iris[1:4], standardize = FALSE, package = "all", fast = FALSE) } } \dontshow{if (require("see", quietly = TRUE) && require("factoextra", quietly = TRUE)) withAutoprint(\{ # examplesIf} \donttest{ x <- n_clusters_elbow(iris[1:4]) x as.data.frame(x) # plotting is also possible: # plot(x) } \dontshow{\}) # examplesIf} \donttest{ # # Gap method -------------------- if (require("see", quietly = TRUE) && require("cluster", quietly = TRUE) && require("factoextra", quietly = TRUE)) { x <- n_clusters_gap(iris[1:4]) x as.data.frame(x) plot(x) } } \donttest{ # # Silhouette method -------------------------- if (require("factoextra", quietly = TRUE)) { x <- n_clusters_silhouette(iris[1:4]) x as.data.frame(x) # plotting is also possible: # plot(x) } } \donttest{ # if (require("dbscan", quietly = TRUE)) { # DBSCAN method ------------------------- # NOTE: This actually primarily estimates the 'eps' parameter, the number of # clusters is a side effect (it's the number of clusters corresponding to # this 'optimal' EPS parameter). x <- n_clusters_dbscan(iris[1:4], method = "kNN", min_size = 0.05) # 5 percent x head(as.data.frame(x)) plot(x) x <- n_clusters_dbscan(iris[1:4], method = "SS", eps_n = 100, eps_range = c(0.1, 2)) x head(as.data.frame(x)) plot(x) } } \donttest{ # # hclust method ------------------------------- if (require("pvclust", quietly = TRUE)) { # iterations should be higher for real analyses x <- n_clusters_hclust(iris[1:4], iterations = 50, ci = 0.90) x head(as.data.frame(x), n = 10) # Print the first 10 rows plot(x) } } } parameters/man/simulate_model.Rd0000644000176200001440000001164714717115325016475 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/5_simulate_model.R \name{simulate_model} \alias{simulate_model} \alias{simulate_model.default} \title{Simulated draws from model coefficients} \usage{ simulate_model(model, iterations = 1000, ...) \method{simulate_model}{default}(model, iterations = 1000, component = "all", ...) } \arguments{ \item{model}{Statistical model (no Bayesian models).} \item{iterations}{The number of draws to simulate/bootstrap.} \item{...}{Arguments passed to \code{\link[insight:get_varcov]{insight::get_varcov()}}, e.g. to allow simulated draws to be based on heteroscedasticity consistent variance covariance matrices.} \item{component}{Should all parameters, parameters for the conditional model, for the zero-inflation part of the model, or the dispersion model be returned? Applies to models with zero-inflation and/or dispersion component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero-inflated"}, \code{"dispersion"} or \code{"all"} (default). May be abbreviated.} } \value{ A data frame. } \description{ Simulate draws from a statistical model to return a data frame of estimates. } \details{ \subsection{Technical Details}{ \code{simulate_model()} is a computationally faster alternative to \code{bootstrap_model()}.
Simulated draws for coefficients are based on a multivariate normal distribution (\code{MASS::mvrnorm()}) with mean \code{mu = coef(model)} and variance \code{Sigma = vcov(model)}. } \subsection{Models with Zero-Inflation Component}{ For models from packages \strong{glmmTMB}, \strong{pscl}, \strong{GLMMadaptive} and \strong{countreg}, the \code{component} argument can be used to specify which parameters should be simulated. For all other models, parameters from the conditional component (fixed effects) are simulated. This may include smooth terms, but not random effects. } } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. The following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. \item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"dispersion"}: returns the dispersion model component. This is common for models with zero-inflation or that can model the dispersion parameter. \item \code{"instruments"}: for instrumental-variable or some fixed effects regression, returns the instruments. \item \code{"nonlinear"}: for non-linear models (like models of class \code{nlmerMod} or \code{nls}), returns starting estimates for the nonlinear parameters. \item \code{"correlation"}: for models with a correlation component, like \code{gls}, the variables used to describe the correlation structure are returned. } \strong{Special models} Some model classes also allow rather uncommon options. These are: \itemize{ \item \strong{mhurdle}: \code{"infrequent_purchase"}, \code{"ip"}, and \code{"auxiliary"} \item \strong{BGGM}: \code{"correlation"} and \code{"intercept"} \item \strong{BFBayesFactor}, \strong{glmx}: \code{"extra"} \item \strong{averaging}: \code{"conditional"} and \code{"full"} \item \strong{mjoint}: \code{"survival"} \item \strong{mfx}: \code{"precision"}, \code{"marginal"} \item \strong{betareg}, \strong{DirichletRegModel}: \code{"precision"} \item \strong{mvord}: \code{"thresholds"} and \code{"correlation"} \item \strong{clm2}: \code{"scale"} \item \strong{selection}: \code{"selection"}, \code{"outcome"}, and \code{"auxiliary"} \item \strong{lavaan}: One or more of \code{"regression"}, \code{"correlation"}, \code{"loading"}, \code{"variance"}, \code{"defined"}, or \code{"mean"}. Can also be \code{"all"} to include all components. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here.
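A minimal sketch of what the default method described under 'Technical Details' amounts to (assuming the \strong{MASS} package is installed):

\preformatted{
model <- lm(Sepal.Length ~ Petal.Width, data = iris)

# draws from a multivariate normal with mean coef(model)
# and variance vcov(model) ...
set.seed(123)
draws <- MASS::mvrnorm(1000, mu = coef(model), Sigma = vcov(model))
head(draws)

# ... which is essentially what simulate_model() returns
head(simulate_model(model, iterations = 1000))
}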
} \examples{ model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris) head(simulate_model(model)) \donttest{ if (require("glmmTMB", quietly = TRUE)) { model <- glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) head(simulate_model(model)) head(simulate_model(model, component = "zero_inflated")) } } } \seealso{ \code{\link[=simulate_parameters]{simulate_parameters()}}, \code{\link[=bootstrap_model]{bootstrap_model()}}, \code{\link[=bootstrap_parameters]{bootstrap_parameters()}} } parameters/man/model_parameters.t1way.Rd0000644000176200001440000000420114507235543020053 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_wrs2.R \name{model_parameters.t1way} \alias{model_parameters.t1way} \title{Parameters from robust statistical objects in \code{WRS2}} \usage{ \method{model_parameters}{t1way}(model, keep = NULL, verbose = TRUE, ...) } \arguments{ \item{model}{Object from \code{WRS2} package.} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from robust statistical objects in \code{WRS2} } \examples{ if (require("WRS2") && packageVersion("WRS2") >= "1.1.3") { model <- t1way(libido ~ dose, data = viagra) model_parameters(model) } } parameters/man/factor_scores.Rd0000644000176200001440000000215315066721002016313 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/factor_scores.R \name{factor_scores} \alias{factor_scores} \title{Extract factor scores from Factor Analysis (EFA) or Omega} \usage{ factor_scores(x, ...) } \arguments{ \item{x}{An object returned by \code{\link[psych:fa]{psych::fa()}}, \code{\link[=factor_analysis]{factor_analysis()}}, or \code{\link[psych:omega]{psych::omega()}}.} \item{...}{Currently unused.} } \value{ A data frame with the factor scores. It simply extracts the \verb{$scores} element from the object and converts it into a data frame. 
} \description{ \code{factor_scores()} extracts the factor scores from objects returned by \code{\link[psych:fa]{psych::fa()}}, \code{\link[=factor_analysis]{factor_analysis()}}, or \code{\link[psych:omega]{psych::omega()}}. } \examples{ \dontshow{if (insight::check_if_installed("psych", quietly = TRUE)) withAutoprint(\{ # examplesIf} data(mtcars) out <- factor_analysis(mtcars[, 1:7], n = 2) head(factor_scores(out)) \dontshow{\}) # examplesIf} } \seealso{ \code{\link[=factor_analysis]{factor_analysis()}} } parameters/man/reduce_parameters.Rd0000644000176200001440000001134115030725674017162 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/reduce_parameters.R \name{reduce_parameters} \alias{reduce_parameters} \alias{reduce_data} \title{Dimensionality reduction (DR) / Features Reduction} \usage{ reduce_parameters(x, method = "PCA", n = "max", distance = "euclidean", ...) reduce_data(x, method = "PCA", n = "max", distance = "euclidean", ...) } \arguments{ \item{x}{A data frame or a statistical model. For \code{closest_component()}, the output of the \code{principal_components()} function.} \item{method}{The feature reduction method. Can be one of \code{"PCA"}, \code{"cMDS"}, \code{"DRR"}, \code{"ICA"} (see the 'Details' section).} \item{n}{Number of components to extract. If \code{n="all"}, then \code{n} is set as the number of variables minus 1 (\code{ncol(x)-1}). If \code{n="auto"} (default) or \code{n=NULL}, the number of components is selected through \code{\link[=n_factors]{n_factors()}} resp. \code{\link[=n_components]{n_components()}}. Else, if \code{n} is a number, \code{n} components are extracted. If \code{n} exceeds the number of variables in the data, it is automatically set to the maximum number (i.e. \code{ncol(x)}). In \code{\link[=reduce_parameters]{reduce_parameters()}}, can also be \code{"max"}, in which case it will select all the components that are maximally pseudo-loaded (i.e., correlated) by at least one variable.} \item{distance}{The distance measure to be used. Only applies when \code{method = "cMDS"}. This must be one of \code{"euclidean"}, \code{"maximum"}, \code{"manhattan"}, \code{"canberra"}, \code{"binary"} or \code{"minkowski"}. Any unambiguous substring can be given.} \item{...}{Arguments passed to or from other methods.} } \description{ This function performs a reduction in the parameter space (the number of variables). It starts by creating a new set of variables, based on the given method (the default method is "PCA", but others are available via the \code{method} argument, such as "cMDS", "DRR" or "ICA"). Then, it names these new dimensions using the original variables that correlate the most with them. For instance, a variable named \code{'V1_0.97/V4_-0.88'} means that the V1 and the V4 variables correlate maximally (with respective coefficients of .97 and -.88) with this dimension. Although this function can be useful in exploratory data analysis, it's best to perform the dimension reduction step in a separate and dedicated stage, as this is a very important process in the data analysis workflow. \code{reduce_data()} is an alias for \code{reduce_parameters.data.frame()}. } \details{ The different methods available are described below: \subsection{Supervised Methods}{ \itemize{ \item \strong{PCA}: See \code{\link[=principal_components]{principal_components()}}.
\item \strong{cMDS / PCoA}: Classical Multidimensional Scaling (cMDS) takes a set of dissimilarities (i.e., a distance matrix) and returns a set of points such that the distances between the points are approximately equal to the dissimilarities. \item \strong{DRR}: Dimensionality Reduction via Regression (DRR) is a very recent technique extending PCA (\emph{Laparra et al., 2015}). Starting from a rotated PCA, it predicts redundant information from the remaining components using non-linear regression. Some of the most notable advantages of performing DRR are avoidance of multicollinearity between predictors and overfitting mitigation. DRR tends to perform well when the first principal component is enough to explain most of the variation in the predictors. Requires the \strong{DRR} package to be installed. \item \strong{ICA}: Performs an Independent Component Analysis using the FastICA algorithm. Contrary to PCA, which attempts to find uncorrelated sources (through least squares minimization), ICA attempts to find independent sources, i.e., the source space that maximizes the "non-gaussianity" of all sources. Contrary to PCA, ICA does not rank each source, which makes it a poor tool for dimensionality reduction. Requires the \strong{fastICA} package to be installed. } See also \href{https://easystats.github.io/parameters/articles/parameters_reduction.html}{package vignette}. } } \examples{ data(iris) model <- lm(Sepal.Width ~ Species * Sepal.Length + Petal.Width, data = iris) model reduce_parameters(model) out <- reduce_data(iris, method = "PCA", n = "max") head(out) } \references{ \itemize{ \item Nguyen, L. H., and Holmes, S. (2019). Ten quick tips for effective dimensionality reduction. PLOS Computational Biology, 15(6). \item Laparra, V., Malo, J., and Camps-Valls, G. (2015). Dimensionality reduction via regression in hyperspectral imagery. IEEE Journal of Selected Topics in Signal Processing, 9(6), 1026-1036. } } parameters/man/reexports.Rd0000644000176200001440000000277714716604200015527 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/equivalence_test.R, R/methods_bayestestR.R, % R/n_parameters.R, R/p_direction.R, R/p_significance.R, R/reexports.R \docType{import} \name{reexports} \alias{reexports} \alias{equivalence_test} \alias{ci} \alias{n_parameters} \alias{p_direction} \alias{p_significance} \alias{standardize_names} \alias{supported_models} \alias{print_html} \alias{print_md} \alias{display} \alias{describe_distribution} \alias{demean} \alias{rescale_weights} \alias{visualisation_recipe} \alias{kurtosis} \alias{skewness} \title{Objects exported from other packages} \keyword{internal} \description{ These objects are imported from other packages. Follow the links below to see their documentation. 
\describe{ \item{bayestestR}{\code{\link[bayestestR]{ci}}, \code{\link[bayestestR]{equivalence_test}}, \code{\link[bayestestR]{p_direction}}, \code{\link[bayestestR]{p_significance}}} \item{datawizard}{\code{\link[datawizard]{demean}}, \code{\link[datawizard]{describe_distribution}}, \code{\link[datawizard:skewness]{kurtosis}}, \code{\link[datawizard]{rescale_weights}}, \code{\link[datawizard]{skewness}}, \code{\link[datawizard]{visualisation_recipe}}} \item{insight}{\code{\link[insight]{display}}, \code{\link[insight]{n_parameters}}, \code{\link[insight:display]{print_html}}, \code{\link[insight:display]{print_md}}, \code{\link[insight]{standardize_names}}, \code{\link[insight:is_model_supported]{supported_models}}} }} parameters/man/dot-n_factors_bartlett.Rd0000644000176200001440000000055513641634603020135 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/n_factors.R \name{.n_factors_bartlett} \alias{.n_factors_bartlett} \title{Bartlett, Anderson and Lawley Procedures} \usage{ .n_factors_bartlett(eigen_values = NULL, model = "factors", nobs = NULL) } \description{ Bartlett, Anderson and Lawley Procedures } \keyword{internal} parameters/man/model_parameters.mlm.Rd0000644000176200001440000002661715066721002017601 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_mlm.R \name{model_parameters.mlm} \alias{model_parameters.mlm} \title{Parameters from multinomial or cumulative link models} \usage{ \method{model_parameters}{mlm}( model, ci = 0.95, vcov = NULL, vcov_args = NULL, bootstrap = FALSE, iterations = 1000, standardize = NULL, exponentiate = FALSE, p_adjust = NULL, keep = NULL, drop = NULL, verbose = TRUE, ... ) } \arguments{ \item{model}{A model with multinomial or categorical response value.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{vcov}{Variance-covariance matrix used to compute uncertainty estimates (e.g., for robust standard errors). This argument accepts a covariance matrix, a function which returns a covariance matrix, or a string which identifies the function to be used to compute the covariance matrix. \itemize{ \item A covariance matrix \item A function which returns a covariance matrix (e.g., \code{stats::vcov()}) \item A string which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Cluster-robust: \code{"CR"}, \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, \code{"CR3"}. See \code{?clubSandwich::vcovCR} \item Bootstrap: \code{"BS"}, \code{"xy"}, \code{"residual"}, \code{"wild"}, \code{"mammen"}, \code{"fractional"}, \code{"jackknife"}, \code{"norm"}, \code{"webb"}. See \code{?sandwich::vcovBS} \item Other \code{sandwich} package functions: \code{"HAC"}, \code{"PC"}, \code{"CL"}, \code{"OPG"}, \code{"PL"}. } }} \item{vcov_args}{List of arguments to be passed to the function identified by the \code{vcov} argument. This function is typically supplied by the \strong{sandwich} or \strong{clubSandwich} packages. Please refer to their documentation (e.g., \code{?sandwich::vcovHAC}) to see the list of available arguments. 
If no estimation type (argument \code{type}) is given, the default type for \code{"HC"} equals the default from the \strong{sandwich} package; for type \code{"CR"}, the default is set to \code{"CR3"}.} \item{bootstrap}{Should estimates be based on a bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.brmsfit]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).} \item{iterations}{The number of bootstrap replicates. This only applies to bootstrapped frequentist models.} \item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[=standardize_parameters]{standardize_parameters()}}. \strong{Importantly}: \itemize{ \item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \strong{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model. \item By default, the response (dependent) variable is also standardized, \emph{if applicable}. Set \code{include_response = FALSE} to avoid standardization of the response variable. See details in \code{\link[datawizard:standardize.default]{datawizard::standardize.default()}}. \item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be standardized. \item Robust estimation (i.e., \code{vcov} set to a value other than \code{NULL}) of standardized parameters only works when \code{standardize="refit"}. }} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. It is also recommended to use \code{exponentiate = TRUE} for models with log-transformed response values. For models with a log-transformed response variable, when \code{exponentiate = TRUE}, a one-unit increase in the predictor is associated with multiplying the outcome by that predictor's coefficient. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{p_adjust}{String value, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"}, \code{"sup-t"}, and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \strong{emmeans}).
\code{"sup-t"} computes simultaneous confidence bands, also called sup-t confidence band (Montiel Olea & Plagborg-Møller, 2019).} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{See \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}. Further non-documented arguments are: \itemize{ \item \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{groups} can be used to group coefficients. These arguments will be passed to the print-method, or can directly be used in \code{print()}, see documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}. \item If \code{s_value = TRUE}, the p-value will be replaced by the S-value in the output (cf. \emph{Rafi and Greenland 2020}). \item \code{pd} adds an additional column with the \emph{probability of direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details). Furthermore, see 'Examples' for this function. \item For developers, whose interest mainly is to get a "tidy" data frame of model summaries, it is recommended to set \code{pretty_names = FALSE} to speed up computation of the summary table. }} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from multinomial or cumulative link models } \details{ Multinomial or cumulative link models, i.e. models where the response value (dependent variable) is categorical and has more than two levels, usually return coefficients for each response level. Hence, the output from \code{model_parameters()} will split the coefficient tables by the different levels of the model's response. } \section{Model components}{ Possible values for the \code{component} argument depend on the model class. Following are valid options: \itemize{ \item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component. 
\item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component. \item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms). \item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component. \item \code{"dispersion"}: returns the dispersion model component. This is common for models with zero-inflation or that can model the dispersion parameter. \item \code{"instruments"}: for instrumental-variable or some fixed effects regression, returns the instruments. \item \code{"nonlinear"}: for non-linear models (like models of class \code{nlmerMod} or \code{nls}), returns staring estimates for the nonlinear parameters. \item \code{"correlation"}: for models with correlation-component, like \code{gls}, the variables used to describe the correlation structure are returned. } \strong{Special models} Some model classes also allow rather uncommon options. These are: \itemize{ \item \strong{mhurdle}: \code{"infrequent_purchase"}, \code{"ip"}, and \code{"auxiliary"} \item \strong{BGGM}: \code{"correlation"} and \code{"intercept"} \item \strong{BFBayesFactor}, \strong{glmx}: \code{"extra"} \item \strong{averaging}:\code{"conditional"} and \code{"full"} \item \strong{mjoint}: \code{"survival"} \item \strong{mfx}: \code{"precision"}, \code{"marginal"} \item \strong{betareg}, \strong{DirichletRegModel}: \code{"precision"} \item \strong{mvord}: \code{"thresholds"} and \code{"correlation"} \item \strong{clm2}: \code{"scale"} \item \strong{selection}: \code{"selection"}, \code{"outcome"}, and \code{"auxiliary"} \item \strong{lavaan}: One or more of \code{"regression"}, \code{"correlation"}, \code{"loading"}, \code{"variance"}, \code{"defined"}, or \code{"mean"}. Can also be \code{"all"} to include all components. } For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here. } \examples{ \dontshow{if (require("brglm2", quietly = TRUE)) withAutoprint(\{ # examplesIf} data("stemcell", package = "brglm2") model <- brglm2::bracl( research ~ as.numeric(religion) + gender, weights = frequency, data = stemcell, type = "ML" ) model_parameters(model) \dontshow{\}) # examplesIf} } \seealso{ \code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme. } parameters/man/p_direction.lm.Rd0000644000176200001440000003002415066721002016363 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/p_direction.R \name{p_direction.lm} \alias{p_direction.lm} \title{Probability of Direction (pd)} \usage{ \method{p_direction}{lm}( x, ci = 0.95, method = "direct", null = 0, vcov = NULL, vcov_args = NULL, ... ) } \arguments{ \item{x}{A statistical model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{method}{Can be \code{"direct"} or one of methods of \code{\link[bayestestR:estimate_density]{estimate_density()}}, such as \code{"kernel"}, \code{"logspline"} or \code{"KernSmooth"}. See details.} \item{null}{The value considered as a "null" effect. Traditionally 0, but could also be 1 in the case of ratios of change (OR, IRR, ...).} \item{vcov}{Variance-covariance matrix used to compute uncertainty estimates (e.g., for robust standard errors). 
This argument accepts a covariance matrix, a function which returns a
covariance matrix, or a string which identifies the function to be used to
compute the covariance matrix.
\itemize{
\item A covariance matrix
\item A function which returns a covariance matrix (e.g., \code{stats::vcov()})
\item A string which indicates the kind of uncertainty estimates to return.
\itemize{
\item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC}
\item Cluster-robust: \code{"CR"}, \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, \code{"CR3"}. See \code{?clubSandwich::vcovCR}
\item Bootstrap: \code{"BS"}, \code{"xy"}, \code{"residual"}, \code{"wild"}, \code{"mammen"}, \code{"fractional"}, \code{"jackknife"}, \code{"norm"}, \code{"webb"}. See \code{?sandwich::vcovBS}
\item Other \code{sandwich} package functions: \code{"HAC"}, \code{"PC"}, \code{"CL"}, \code{"OPG"}, \code{"PL"}.
}
}}

\item{vcov_args}{List of arguments to be passed to the function identified
by the \code{vcov} argument. This function is typically supplied by the
\strong{sandwich} or \strong{clubSandwich} packages. Please refer to their
documentation (e.g., \code{?sandwich::vcovHAC}) to see the list of
available arguments. If no estimation type (argument \code{type}) is given,
the default type for \code{"HC"} equals the default from the
\strong{sandwich} package; for type \code{"CR"}, the default is set to
\code{"CR3"}.}

\item{...}{Arguments passed to other methods, e.g. \code{ci()}. Arguments
like \code{vcov} or \code{vcov_args} can be used to compute confidence
intervals using a specific variance-covariance matrix for the standard
errors.}
}
\value{
A data frame.
}
\description{
Compute the \strong{Probability of Direction} (\emph{pd}, also known as the
Maximum Probability of Effect - \emph{MPE}). This can be interpreted as the
probability that a parameter (described by its full confidence, or
"compatibility" interval) is strictly positive or negative (whichever is
the most probable). Although differently expressed, this index is fairly
similar (i.e., is strongly correlated) to the frequentist \emph{p-value}
(see 'Details').
}
\section{What is the \emph{pd}?}{

The Probability of Direction (pd) is an index of effect existence,
representing the certainty with which an effect goes in a particular
direction (i.e., is positive or negative / has a sign), typically ranging
from 0.5 to 1 (but see next section for cases where it can range between 0
and 1). Beyond its simplicity of interpretation, understanding and
computation, this index also presents other interesting properties:
\itemize{
\item Like other posterior-based indices, \emph{pd} is solely based on the
posterior distributions and does not require any additional information
from the data or the model (e.g., such as priors, as in the case of Bayes
factors).
\item It is robust to the scale of both the response variable and the
predictors.
\item It is strongly correlated with the frequentist p-value, and can thus
be used to draw parallels and give some reference to readers unfamiliar
with Bayesian statistics (Makowski et al., 2019).
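As a minimal numeric sketch of this correspondence (assuming
\strong{bayestestR} is installed; see also the 'Relationship with the
p-value' section below):

\preformatted{
# a pd of 0.975 corresponds to a two-sided p-value of 0.05
bayestestR::pd_to_p(0.975)  # 0.05
2 * (1 - 0.975)             # 0.05
}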
}
}
\section{Relationship with the p-value}{

In most cases, it seems that the \emph{pd} has a direct correspondence with
the frequentist one-sided \emph{p}-value through the formula (for two-sided
\emph{p}): \ifelse{html}{\out{p = 2 * (1 - pd)}}{\eqn{p = 2 \times (1 - p_d)}}
Thus, a two-sided p-value of respectively \code{.1}, \code{.05}, \code{.01}
and \code{.001} would correspond approximately to a \emph{pd} of
\verb{95\%}, \verb{97.5\%}, \verb{99.5\%} and \verb{99.95\%}. See
\code{\link[bayestestR:pd_to_p]{pd_to_p()}} for details.
}

\section{Possible Range of Values}{

The largest value \emph{pd} can take is 1 - the posterior is strictly
directional. However, the smallest value \emph{pd} can take depends on the
parameter space represented by the posterior.

\strong{For a continuous parameter space}, exact values of 0 (or any point
null value) are not possible, and so 100\% of the posterior has \emph{some}
sign, some positive, some negative. Therefore, the smallest value \emph{pd}
can take is 0.5 - with an equal posterior mass of positive and negative
values. Values close to 0.5 \emph{cannot} be used to support the null
hypothesis (that the parameter does \emph{not} have a direction) in a
similar way to how large p-values cannot be used to support the null
hypothesis (see \code{\link[bayestestR:pd_to_p]{pd_to_p()}}; Makowski et
al., 2019).

\strong{For a discrete parameter space or a parameter space that is a
mixture between discrete and continuous spaces}, exact values of 0 (or any
point null value) \emph{are} possible! Therefore, the smallest value
\emph{pd} can take is 0 - with 100\% of the posterior mass on 0. Thus,
values close to 0 can be used to support the null hypothesis (see van den
Bergh et al., 2021).

Examples of posteriors representing discrete parameter space:
\itemize{
\item When a parameter can only take discrete values.
\item When a mixture prior/posterior is used (such as the spike-and-slab
prior; see van den Bergh et al., 2021).
\item When conducting Bayesian model averaging (e.g.,
\code{\link[bayestestR:weighted_posteriors]{weighted_posteriors()}} or
\code{brms::posterior_average}).
}
}

\section{Statistical inference - how to quantify evidence}{

There is no standardized approach to drawing conclusions based on the
available data and statistical models. A frequently chosen but also much
criticized approach is to evaluate results based on their statistical
significance (\emph{Amrhein et al. 2017}).

A more sophisticated way would be to test whether estimated effects exceed
the "smallest effect size of interest", to avoid even the smallest effects
being considered relevant simply because they are statistically
significant, but clinically or practically irrelevant (\emph{Lakens et al.
2018, Lakens 2024}).

A rather unconventional approach, which is nevertheless advocated by
various authors, is to interpret results from classical regression models
either in terms of probabilities, similar to the usual approach in Bayesian
statistics (\emph{Schweder 2018; Schweder and Hjort 2003; Vos 2022}) or in
terms of a relative measure of "evidence" or "compatibility" with the data
(\emph{Greenland et al. 2022; Rafi and Greenland 2020}), which nonetheless
comes close to a probabilistic interpretation.

A more detailed discussion of this topic is found in the documentation of
\code{\link[=p_function]{p_function()}}.

The \strong{parameters} package provides several options or functions to
aid statistical inference.
These are, for example: \itemize{ \item \code{\link[=equivalence_test.lm]{equivalence_test()}}, to compute the (conditional) equivalence test for frequentist models \item \code{\link[=p_significance.lm]{p_significance()}}, to compute the probability of \emph{practical significance}, which can be conceptualized as a unidirectional equivalence test \item \code{\link[=p_function]{p_function()}}, or \emph{consonance function}, to compute p-values and compatibility (confidence) intervals for statistical models \item the \code{pd} argument (setting \code{pd = TRUE}) in \code{model_parameters()} includes a column with the \emph{probability of direction}, i.e. the probability that a parameter is strictly positive or negative. See \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details. If plotting is desired, the \code{\link[=p_direction.lm]{p_direction()}} function can be used, together with \code{plot()}. \item the \code{s_value} argument (setting \code{s_value = TRUE}) in \code{model_parameters()} replaces the p-values with their related \emph{S}-values (\emph{Rafi and Greenland 2020}) \item finally, it is possible to generate distributions of model coefficients by generating bootstrap-samples (setting \code{bootstrap = TRUE}) or simulating draws from model coefficients using \code{\link[=simulate_model]{simulate_model()}}. These samples can then be treated as "posterior samples" and used in many functions from the \strong{bayestestR} package. } Most of the above shown options or functions derive from methods originally implemented for Bayesian models (\emph{Makowski et al. 2019}). However, assuming that model assumptions are met (which means, the model fits well to the data, the correct model is chosen that reflects the data generating process (distributional model family) etc.), it seems appropriate to interpret results from classical frequentist models in a "Bayesian way" (more details: documentation in \code{\link[=p_function]{p_function()}}). } \examples{ \dontshow{if (requireNamespace("bayestestR") && require("see", quietly = TRUE) && requireNamespace("sandwich")) withAutoprint(\{ # examplesIf} data(qol_cancer) model <- lm(QoL ~ time + age + education, data = qol_cancer) p_direction(model) # based on heteroscedasticity-robust standard errors p_direction(model, vcov = "HC3") result <- p_direction(model) plot(result) \dontshow{\}) # examplesIf} } \references{ \itemize{ \item Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is flat (p > 0.05): Significance thresholds and the crisis of unreplicable research. PeerJ, 5, e3544. \doi{10.7717/peerj.3544} \item Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference, Emphasize Unconditional Compatibility Descriptions of Statistics. (2022) https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022) \item Lakens, D. (2024). Improving Your Statistical Inferences (Version v1.5.1). Retrieved from https://lakens.github.io/statistical_inferences/. \doi{10.5281/ZENODO.6409077} \item Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence Testing for Psychological Research: A Tutorial. Advances in Methods and Practices in Psychological Science, 1(2), 259–269. \item Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767} \item Rafi Z, Greenland S. 
Semantic and cognitive tools to aid statistical science: replace confidence
and significance by compatibility and surprise. BMC Medical Research
Methodology (2020) 20:244.
\item Schweder T. Confidence is epistemic probability for empirical
science. Journal of Statistical Planning and Inference (2018) 195:116–125.
\doi{10.1016/j.jspi.2017.09.016}
\item Schweder T, Hjort NL. Frequentist analogues of priors and posteriors.
In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory
Data Confrontation in Economics, pp. 285-217. Princeton University Press,
Princeton, NJ, 2003
\item Vos P, Holbert D. Frequentist statistical inference without repeated
sampling. Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x}
}
}
\seealso{
See also \code{\link[=equivalence_test]{equivalence_test()}},
\code{\link[=p_function]{p_function()}} and
\code{\link[=p_significance]{p_significance()}} for functions related to
checking effect existence and significance.
}

parameters/man/model_parameters.cgam.Rd

% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_cgam.R
\name{model_parameters.cgam}
\alias{model_parameters.cgam}
\title{Parameters from Generalized Additive (Mixed) Models}
\usage{
\method{model_parameters}{cgam}(
  model,
  ci = 0.95,
  ci_method = "residual",
  bootstrap = FALSE,
  iterations = 1000,
  standardize = NULL,
  exponentiate = FALSE,
  p_adjust = NULL,
  keep = NULL,
  drop = NULL,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{A gam/gamm model.}

\item{ci}{Confidence Interval (CI) level. Defaults to \code{0.95} (\verb{95\%}).}

\item{ci_method}{Method for computing degrees of freedom for confidence
intervals (CI) and the related p-values. The following options are allowed
(they vary depending on the model class): \code{"residual"},
\code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"},
\code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"},
\code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"},
\code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or
\code{"bcai"}. See section \emph{Confidence intervals and approximation of
degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}}
for further details. When \code{ci_method = NULL}, \code{"wald"} is used in
most cases.}

\item{bootstrap}{Should estimates be based on a bootstrapped model? If
\code{TRUE}, then arguments of \link[=model_parameters.brmsfit]{Bayesian
regressions} apply (see also
\code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).}

\item{iterations}{The number of bootstrap replicates. This only applies in
the case of bootstrapped frequentist models.}

\item{standardize}{The method used for standardizing the parameters. Can be
\code{NULL} (default; no standardization), \code{"refit"} (for re-fitting
the model on standardized data) or one of \code{"basic"}, \code{"posthoc"},
\code{"smart"}, \code{"pseudo"}. See 'Details' in
\code{\link[=standardize_parameters]{standardize_parameters()}}.
\strong{Importantly}:
\itemize{
\item The \code{"refit"} method does \emph{not} standardize categorical
predictors (i.e. factors), which may be a different behaviour compared to
other R packages (such as \strong{lm.beta}) or other software packages
(like SPSS). To mimic such behaviours, either use \code{standardize="basic"}
or standardize the data with \code{datawizard::standardize(force=TRUE)}
\emph{before} fitting the model.
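A minimal sketch of this refit approach (the data set \code{mydata} and the
variables \code{y}, \code{x} and \code{group} are hypothetical):

\preformatted{
# standardize all variables, including factors, then refit the model
d <- datawizard::standardize(mydata, force = TRUE)
model_std <- lm(y ~ x + group, data = d)
}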
\item By default, the response (dependent) variable is also standardized, \emph{if applicable}. Set \code{include_response = FALSE} to avoid standardization of the response variable. See details in \code{\link[datawizard:standardize.default]{datawizard::standardize.default()}}. \item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be standardized. \item Robust estimation (i.e., \code{vcov} set to a value other than \code{NULL}) of standardized parameters only works when \code{standardize="refit"}. }} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. It is also recommended to use \code{exponentiate = TRUE} for models with log-transformed response values. For models with a log-transformed response variable, when \code{exponentiate = TRUE}, a one-unit increase in the predictor is associated with multiplying the outcome by that predictor's coefficient. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{p_adjust}{String value, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"}, \code{"sup-t"}, and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \strong{emmeans}). \code{"sup-t"} computes simultaneous confidence bands, also called sup-t confidence band (Montiel Olea & Plagborg-Møller, 2019).} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. 
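For example, a minimal sketch (the parameter names \code{x1} and \code{x2}
are hypothetical):

\preformatted{
# keep only parameters whose names match "x1" or "x2";
# the vector is merged into the regular expression "(x1|x2)"
model_parameters(model, keep = c("x1", "x2"))
}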
Inspect the \verb{$Parameter} column of the parameters table to get the
exact parameter names.}

\item{drop}{See \code{keep}.}

\item{verbose}{Toggle warnings and messages.}

\item{...}{Arguments passed to or from other methods. For instance, when
\code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are
passed down to \code{bootstrap_model()}. Further non-documented arguments
are:
\itemize{
\item \code{digits}, \code{p_digits}, \code{ci_digits} and
\code{footer_digits} to set the number of digits for the output.
\code{groups} can be used to group coefficients. These arguments will be
passed to the print-method, or can directly be used in \code{print()}, see
documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}.
\item If \code{s_value = TRUE}, the p-value will be replaced by the S-value
in the output (cf. \emph{Rafi and Greenland 2020}).
\item \code{pd} adds an additional column with the \emph{probability of
direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}
for details). Furthermore, see 'Examples' for this function.
\item For developers, whose interest mainly is to get a "tidy" data frame
of model summaries, it is recommended to set \code{pretty_names = FALSE} to
speed up computation of the summary table.
}}
}
\value{
A data frame of indices related to the model's parameters.
}
\description{
Extract and compute indices and measures to describe parameters of
generalized additive models (GAM(M)s).
}
\details{
The reporting of degrees of freedom \emph{for the spline terms} slightly
differs from the output of \code{summary(model)}, for example in the case
of \code{mgcv::gam()}. The \emph{estimated degrees of freedom}, column
\code{edf} in the summary-output, is named \code{df} in the returned data
frame, while the column \code{df_error} in the returned data frame refers
to the residual degrees of freedom that are returned by
\code{df.residual()}. Hence, the values in the column \code{df_error}
differ from the column \code{Ref.df} in the summary, which is intentional,
as these reference degrees of freedom \dQuote{is not very interpretable}
(\href{https://stat.ethz.ch/pipermail/r-help/2019-March/462135.html}{web}).
}
\examples{
library(parameters)
if (require("mgcv")) {
  dat <- gamSim(1, n = 400, dist = "normal", scale = 2)
  model <- gam(y ~ s(x0) + s(x1) + s(x2) + s(x3), data = dat)
  model_parameters(model)
}
}
\seealso{
\code{\link[insight:standardize_names]{insight::standardize_names()}} to
rename columns into a consistent, standardized naming scheme.
}

parameters/man/figures/ [directory]
parameters/man/figures/card.png [binary PNG image data omitted]
parameters/man/figures/unconditional_interpretation.png [binary PNG image data omitted]
parameters/man/figures/figure2.png [binary PNG image data omitted]
parameters/man/figures/logo.png [binary PNG image data omitted; archive truncated here]
gϞUz>0V .\l۷oS!8}tٲek֬)HaAdDFFNWn :(;;1BH$ڱc~rnBK+@ E|ӧO^]Rd&q.]0o@nLMM)իP(d D.O4 GGJް0>!tcժU&L`"ܹs۷766VV͏?jy8͛7r劍ͬYP/:fG@(p̙?ٳg3glժDTrDEVVs圜SNǗ.]Zg*$ˉ'ʕ+5k*T+88x˖-˗OMM]|9VVV|>_s_eʔE=ztݺukذa:u0r"hÆ h suus>< ZjffܹsoܸJaŋDk׮ܹ322Nm՝;wPЃ"%ڵk5j_( 0bĈ-Z4n8vp<.O:ɒ%K"HH %&&5yyyd2ylZիW8PAAAjׯ(*Kك3gPUtzwA|#V_ZAo… mSTA(33iӦxc۶m]LС۷o};w gt!;wn„ +WNHHh۶-$4MT5-̟?ɒ%8cRȀNJgϞwޅqƍC tP(6jUfMhu,Zh%JuVXXڞgS p8&Æ hٲ%%>;ALOOGj׮v0o666F:`yZG.=}4W+Pu+V O٭[7tǟu~*>|8~m3O)%>gLj4huAdff2 fpL:uҤIQfYɁ,Lm֬_uԩ'O:99!h $LչoggGU=ZMOfggSΝxb:: T^OM!nݺ֭C7onD4P,Jhˀi|8Zڥ^=]|VZʕ+quӺoJ_zɵ|r ԨQ" v=UJ&h:?R MRڿ @ !jժ͞={Сfffpݺukٲ%f-br|Tm(ԩf͚> NwC0q > ;_zE_2dÇ;6rH6vutuQWnQ=Ϧheel/^A@\pᨘz;:99!nkkcҠ<@ 6lԨQ{A \14v*SRL̹[jf&('Sjŋ9rC'N Uw^@Af8((~TT ?*Amޔ:hԠAq6mT682m48mێ;E0E۶mV ֭[Ro]v8m3O)%~R%JP?h_x o @'yzz"\p*]nݺsK$Aõ<[`;vKHHvOy׮]߾} w!EǁPBhh(} z+-- eNOOG QRJѾb^ص7.Bˈ-[Ћ9YYBԫW5Ξ0aB>}`TobŊmڴYK.f͚ᐢ0knvM낽+V {AU! !^xqm)CA 15kV֭m29ߞ;wyg޽ʕ;w.f>""nE(XS_~E3$ ݻpum֬m #@ qC Hq/y@״lْ~@ ?tz.@^zرL@ ?r@:+7'57@ -@ D@(RqC H[ڵk<3fezzBزes?-{( !C(M:`.]jff˘ [8tm,2#B7&&&s̹wٳgƍgeeb sss& 5Ν;QHԫWؗ/_YݰԩSffc2XfΜ㓖v#G&&&`}ZJ(z{{A@DEEYXXIGDDџ1cԩSҥK-Z֭cU}-00QFɠHIIvtt|)c54d2kJ*hǎ۶mۢ:@ #166^lYpppPPŋʆ]2q Qxq&dȈD:@h@ .i 'L~z6;  XbNC ?E >Yf7nLLLd~n޽۴iSWW]v :vm۶nݺ{tN,!!ƒk6S5kX,իW )S0 ?742 FPݻzsa~!!!Gp8۱c,^hذ!@ ?M||͛+T/o߾˗SuĉpЄǏ߿Ν>?` Hƍxyy5I0\BkUVY[[E[n(#B!!ߖ"P(hA1@ A @ "7@ D@(RqC HA @ "7@ D@(RqC HA @#/Œ@  ō@ ppp]6F$DnjciisSx!:ٳgWnggWRϟcǎbeeղeˬ,hX>}8;;NX [RCÃ|{"{*;Xr %" F dΜ9݃# 7n|Mppp||X,2dSg(((Ç52vX4y9 H4hР/_]L2sLtGIKu4*666::ޞI(R͑N*x&U͔W0Td6RluT~0=z44ͤI`_$ʔ)+?M6A@f͚8,+N&LXsG\U 2(PtA HL陈:IOO_hÛ5kqFzՂB8p <߮]*UD zԩSO< h%޽{G'C_Ϟ={+V\v-c54(zxЊӵkWMN*(2WHG>~a+7-ٯ᪦k&δ$,L G)J32Eg\1q]|' ȦMV~(b.];w.ZNNc.ܹsU?M+͛͘->|p&Mљ|}Yb1^Z L2M6m޼}t'O 4;m_ŋpl߾sδݰFDaaa!!!GWގ;`yUDDDÆ :ZT;իJR%b5l\-ݢ%DwKlؑˁđ٠$ b\&9֋/f }]p![lSw[Bѽ{9s`teLߌʕ+'&&`՞6mZ͚5t6߿7664i8p Fxxxٲeb*p ?ڵ355uQ+feeգGB:aٸqc4 9r!bҥmڴA !!nݺh#t',h8Qe2" y y544!h\u&zl HES[6e0 |U.FK{-\.?{/2OūEg_KdrڦuWT:0k5!mI_*3_,38!F/0qCܤ3/Xٳgt`|&0դ8ߡc7w)Ug"iF ZY=`b4̍޾}C7-&uqʔ)ժU4-_\=:_lmm s~c @ E||:ynB\j3ڔV^@i"fs˧G-x3 X?guM9:3& wn~!7Sz  ҄G\})Z~_!7zz44+:Ig'w} c6EX'^I_mnpr8oaA[˔)sĉڵkө'O`SM6]r%,OHR;;7v֍1&Hcǎ{}ߚ5kΝ;kW^P'PQYYY>>>2FbU@lݺUsS۷q -`#W?xÇ&En|{lhky G!K5]gAf_s/@"r"?J,[ {d\N&%(K#D@ȕg5y=|`%ޓd7)r('̂}*r纚"Wy5&=j۶-Wzq4({͛7/)PG\ r͙3g=zTSـK. 2L~{  ZbE w( x^Z< D5e˖A4hЀ淑clL\ @ B~lpw"Ae_\A޻6/:7 9P[vX =b8kaɮXcƣ:s-2A3J&ɞ&+*< >p?s$VrKdg 72,::~CWSSSH> ^-:AVt< C'b;v0aӧOwX?{bbɓ TfĉZFŋzZ "nBP.]6_,\g@\vۤ5)R0 ?J,Fbc%|`ה'r_ EQy 066ҥK@@@ZZǏftqO4QFFR*?rrr! Ȭ&Mу~PghڪUPO؏9~z: ̰_H Ƥm޽8n%J`Lg#ߡ"n?H _P2vSIǼ%o,\7 QoG#߿el[6e&A! X _ 6ł: 'X:rJqT,c-+}eCecVW**țp8߻wbذaW^ݶm[n k3k, aGGǓ'O xw'xwU\'11UVmڴYnml|ԩgӧIօF^DŽ~AyT\X&7n \b1i@1I았[P;tc+$ I]n@/ܚg7I?#~rymJq47Yi)̍QrlylA&\{TzY1|-Dݻw5 P-_|Yti58wܯ;fH(U^F6bȑt8? W5􄷷ܯ[ñcK*~ٳs+VsvIγ8Йj׬Y3zgΜ9L@ 4߽);UrD$T.1Bǫp7-[xnS~WG3inD*)?_e\h;P,Okvʵ5bAYS8]JsFqԹloMu9ۛQTiT |wKʻ8wF]tQZNsؚ(\-[foo=P+qƍ={"\N,++%K"PZ5:055Q̦sNNNt* ZBNa͛\333(Zj@hDsss޽w#t*vϟ?{n##=`+7!/75GŷKT]m W^C}$lGV켇&?|\ܯ{X͓1Wvڵk߻w]vyĚ5kYF_K @?LžGş_\ We}"[#ߜhSNo1D@Yc}e4nnn+V믿Zn$ 3f8::1%D2 -dȐ?2~J.\ئMS˄ԩSffw>|hԨѲeT̟A|fD.y{ G!KO5}^|Ya_7}@}P+m|9g6-hڅ֡@ L{[ s_sss蛨(|ᘘ(ϟ3Or#%%۷yx]z {ݲe 4'DtIm41!+ XWagỆ3r͓Gm5ŧi8ŵw4nAAkn=ڹsgnJ'##cթvZ|ATRp4#ɒp@D*jz}*Ο?M6A48zQJk4D\#}8/;ԝ71̞7 qIhEڏlOej8t9D :oKЁ87(*۬ H͛/2VC<,,l„ LG&_d (Ě9j?$lj:?Z OBjG"MWnӽvyW^i~=ر?)S|;vˮV WG@.iDGGWPݻw@zСC/]Θ~+7aց (ȳ+}Q&{)d+؄gͥB,Y=quadPHr3k,XWSL ߹sgvv6Ѝ7ޝ+)'~-;wN("ٳb*hr=qD++[n]p9s? 󡘌 &hۿ.$Ӵiz1±k.33N:1q]&Wq_wu~wnbb=@FFFLLLD_*$DQ׀}љ6fϞ}}iӦ_U=y3qC&<|&[RN89& XpanLMM1qիҤI^RX|:j*!&R8P͛_ra֬YР)LZDs/kR<߂sεo8..nٲe|>߳ j8sa? 
[Binary image data omitted: PNG files from parameters/man/figures/ (logo.png, figure1.png).]
parameters/man/dot-data_frame.Rd0000644000176200001440000000035513641634603016337 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{.data_frame}
\alias{.data_frame}
\title{help-functions}
\usage{
.data_frame(...)
}
\description{
help-functions
}
\keyword{internal}
parameters/man/model_parameters.mira.Rd0000644000176200001440000001141015066721002017725 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_mice.R
\name{model_parameters.mira}
\alias{model_parameters.mira}
\title{Parameters from multiply imputed repeated analyses}
\usage{
\method{model_parameters}{mira}(
  model,
  ci = 0.95,
  exponentiate = FALSE,
  p_adjust = NULL,
  keep = NULL,
  drop = NULL,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{An object of class \code{mira} or \code{mipo}.}

\item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).}

\item{exponentiate}{Logical, indicating whether or not to exponentiate the
coefficients (and related confidence intervals). This is typical for
logistic regression, or more generally speaking, for models with log or
logit links. It is also recommended to use \code{exponentiate = TRUE} for models
with log-transformed response values. For models with a log-transformed
response variable, when \code{exponentiate = TRUE}, a one-unit increase in the
predictor is associated with multiplying the outcome by that predictor's
coefficient. \strong{Note:} Delta-method standard errors are also computed (by
multiplying the standard errors by the transformed coefficients). This is
to mimic behaviour of other software packages, such as Stata, but these
standard errors poorly estimate uncertainty for the transformed
coefficient. The transformed confidence interval more clearly captures
this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"}
will only exponentiate coefficients from non-Gaussian
families.}

\item{p_adjust}{String value, if not \code{NULL}, indicates the method to adjust
p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible
adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"}, \code{"sup-t"}, and
\code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from
\strong{emmeans}). \code{"sup-t"} computes simultaneous confidence bands, also
called sup-t confidence band (Montiel Olea & Plagborg-Møller, 2019).}

\item{keep}{Character containing a regular expression pattern that
describes the parameters that should be included (for \code{keep}) or excluded
(for \code{drop}) in the returned data frame. \code{keep} may also be a
named list of regular expressions. All non-matching parameters will be
removed from the output. 
If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{See \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods.} } \description{ Format models of class \code{mira}, obtained from \code{mice::width.mids()}, or of class \code{mipo}. } \details{ \code{model_parameters()} for objects of class \code{mira} works similar to \code{summary(mice::pool())}, i.e. it generates the pooled summary of multiple imputed repeated regression analyses. } \examples{ \dontshow{if (require("mice", quietly = TRUE) && require("gee", quietly = TRUE)) withAutoprint(\{ # examplesIf} library(parameters) data(nhanes2, package = "mice") imp <- mice::mice(nhanes2) fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl)) model_parameters(fit) \donttest{ # model_parameters() also works for models that have no "tidy"-method in mice data(warpbreaks) set.seed(1234) warpbreaks$tension[sample(1:nrow(warpbreaks), size = 10)] <- NA imp <- mice::mice(warpbreaks) fit <- with(data = imp, expr = gee::gee(breaks ~ tension, id = wool)) # does not work: # summary(mice::pool(fit)) model_parameters(fit) } # and it works with pooled results data("nhanes2", package = "mice") imp <- mice::mice(nhanes2) fit <- with(data = imp, exp = lm(bmi ~ age + hyp + chl)) pooled <- mice::pool(fit) model_parameters(pooled) \dontshow{\}) # examplesIf} } parameters/man/display.parameters_model.Rd0000644000176200001440000000706515066721002020455 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/display.R \name{display.parameters_model} \alias{display.parameters_model} \title{Print tables in different output formats} \usage{ \method{display}{parameters_model}(object, format = "markdown", ...) } \arguments{ \item{object}{An object returned by one of the package's function, for example \code{\link[=model_parameters]{model_parameters()}}, \code{\link[=simulate_parameters]{simulate_parameters()}}, \code{\link[=equivalence_test]{equivalence_test()}} or \code{\link[=principal_components]{principal_components()}}.} \item{format}{String, indicating the output format. Can be \code{"markdown"} \code{"html"}, or \code{"tt"}. \code{format = "tt"} creates a \code{tinytable} object, which is either printed as markdown or HTML table, depending on the environment. 
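For instance, assuming \code{mp} is a table returned by \code{model_parameters()},
\code{display(mp, format = "html")} returns a \code{gt_tbl} object, while
\code{display(mp, format = "tt")} returns a \code{tinytable} object.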
See \code{\link[insight:export_table]{insight::export_table()}} for details.} \item{...}{Arguments passed to the underlying functions, such as \code{print_md()} or \code{print_html()}.} } \value{ If \code{format = "markdown"}, the return value will be a character vector in markdown-table format. If \code{format = "html"}, an object of class \code{gt_tbl}. If \code{format = "tt"}, an object of class \code{tinytable}. } \description{ Prints tables (i.e. data frame) in different output formats. \code{print_md()} is an alias for \code{display(format = "markdown")} and \code{print_html()} is an alias for \code{display(format = "html")}. A third option is \code{display(format = "tt")}, which returns a \code{tinytable} object, which is either printed as markdown or HTML table, depending on the environment. } \details{ \code{display()} is useful when the table-output from functions, which is usually printed as formatted text-table to console, should be formatted for pretty table-rendering in markdown documents, or if knitted from rmarkdown to PDF or Word files. See \href{https://easystats.github.io/parameters/articles/model_parameters_formatting.html}{vignette} for examples. } \examples{ \dontshow{if (require("gt", quietly = TRUE)) withAutoprint(\{ # examplesIf} model <- lm(mpg ~ wt + cyl, data = mtcars) mp <- model_parameters(model) display(mp) \donttest{ data(iris) lm1 <- lm(Sepal.Length ~ Species, data = iris) lm2 <- lm(Sepal.Length ~ Species + Petal.Length, data = iris) lm3 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) out <- compare_parameters(lm1, lm2, lm3) print_html( out, select = "{coef}{stars}|({ci})", column_labels = c("Estimate", "95\% CI") ) # line break, unicode minus-sign print_html( out, select = "{estimate}{stars}
({ci_low} \u2212 {ci_high})", column_labels = c("Est. (95\% CI)") ) } \dontshow{\}) # examplesIf} \dontshow{if (all(insight::check_if_installed(c("glmmTMB", "lme4", "tinytable"), quietly = TRUE))) withAutoprint(\{ # examplesIf} \donttest{ data(iris) data(Salamanders, package = "glmmTMB") m1 <- lm(Sepal.Length ~ Species * Petal.Length, data = iris) m2 <- lme4::lmer( Sepal.Length ~ Petal.Length + Petal.Width + (1 | Species), data = iris ) m3 <- glmmTMB::glmmTMB( count ~ spp + mined + (1 | site), ziformula = ~mined, family = poisson(), data = Salamanders ) out <- compare_parameters(m1, m2, m3, effects = "all", component = "all") display(out, format = "tt") display(out, select = "{estimate}|{ci}", format = "tt") } \dontshow{\}) # examplesIf} } \seealso{ \code{\link[=print.parameters_model]{print.parameters_model()}} and \code{\link[=print.compare_parameters]{print.compare_parameters()}} } parameters/man/model_parameters.Rd0000644000176200001440000006544615053035103017014 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/1_model_parameters.R \name{model_parameters} \alias{model_parameters} \alias{parameters} \title{Model Parameters} \usage{ model_parameters(model, ...) parameters(model, ...) } \arguments{ \item{model}{Statistical Model.} \item{...}{Arguments passed to or from other methods. Non-documented arguments are \itemize{ \item \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{groups} can be used to group coefficients. These arguments will be passed to the print-method, or can directly be used in \code{print()}, see documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}. \item If \code{s_value = TRUE}, the p-value will be replaced by the S-value in the output (cf. \emph{Rafi and Greenland 2020}). \item \code{pd} adds an additional column with the \emph{probability of direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details). Furthermore, see 'Examples' in \code{\link[=model_parameters.default]{model_parameters.default()}}. \item For developers, whose interest mainly is to get a "tidy" data frame of model summaries, it is recommended to set \code{pretty_names = FALSE} to speed up computation of the summary table. }} } \value{ A data frame of indices related to the model's parameters. } \description{ Compute and extract model parameters. The available options and arguments depend on the modeling \strong{package} and model \code{class}. Follow one of these links to read the model-specific documentation: \itemize{ \item \link[=model_parameters.default]{Default method}: \code{lm}, \code{glm}, \strong{stats}, \strong{censReg}, \strong{MASS}, \strong{survey}, ... \item \link[=model_parameters.cgam]{Additive models}: \strong{bamlss}, \strong{gamlss}, \strong{mgcv}, \strong{scam}, \strong{VGAM}, \code{Gam} (although the output of \code{Gam} is more Anova-alike), \code{gamm}, ... \item \link[=model_parameters.aov]{ANOVA}: \strong{afex}, \code{aov}, \code{anova}, \code{Gam}, ... \item \link[=model_parameters.brmsfit]{Bayesian}: \strong{BayesFactor}, \strong{blavaan}, \strong{brms}, \strong{MCMCglmm}, \strong{posterior}, \strong{rstanarm}, \code{bayesQR}, \code{bcplm}, \code{BGGM}, \code{blmrm}, \code{blrm}, \code{mcmc.list}, \code{MCMCglmm}, ... \item \link[=model_parameters.hclust]{Clustering}: \strong{hclust}, \strong{kmeans}, \strong{mclust}, \strong{pam}, ... 
\item \link[=model_parameters.htest]{Correlations, t-tests, etc.}: \strong{lmtest}, \code{htest}, \code{pairwise.htest}, ... \item \link[=model_parameters.rma]{Meta-Analysis}: \strong{metaBMA}, \strong{metafor}, \strong{metaplus}, ... \item \link[=model_parameters.glmmTMB]{Mixed models}: \strong{cplm}, \strong{glmmTMB}, \strong{lme4}, \strong{lmerTest}, \strong{nlme}, \strong{ordinal}, \strong{robustlmm}, \strong{spaMM}, \code{mixed}, \code{MixMod}, ... \item \link[=model_parameters.mlm]{Multinomial, ordinal and cumulative link}: \strong{brglm2}, \strong{DirichletReg}, \strong{nnet}, \strong{ordinal}, \code{mlm}, ... \item \link[=model_parameters.mira]{Multiple imputation}: \strong{mice} \item \link[=model_parameters.principal]{PCA, FA, CFA, SEM}: \strong{FactoMineR}, \strong{lavaan}, \strong{psych}, \code{sem}, ... \item \link[=model_parameters.zcpglm]{Zero-inflated and hurdle}: \strong{cplm}, \strong{mhurdle}, \strong{pscl}, ... \item \link[=model_parameters.glimML]{Other models}: \strong{aod}, \strong{bbmle}, \strong{betareg}, \strong{emmeans}, \strong{epiR}, \strong{glmx}, \strong{ivfixed}, \strong{ivprobit}, \strong{JRM}, \strong{lmodel2}, \strong{logitsf}, \strong{marginaleffects}, \strong{margins}, \strong{maxLik}, \strong{mediation}, \strong{mfx}, \strong{multcomp}, \strong{mvord}, \strong{plm}, \strong{PMCMRplus}, \strong{quantreg}, \strong{selection}, \strong{systemfit}, \strong{tidymodels}, \strong{varEST}, \strong{WRS2}, \code{bfsl}, \code{deltaMethod}, \code{fitdistr}, \code{mjoint}, \code{mle}, \code{model.avg}, ... } } \details{ A full overview can be found here: https://easystats.github.io/parameters/reference/ } \note{ The \code{\link[=print.parameters_model]{print()}} method has several arguments to tweak the output. There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\strong{see}-package}, and a dedicated method for use inside rmarkdown files, \code{\link[=print_md.parameters_model]{print_md()}}. \cr \cr \strong{For developers}, if speed performance is an issue, you can use the (undocumented) \code{pretty_names} argument, e.g. \code{model_parameters(..., pretty_names = FALSE)}. This will skip the formatting of the coefficient names and makes \code{model_parameters()} faster. } \section{Standardization of model coefficients}{ Standardization is based on \code{\link[=standardize_parameters]{standardize_parameters()}}. In case of \code{standardize = "refit"}, the data used to fit the model will be standardized and the model is completely refitted. In such cases, standard errors and confidence intervals refer to the standardized coefficient. The default, \code{standardize = "refit"}, never standardizes categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages or other software packages (like SPSS). To mimic behaviour of SPSS or packages such as \strong{lm.beta}, use \code{standardize = "basic"}. } \section{Standardization Methods}{ \itemize{ \item \strong{refit}: This method is based on a complete model re-fit with a standardized version of the data. Hence, this method is equal to standardizing the variables before fitting the model. It is the "purest" and the most accurate (Neter et al., 1989), but it is also the most computationally costly and long (especially for heavy models such as Bayesian models). 
This method is particularly recommended for complex models that include
interactions or transformations (e.g., polynomial or spline terms). The
\code{robust} argument (defaulting to \code{FALSE}) enables a robust standardization
of data, i.e., based on the \code{median} and \code{MAD} instead of the \code{mean} and
\code{SD}. \strong{See \code{\link[datawizard:standardize]{datawizard::standardize()}} for more details.}
\strong{Note} that \code{standardize_parameters(method = "refit")} may not return the
same results as fitting a model on data that has been standardized with
\code{standardize()}; \code{standardize_parameters()} uses the data used by the model
fitting function, which might not be the same data if there are missing
values. See the \code{remove_na} argument in \code{standardize()}.
\item \strong{posthoc}: Post-hoc standardization of the parameters, aiming at
emulating the results obtained by "refit" without refitting the model. The
coefficients are divided by the standard deviation (or MAD if \code{robust}) of
the outcome (which becomes their expression 'unit'). Then, the coefficients
related to numeric variables are additionally multiplied by the standard
deviation (or MAD if \code{robust}) of the related terms, so that they correspond
to changes of 1 SD of the predictor (e.g., "A change in 1 SD of \code{x} is
related to a change of 0.24 of the SD of \code{y}"). This does not apply to
binary variables or factors, so the coefficients are still related to
changes in levels. This method is not accurate and tends to give aberrant
results when interactions are specified.
\item \strong{basic}: This method is similar to \code{method = "posthoc"}, but treats
all variables as continuous: it also scales the coefficients of factor
levels (transformed to integers) and binary predictors by the standard
deviation of the corresponding model matrix column. Although inappropriate
for these cases, this method is the one implemented by default in other
software packages, such as \code{\link[lm.beta:lm.beta]{lm.beta::lm.beta()}}.
\item \strong{smart} (Standardization of Model's parameters with Adjustment,
Reconnaissance and Transformation - \emph{experimental}): Similar to
\code{method = "posthoc"} in that it does not involve model refitting. The
difference is that the SD (or MAD if \code{robust}) of the response is computed
on the relevant section of the data. For instance, if a factor with 3 levels
A (the intercept), B and C is entered as a predictor, the effect
corresponding to B vs. A will be scaled by the variance of the response at
the intercept only. As a result, the coefficients for effects of factors are
similar to a Glass' delta.
\item \strong{pseudo} (\emph{for 2-level (G)LMMs only}): In this (post-hoc) method,
the response and the predictor are standardized based on the level of
prediction (levels are detected with
\code{\link[performance:check_group_variation]{performance::check_group_variation()}}): Predictors are standardized
based on their SD at level of prediction (see also
\code{\link[datawizard:demean]{datawizard::demean()}}); The outcome (in linear LMMs) is standardized
based on a fitted random-intercept-model, where
\code{sqrt(random-intercept-variance)} is used for level 2 predictors, and
\code{sqrt(residual-variance)} is used for level 1 predictors (Hoffman 2015,
page 342). A warning is given when a within-group variable is found to also
have between-group variance.
}

See also \href{https://easystats.github.io/parameters/articles/standardize_parameters_effsize.html}{package vignette}.
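As an illustration, here is a minimal sketch comparing two of these methods
(assuming the \code{mtcars} data shipped with R):

\preformatted{
model <- lm(mpg ~ wt + factor(cyl), data = mtcars)

# "refit": re-fits the model on standardized data;
# factors are left unstandardized
standardize_parameters(model, method = "refit")

# "basic": post-hoc scaling that also treats factor dummies
# as continuous, mimicking lm.beta::lm.beta()
standardize_parameters(model, method = "basic")
}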
}

\section{Labeling the Degrees of Freedom}{
Throughout the \strong{parameters} package, we decided to label the residual
degrees of freedom \emph{df_error}. The reason for this is that these degrees
of freedom do not always refer to the residuals. For certain models, they
refer to the estimate error - in a linear model these are the same, but in,
for instance, any mixed effects model, this isn't strictly true. Hence, we
think that \code{df_error} is the most generic label for these degrees of
freedom.
}

\section{Confidence intervals and approximation of degrees of freedom}{
There are different ways of approximating the degrees of freedom depending
on different assumptions about the nature of the model and its sampling
distribution. The \code{ci_method} argument modulates the method for computing
degrees of freedom (df) that are used to calculate confidence intervals (CI)
and the related p-values. The following options are allowed, depending on
the model class:

\strong{Classical methods:}

Classical inference is generally based on the \strong{Wald method}. The Wald
approach to inference computes a test statistic by dividing the parameter
estimate by its standard error (Coefficient / SE), then comparing this
statistic against a t- or normal distribution. This approach can be used to
compute CIs and p-values.

\code{"wald"}:
\itemize{
\item Applies to \emph{non-Bayesian models}. For \emph{linear models}, CIs
computed using the Wald method (SE and a \emph{t-distribution with residual df});
p-values computed using the Wald method with a \emph{t-distribution with residual df}.
For other models, CIs computed using the Wald method (SE and a
\emph{normal distribution}); p-values computed using the Wald method with a
\emph{normal distribution}.
}

\code{"normal"}
\itemize{
\item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values,
but always use a normal distribution.
}

\code{"residual"}
\itemize{
\item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values,
but always use a \emph{t-distribution with residual df} when possible. If the
residual df for a model cannot be determined, a normal distribution is
used instead.
}

\strong{Methods for mixed models:}

Compared to fixed effects (or single-level) models, determining appropriate
df for Wald-based inference in mixed models is more difficult. See
\href{https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable}{the R GLMM FAQ}
for a discussion.

Several approximate methods for computing df are available, but you should
also consider using profile likelihood (\code{"profile"}) or bootstrap
(\code{"boot"}) CIs and p-values instead.

\code{"satterthwaite"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the
Wald method (SE and a \emph{t-distribution with Satterthwaite df}); p-values
computed using the Wald method with a \emph{t-distribution with Satterthwaite df}.
}

\code{"kenward"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald
method (\emph{Kenward-Roger SE} and a \emph{t-distribution with Kenward-Roger df});
p-values computed using the Wald method with \emph{Kenward-Roger SE and
t-distribution with Kenward-Roger df}.
}

\code{"ml1"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald
method (SE and a \emph{t-distribution with m-l-1 approximated df}); p-values
computed using the Wald method with a \emph{t-distribution with m-l-1
approximated df}. 
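For example, assuming \code{model} is a linear mixed model (e.g., fitted with
\strong{lme4}), \code{model_parameters(model, ci_method = "ml1")} requests this
approximation.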
See \code{\link[=ci_ml1]{ci_ml1()}}.
}

\code{"betwithin"}
\itemize{
\item Applies to \emph{linear mixed models} and \emph{generalized linear mixed
models}. CIs computed using the Wald method (SE and a \emph{t-distribution with
between-within df}); p-values computed using the Wald method with a
\emph{t-distribution with between-within df}. See \code{\link[=ci_betwithin]{ci_betwithin()}}.
}

\strong{Likelihood-based methods:}

Likelihood-based inference is based on comparing the likelihood for the
maximum-likelihood estimate to the likelihood for models with one or more
parameter values changed (e.g., set to zero or a range of alternative
values). Likelihood ratios for the maximum-likelihood and alternative models
are compared to a \eqn{\chi}-squared distribution to compute CIs and
p-values.

\code{"profile"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{glm}, \code{polr}, \code{merMod} or
\code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter},
using linear interpolation to find where likelihood ratio equals a critical
value; p-values computed using the Wald method with a \emph{normal-distribution}
(note: this might change in a future update!)
}

\code{"uniroot"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{glmmTMB}. CIs computed by
\emph{profiling the likelihood curve for a parameter}, using root finding to find
where likelihood ratio equals a critical value; p-values computed using the
Wald method with a \emph{normal-distribution} (note: this might change in a
future update!)
}

\strong{Methods for bootstrapped or Bayesian models:}

Bootstrap-based inference is based on \strong{resampling} and refitting the model
to the resampled datasets. The distribution of parameter estimates across
resampled datasets is used to approximate the parameter's sampling
distribution. Depending on the type of model, several different methods for
bootstrapping and constructing CIs and p-values from the bootstrap
distribution are available.

For Bayesian models, inference is based on drawing samples from the model
posterior distribution.

\code{"quantile"} (or \code{"eti"})
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian
models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{equal tailed
intervals} using the quantiles of the bootstrap or posterior samples;
p-values are based on the \emph{probability of direction}. See
\code{\link[bayestestR:eti]{bayestestR::eti()}}.
}

\code{"hdi"}
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian
models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{highest density
intervals} for the bootstrap or posterior samples; p-values are based on the
\emph{probability of direction}. See \code{\link[bayestestR:hdi]{bayestestR::hdi()}}.
}

\code{"bci"} (or \code{"bcai"})
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian
models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{bias corrected
and accelerated intervals} for the bootstrap or posterior samples; p-values
are based on the \emph{probability of direction}. See \code{\link[bayestestR:bci]{bayestestR::bci()}}.
}

\code{"si"}
\itemize{
\item Applies to \emph{Bayesian models} with proper priors. CIs computed as
\emph{support intervals} comparing the posterior samples against the prior
samples; p-values are based on the \emph{probability of direction}. 
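For example, assuming \code{model} is a Bayesian model with proper priors (e.g.,
fitted with \strong{rstanarm} or \strong{brms}),
\code{model_parameters(model, ci_method = "si")} computes such support intervals.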
See \code{\link[bayestestR:si]{bayestestR::si()}}.
}

\code{"boot"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{merMod}. CIs computed
using \emph{parametric bootstrapping} (simulating data from the fitted model);
p-values computed using the Wald method with a \emph{normal-distribution}
(note: this might change in a future update!).
}

For all iteration-based methods other than \code{"boot"} (\code{"hdi"}, \code{"quantile"},
\code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, \code{"bcai"}), p-values are based on the
probability of direction (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is
converted into a p-value using \code{\link[bayestestR:pd_to_p]{bayestestR::pd_to_p()}}.
}

\section{Statistical inference - how to quantify evidence}{
There is no standardized approach to drawing conclusions based on the
available data and statistical models. A frequently chosen but also much
criticized approach is to evaluate results based on their statistical
significance (\emph{Amrhein et al. 2017}).

A more sophisticated way would be to test whether estimated effects exceed
the "smallest effect size of interest", to avoid even the smallest effects
being considered relevant simply because they are statistically significant,
but clinically or practically irrelevant (\emph{Lakens et al. 2018, Lakens 2024}).

A rather unconventional approach, which is nevertheless advocated by various
authors, is to interpret results from classical regression models either in
terms of probabilities, similar to the usual approach in Bayesian statistics
(\emph{Schweder 2018; Schweder and Hjort 2003; Vos 2022}), or in terms of a
relative measure of "evidence" or "compatibility" with the data (\emph{Greenland
et al. 2022; Rafi and Greenland 2020}), which comes close to a probabilistic
interpretation.

A more detailed discussion of this topic is found in the documentation of
\code{\link[=p_function]{p_function()}}.

The \strong{parameters} package provides several options or functions to aid
statistical inference. These are, for example:
\itemize{
\item \code{\link[=equivalence_test.lm]{equivalence_test()}}, to compute the (conditional)
equivalence test for frequentist models
\item \code{\link[=p_significance.lm]{p_significance()}}, to compute the probability of
\emph{practical significance}, which can be conceptualized as a unidirectional
equivalence test
\item \code{\link[=p_function]{p_function()}}, or \emph{consonance function}, to compute p-values
and compatibility (confidence) intervals for statistical models
\item the \code{pd} argument (setting \code{pd = TRUE}) in \code{model_parameters()} includes
a column with the \emph{probability of direction}, i.e. the probability that a
parameter is strictly positive or negative. See \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}
for details. If plotting is desired, the \code{\link[=p_direction.lm]{p_direction()}} function
can be used, together with \code{plot()}.
\item the \code{s_value} argument (setting \code{s_value = TRUE}) in \code{model_parameters()}
replaces the p-values with their related \emph{S}-values (\emph{Rafi and Greenland 2020})
\item finally, it is possible to generate distributions of model coefficients
by generating bootstrap-samples (setting \code{bootstrap = TRUE}) or simulating
draws from model coefficients using \code{\link[=simulate_model]{simulate_model()}}. These samples
can then be treated as "posterior samples" and used in many functions from
the \strong{bayestestR} package. 
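A minimal sketch of this last workflow (assuming the \strong{bayestestR}
package is installed):

\preformatted{
model <- lm(mpg ~ wt + cyl, data = mtcars)

# draw simulated coefficients and treat them as "posterior samples"
draws <- simulate_model(model, iterations = 1000)

# describe the simulated draws with functions from bayestestR
bayestestR::describe_posterior(draws)
}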
}

Most of the options and functions shown above derive from methods originally
implemented for Bayesian models (\emph{Makowski et al. 2019}). However, assuming
that model assumptions are met (meaning that the model fits well to the
data and the chosen model correctly reflects the data generating process
(distributional model family), etc.), it seems appropriate to interpret
results from classical frequentist models in a "Bayesian way" (more details:
documentation in \code{\link[=p_function]{p_function()}}).
}

\section{Interpretation of Interaction Terms}{
Note that the \emph{interpretation} of interaction terms depends on many
characteristics of the model. The number of parameters, and overall
performance of the model, can differ \emph{or not} between \code{a * b},
\code{a : b}, and \code{a / b}, suggesting that sometimes interaction terms give
different parameterizations of the same model, but other times they give
completely different models (depending on \code{a} or \code{b} being factors or
covariates, included as main effects or not, etc.). Their interpretation
depends on the full context of the model, which should not be inferred from
the parameters table alone - rather, we recommend using packages that
calculate estimated marginal means or marginal effects, such as
\CRANpkg{modelbased}, \CRANpkg{emmeans}, \CRANpkg{ggeffects}, or
\CRANpkg{marginaleffects}. To raise awareness for this issue, you may use
\code{print(..., show_formula = TRUE)} to add the model specification to the
output of the \code{\link[=print.parameters_model]{print()}} method for \code{model_parameters()}.
}

\section{Global Options to Customize Messages and Tables when Printing}{
The \code{verbose} argument can be used to display or silence messages and
warnings for the different functions in the \strong{parameters} package. However,
some messages providing additional information can be displayed or
suppressed using \code{options()}:
\itemize{
\item \code{parameters_info}: \code{options(parameters_info = TRUE)} will override the
\code{include_info} argument in \code{model_parameters()} and always show the model
summary for non-mixed models.
\item \code{parameters_mixed_info}: \code{options(parameters_mixed_info = TRUE)} will
override the \code{include_info} argument in \code{model_parameters()} for mixed
models, and will then always show the model summary.
\item \code{parameters_cimethod}: \code{options(parameters_cimethod = TRUE)} will show
the additional information about the approximation method used to calculate
confidence intervals and p-values. Set to \code{FALSE} to hide this message when
printing \code{model_parameters()} objects.
\item \code{parameters_exponentiate}: \code{options(parameters_exponentiate = TRUE)}
will show the additional information on how to interpret coefficients of
models with log-transformed response variables or with log-/logit-links when
the \code{exponentiate} argument in \code{model_parameters()} is not \code{TRUE}. Set this
option to \code{FALSE} to hide this message when printing \code{model_parameters()}
objects.
}

There are further options that can be used to modify the default behaviour
for printed outputs:
\itemize{
\item \code{parameters_labels}: \code{options(parameters_labels = TRUE)} will use
variable and value labels for pretty names, if data is labelled. If no
labels are available, default pretty names are used.
\item \code{parameters_interaction}: \verb{options(parameters_interaction = )} will
replace the interaction mark (by default, \code{*}) with the related character. 
\item \code{parameters_select}: \verb{options(parameters_select = )} will set the
default for the \code{select} argument. See the argument's documentation for
available options.
\item \code{easystats_table_width}: \verb{options(easystats_table_width = )} will set
the default width for tables in text-format, i.e. for most of the outputs
printed to the console. If not specified, tables will be adjusted to the
current available width, e.g. of the console (or any other source for
textual output, like markdown files). The argument \code{table_width} can also
be used in most \code{print()} methods to specify the table width as desired.
\item \code{insight_use_symbols}: \code{options(insight_use_symbols = TRUE)} will try
to print unicode-chars for symbols as column names, wherever possible (e.g.,
\ifelse{html}{\out{&omega;}}{\eqn{\omega}} instead of \code{Omega}).
}
}
\references{
\itemize{
\item Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is
flat (p > 0.05): Significance thresholds and the crisis of unreplicable
research. PeerJ, 5, e3544. \doi{10.7717/peerj.3544}
\item Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference,
Emphasize Unconditional Compatibility Descriptions of Statistics. (2022)
https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022)
\item Hoffman, L. (2015). Longitudinal analysis: Modeling within-person
fluctuation and change. Routledge.
\item Lakens, D. (2024). Improving Your Statistical Inferences (Version
v1.5.1). Retrieved from https://lakens.github.io/statistical_inferences/.
\doi{10.5281/ZENODO.6409077}
\item Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence
Testing for Psychological Research: A Tutorial. Advances in Methods and
Practices in Psychological Science, 1(2), 259–269.
\item Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D.
(2019). Indices of Effect Existence and Significance in the Bayesian
Framework. Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767}
\item Montiel Olea, J. L., and Plagborg-Møller, M. (2019). Simultaneous
confidence bands: Theory, implementation, and an application to SVARs.
Journal of Applied Econometrics, 34(1), 1–17. \doi{10.1002/jae.2656}
\item Neter, J., Wasserman, W., and Kutner, M. H. (1989). Applied linear
regression models.
\item Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical
science: replace confidence and significance by compatibility and surprise.
BMC Medical Research Methodology (2020) 20:244.
\item Schweder T. Confidence is epistemic probability for empirical science.
Journal of Statistical Planning and Inference (2018) 195:116–125.
\doi{10.1016/j.jspi.2017.09.016}
\item Schweder T, Hjort NL. Frequentist analogues of priors and posteriors.
In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory
Data Confrontation in Economics, pp. 285-217. Princeton University Press,
Princeton, NJ, 2003
\item Vos P, Holbert D. Frequentist statistical inference without repeated
sampling. Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x}
}
}
\seealso{
\code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a
consistent, standardized naming scheme. 
} parameters/man/dot-filter_component.Rd0000644000176200001440000000062613641634603017624 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/utils.R \name{.filter_component} \alias{.filter_component} \title{for models with zero-inflation component, return required component of model-summary} \usage{ .filter_component(dat, component) } \description{ for models with zero-inflation component, return required component of model-summary } \keyword{internal} parameters/man/p_value_betwithin.Rd0000644000176200001440000000610414716604200017170 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ci_betwithin.R, R/dof_betwithin.R, % R/p_value_betwithin.R \name{ci_betwithin} \alias{ci_betwithin} \alias{dof_betwithin} \alias{p_value_betwithin} \title{Between-within approximation for SEs, CIs and p-values} \usage{ ci_betwithin(model, ci = 0.95, ...) dof_betwithin(model) p_value_betwithin(model, dof = NULL, ...) } \arguments{ \item{model}{A mixed model.} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{...}{Additional arguments passed down to the underlying functions. E.g., arguments like \code{vcov} or \code{vcov_args} can be used to compute confidence intervals using a specific variance-covariance matrix for the standard errors.} \item{dof}{Degrees of Freedom.} } \value{ A data frame. } \description{ Approximation of degrees of freedom based on a "between-within" heuristic. } \details{ \subsection{Small Sample Cluster corrected Degrees of Freedom}{ Inferential statistics (like p-values, confidence intervals and standard errors) may be biased in mixed models when the number of clusters is small (even if the sample size of level-1 units is high). In such cases it is recommended to approximate a more accurate number of degrees of freedom for such inferential statistics (see \emph{Li and Redden 2015}). The \emph{Between-within} denominator degrees of freedom approximation is recommended in particular for (generalized) linear mixed models with repeated measurements (longitudinal design). \code{dof_betwithin()} implements a heuristic based on the between-within approach. \strong{Note} that this implementation does not return exactly the same results as shown in \emph{Li and Redden 2015}, but similar. } \subsection{Degrees of Freedom for Longitudinal Designs (Repeated Measures)}{ In particular for repeated measure designs (longitudinal data analysis), the \emph{between-within} heuristic is likely to be more accurate than simply using the residual or infinite degrees of freedom, because \code{dof_betwithin()} returns different degrees of freedom for within-cluster and between-cluster effects. } } \examples{ \donttest{ if (require("lme4")) { data(sleepstudy) model <- lmer(Reaction ~ Days + (1 + Days | Subject), data = sleepstudy) dof_betwithin(model) p_value_betwithin(model) } } } \references{ \itemize{ \item Elff, M.; Heisig, J.P.; Schaeffer, M.; Shikano, S. (2019). Multilevel Analysis with Few Clusters: Improving Likelihood-based Methods to Provide Unbiased Estimates and Accurate Inference, British Journal of Political Science. \item Li, P., Redden, D. T. (2015). Comparing denominator degrees of freedom approximations for the generalized linear mixed model in analyzing binary outcome in small sample cluster-randomized trials. BMC Medical Research Methodology, 15(1), 38. 
\doi{10.1186/s12874-015-0026-x}
}
}
\seealso{
\code{dof_betwithin()} is a small helper function to calculate approximated degrees of freedom of model parameters, based on the "between-within" heuristic.
}
parameters/man/parameters-package.Rd0000644000176200001440000000561715005076551017220 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parameters-package.R
\docType{package}
\name{parameters-package}
\alias{parameters-package}
\title{parameters: Extracting, Computing and Exploring the Parameters of Statistical Models using R}
\description{
\strong{parameters}' primary goal is to provide utilities for processing the parameters of various statistical models (see \href{https://easystats.github.io/insight/}{here} for a list of supported models). Beyond computing \emph{p-values}, \emph{CIs}, \emph{Bayesian indices} and other measures for a wide variety of models, this package implements features like \emph{bootstrapping} of parameters and models, \emph{feature reduction} (feature extraction and variable selection), or tools for data reduction like functions to perform cluster, factor or principal component analysis.

Another important goal of the \strong{parameters} package is to facilitate and streamline the process of reporting results of statistical models, which includes the easy and intuitive calculation of standardized estimates or robust standard errors and p-values. \strong{parameters} therefore offers a simple and unified syntax to process a large variety of (model) objects from many different packages.

References: Lüdecke et al. (2020) \doi{10.21105/joss.02445}
}
\seealso{
Useful links:
\itemize{
\item \url{https://easystats.github.io/parameters/}
\item Report bugs at \url{https://github.com/easystats/parameters/issues}
}
}
\author{
\strong{Maintainer}: Daniel Lüdecke \email{officialeasystats@gmail.com} (\href{https://orcid.org/0000-0002-8895-3206}{ORCID})

Authors:
\itemize{
\item Dominique Makowski \email{dom.makowski@gmail.com} (\href{https://orcid.org/0000-0001-5375-9967}{ORCID})
\item Mattan S. Ben-Shachar \email{matanshm@post.bgu.ac.il} (\href{https://orcid.org/0000-0002-4287-4801}{ORCID})
\item Indrajeet Patil \email{patilindrajeet.science@gmail.com} (\href{https://orcid.org/0000-0003-1995-6531}{ORCID})
\item Søren Højsgaard \email{sorenh@math.aau.dk}
\item Brenton M. Wiernik \email{brenton@wiernik.org} (\href{https://orcid.org/0000-0001-9560-6336}{ORCID})
}

Other contributors:
\itemize{
\item Zen J.
Lau \email{zenjuen.lau@ntu.edu.sg} [contributor] \item Vincent Arel-Bundock \email{vincent.arel-bundock@umontreal.ca} (\href{https://orcid.org/0000-0003-2042-7063}{ORCID}) [contributor] \item Jeffrey Girard \email{me@jmgirard.com} (\href{https://orcid.org/0000-0002-7359-3746}{ORCID}) [contributor] \item Christina Maimone \email{christina.maimone@northwestern.edu} [reviewer] \item Niels Ohlsen [reviewer] \item Douglas Ezra Morrison \email{dmorrison01@ucla.edu} (\href{https://orcid.org/0000-0002-7195-830X}{ORCID}) [contributor] \item Joseph Luchman \email{jluchman@gmail.com} (\href{https://orcid.org/0000-0002-8886-9717}{ORCID}) [contributor] } } \keyword{internal} parameters/man/model_parameters.hclust.Rd0000644000176200001440000000421115066721002020300 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_hclust.R \name{model_parameters.hclust} \alias{model_parameters.hclust} \title{Parameters from Cluster Models (k-means, ...)} \usage{ \method{model_parameters}{hclust}(model, data = NULL, clusters = NULL, ...) } \arguments{ \item{model}{Cluster model.} \item{data}{A data frame.} \item{clusters}{A vector with clusters assignments (must be same length as rows in data).} \item{...}{Arguments passed to or from other methods.} } \description{ Format cluster models obtained for example by \code{\link[=kmeans]{kmeans()}}. } \examples{ \dontshow{if (all(insight::check_if_installed(c("dbscan", "cluster", "fpc"), quietly = TRUE))) withAutoprint(\{ # examplesIf} \donttest{ # # K-means ------------------------------- model <- kmeans(iris[1:4], centers = 3) rez <- model_parameters(model) rez # Get clusters predict(rez) # Clusters centers in long form attributes(rez)$means # Between and Total Sum of Squares attributes(rez)$Sum_Squares_Total attributes(rez)$Sum_Squares_Between # # Hierarchical clustering (hclust) --------------------------- data <- iris[1:4] model <- hclust(dist(data)) clusters <- cutree(model, 3) rez <- model_parameters(model, data, clusters) rez # Get clusters predict(rez) # Clusters centers in long form attributes(rez)$means # Between and Total Sum of Squares attributes(rez)$Total_Sum_Squares attributes(rez)$Between_Sum_Squares # # K-Medoids (PAM and HPAM) ============== model <- cluster::pam(iris[1:4], k = 3) model_parameters(model) model <- fpc::pamk(iris[1:4], criterion = "ch") model_parameters(model) # DBSCAN --------------------------- model <- dbscan::dbscan(iris[1:4], eps = 1.45, minPts = 10) rez <- model_parameters(model, iris[1:4]) rez # Get clusters predict(rez) # Clusters centers in long form attributes(rez)$means # Between and Total Sum of Squares attributes(rez)$Sum_Squares_Total attributes(rez)$Sum_Squares_Between # HDBSCAN model <- dbscan::hdbscan(iris[1:4], minPts = 10) model_parameters(model, iris[1:4]) } \dontshow{\}) # examplesIf} } parameters/man/bootstrap_model.Rd0000644000176200001440000000576615066721002016671 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/bootstrap_model.R \name{bootstrap_model} \alias{bootstrap_model} \alias{bootstrap_model.default} \title{Model bootstrapping} \usage{ bootstrap_model(model, iterations = 1000, ...) \method{bootstrap_model}{default}( model, iterations = 1000, type = "ordinary", parallel = "no", n_cpus = 1, cluster = NULL, verbose = FALSE, ... 
)
}
\arguments{
\item{model}{Statistical model.}

\item{iterations}{The number of draws to simulate/bootstrap.}

\item{...}{Arguments passed to or from other methods.}

\item{type}{Character string specifying the type of bootstrap. For mixed models of class \code{merMod} or \code{glmmTMB}, may be \code{"parametric"} (default) or \code{"semiparametric"} (see \code{?lme4::bootMer} for details). For all other models, see argument \code{sim} in \code{?boot::boot} (defaults to \code{"ordinary"}).}

\item{parallel}{The type of parallel operation to be used (if any).}

\item{n_cpus}{Number of processes to be used in parallel operation.}

\item{cluster}{Optional cluster when \code{parallel = "snow"}. See \code{?lme4::bootMer} for details.}

\item{verbose}{Toggle warnings and messages.}
}
\value{
A data frame of bootstrapped estimates.
}
\description{
Bootstrap a statistical model n times to return a data frame of estimates.
}
\details{
By default, \code{boot::boot()} is used to generate bootstraps from the model data, which are then used to \code{update()} the model, i.e. refit the model with the bootstrapped samples. For \code{merMod} objects (\strong{lme4}) or models from \strong{glmmTMB}, the \code{lme4::bootMer()} function is used to obtain bootstrapped samples. \code{bootstrap_parameters()} summarizes the bootstrapped model estimates.
}
\section{Using with \strong{emmeans}}{
The output can be passed directly to the various functions from the \strong{emmeans} package, to obtain bootstrapped estimates, contrasts, simple slopes, etc. and their confidence intervals. These can then be passed to \code{model_parameters()} to obtain standard errors, p-values, etc. (see example).

Note that the p-values returned here are estimated under the assumption of \emph{translation equivariance}: that the shape of the sampling distribution is unaffected by whether the null is true or not. If this assumption does not hold, p-values can be biased, and it is suggested to use proper permutation tests to obtain non-parametric p-values.
}
\examples{
\dontshow{if (require("boot", quietly = TRUE) && require("emmeans", quietly = TRUE)) withAutoprint(\{ # examplesIf}
\donttest{
model <- lm(mpg ~ wt + factor(cyl), data = mtcars)
b <- bootstrap_model(model)
print(head(b))

est <- emmeans::emmeans(b, consec ~ cyl)
print(model_parameters(est))
}
\dontshow{\}) # examplesIf}
}
\seealso{
\code{\link[=bootstrap_parameters]{bootstrap_parameters()}}, \code{\link[=simulate_model]{simulate_model()}}, \code{\link[=simulate_parameters]{simulate_parameters()}}
}
parameters/man/qol_cancer.Rd0000644000176200001440000000145714077467603015602 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{qol_cancer}
\alias{qol_cancer}
\title{Sample data set}
\format{
A data frame with 564 rows and 7 variables:
\describe{
\item{ID}{Patient ID}
\item{QoL}{Quality of Life Score}
\item{time}{Timepoint of measurement}
\item{age}{Age in years}
\item{phq4}{Patients' Health Questionnaire, 4-item version}
\item{hospital}{Hospital ID, where patient was treated}
\item{education}{Patients' educational level}
}
}
\description{
A sample data set with longitudinal data, used in the vignette describing the \code{datawizard::demean()} function. Health-related quality of life from cancer patients was measured at three time points (pre-surgery, 6 and 12 months after surgery).
}
\keyword{data}
parameters/man/model_parameters.glimML.Rd0000644000176200001440000002400515033425412020162 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_aod.R
\name{model_parameters.glimML}
\alias{model_parameters.glimML}
\title{Parameters from special models}
\usage{
\method{model_parameters}{glimML}(
  model,
  ci = 0.95,
  bootstrap = FALSE,
  iterations = 1000,
  component = "conditional",
  standardize = NULL,
  exponentiate = FALSE,
  p_adjust = NULL,
  include_info = getOption("parameters_info", FALSE),
  keep = NULL,
  drop = NULL,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{Model object.}

\item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).}

\item{bootstrap}{Should estimates be based on bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.brmsfit]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).}

\item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.}

\item{component}{Model component for which parameters should be shown. May be one of \code{"conditional"}, \code{"precision"} (e.g. \strong{betareg}), \code{"scale"} (e.g. \strong{ordinal}), \code{"extra"} (e.g. \strong{glmx}), \code{"marginal"} (e.g. \strong{mfx}), \code{"conditional"} or \code{"full"} (for \code{MuMIn::model.avg()}) or \code{"all"}. See section \emph{Model components} for an overview of possible options for \code{component}.}

\item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[=standardize_parameters]{standardize_parameters()}}. \strong{Importantly}:
\itemize{
\item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \strong{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model.
\item By default, the response (dependent) variable is also standardized, \emph{if applicable}. Set \code{include_response = FALSE} to avoid standardization of the response variable. See details in \code{\link[datawizard:standardize.default]{datawizard::standardize.default()}}.
\item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be standardized.
\item Robust estimation (i.e., \code{vcov} set to a value other than \code{NULL}) of standardized parameters only works when \code{standardize="refit"}.
}}

\item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. It is also recommended to use \code{exponentiate = TRUE} for models with log-transformed response values. For models with a log-transformed response variable, when \code{exponentiate = TRUE}, a one-unit increase in the predictor is associated with multiplying the outcome by that predictor's coefficient. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.
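For example, for a logistic regression, a call like \code{model_parameters(model, exponentiate = TRUE)} would return odds ratios instead of log-odds coefficients.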
}

\item{p_adjust}{String value, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"}, \code{"sup-t"}, and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \strong{emmeans}). \code{"sup-t"} computes simultaneous confidence bands, also called sup-t confidence bands (Montiel Olea & Plagborg-Møller, 2019).}

\item{include_info}{Logical, if \code{TRUE}, prints summary information about the model (model formula, number of observations, residual standard deviation and more).}

\item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list elements should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.}

\item{drop}{See \code{keep}.}

\item{verbose}{Toggle warnings and messages.}

\item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}. Further non-documented arguments are:
\itemize{
\item \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{groups} can be used to group coefficients. These arguments will be passed to the print-method, or can directly be used in \code{print()}, see documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}.
\item If \code{s_value = TRUE}, the p-value will be replaced by the S-value in the output (cf. \emph{Rafi and Greenland 2020}).
\item \code{pd} adds an additional column with the \emph{probability of direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details). Furthermore, see 'Examples' for this function.
\item For developers, whose interest mainly is to get a "tidy" data frame of model summaries, it is recommended to set \code{pretty_names = FALSE} to speed up computation of the summary table.
}}
}
\value{
A data frame of indices related to the model's parameters.
}
\description{
Parameters from special regression models not listed under one of the previous categories yet.
}
\section{Model components}{
Possible values for the \code{component} argument depend on the model class. The following are valid options:
\itemize{
\item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component.
\item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component.
\item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms).
\item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component.
\item \code{"dispersion"}: returns the dispersion model component. This is common for models with zero-inflation or that can model the dispersion parameter.
\item \code{"instruments"}: for instrumental-variable or some fixed effects regression, returns the instruments.
\item \code{"nonlinear"}: for non-linear models (like models of class \code{nlmerMod} or \code{nls}), returns starting estimates for the nonlinear parameters.
\item \code{"correlation"}: for models with a correlation component, like \code{gls}, the variables used to describe the correlation structure are returned.
}

\strong{Special models}

Some model classes also allow rather uncommon options. These are:
\itemize{
\item \strong{mhurdle}: \code{"infrequent_purchase"}, \code{"ip"}, and \code{"auxiliary"}
\item \strong{BGGM}: \code{"correlation"} and \code{"intercept"}
\item \strong{BFBayesFactor}, \strong{glmx}: \code{"extra"}
\item \strong{averaging}: \code{"conditional"} and \code{"full"}
\item \strong{mjoint}: \code{"survival"}
\item \strong{mfx}: \code{"precision"}, \code{"marginal"}
\item \strong{betareg}, \strong{DirichletRegModel}: \code{"precision"}
\item \strong{mvord}: \code{"thresholds"} and \code{"correlation"}
\item \strong{clm2}: \code{"scale"}
\item \strong{selection}: \code{"selection"}, \code{"outcome"}, and \code{"auxiliary"}
\item \strong{lavaan}: One or more of \code{"regression"}, \code{"correlation"}, \code{"loading"}, \code{"variance"}, \code{"defined"}, or \code{"mean"}. Can also be \code{"all"} to include all components.
}

For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here.
}
\examples{
library(parameters)
if (require("brglm2", quietly = TRUE)) {
  data("stemcell")
  model <- bracl(
    research ~ as.numeric(religion) + gender,
    weights = frequency,
    data = stemcell,
    type = "ML"
  )
  model_parameters(model)
}
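# A hedged sketch of an actual "glimML" model (the class this method
# supports), assuming the 'aod' package and its 'orob2' example data:
if (require("aod", quietly = TRUE)) {
  data(orob2, package = "aod")
  m_bb <- betabin(cbind(y, n - y) ~ seed, ~1, data = orob2)
  model_parameters(m_bb)
}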
}
\seealso{
\code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme.
}
parameters/man/dot-n_factors_scree.Rd0000644000176200001440000000051313641634603017407 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/n_factors.R
\name{.n_factors_scree}
\alias{.n_factors_scree}
\title{Non Graphical Cattell's Scree Test}
\usage{
.n_factors_scree(eigen_values = NULL, model = "factors")
}
\description{
Non Graphical Cattell's Scree Test
}
\keyword{internal}
parameters/man/model_parameters.BFBayesFactor.Rd0000644000176200001440000001165515066721002021422 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_BayesFactor.R
\name{model_parameters.BFBayesFactor}
\alias{model_parameters.BFBayesFactor}
\title{Parameters from BayesFactor objects}
\usage{
\method{model_parameters}{BFBayesFactor}(
  model,
  centrality = "median",
  dispersion = FALSE,
  ci = 0.95,
  ci_method = "eti",
  test = "pd",
  rope_range = "default",
  rope_ci = 0.95,
  priors = TRUE,
  es_type = NULL,
  include_proportions = FALSE,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{Object of class \code{BFBayesFactor}.}

\item{centrality}{The point-estimates (centrality indices) to compute. Character (vector) or list with one or more of these options: \code{"median"}, \code{"mean"}, \code{"MAP"} (see \code{\link[bayestestR:map_estimate]{map_estimate()}}), \code{"trimmed"} (which is just \code{mean(x, trim = threshold)}), \code{"mode"} or \code{"all"}.}

\item{dispersion}{Logical, if \code{TRUE}, computes indices of dispersion related to the estimate(s) (\code{SD} and \code{MAD} for \code{mean} and \code{median}, respectively). Dispersion is not available for \code{"MAP"} or \code{"mode"} centrality indices.}

\item{ci}{Value or vector of probability of the CI (between 0 and 1) to be estimated. Default to \code{0.95} (\verb{95\%}).}

\item{ci_method}{The type of index used for Credible Interval. Can be \code{"ETI"} (default, see \code{\link[bayestestR:eti]{eti()}}), \code{"HDI"} (see \code{\link[bayestestR:hdi]{hdi()}}), \code{"BCI"} (see \code{\link[bayestestR:bci]{bci()}}), \code{"SPI"} (see \code{\link[bayestestR:spi]{spi()}}), or \code{"SI"} (see \code{\link[bayestestR:si]{si()}}).}

\item{test}{The indices of effect existence to compute. Character (vector) or list with one or more of these options: \code{"p_direction"} (or \code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"p_significance"} (or \code{"ps"}), \code{"p_rope"}, \code{"equivalence_test"} (or \code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"} to compute all tests. For each "test", the corresponding \strong{bayestestR} function is called (e.g. \code{\link[bayestestR:rope]{rope()}} or \code{\link[bayestestR:p_direction]{p_direction()}}) and its results included in the summary output.}

\item{rope_range}{ROPE's lower and higher bounds. Should be a vector of two values (e.g., \code{c(-0.1, 0.1)}), \code{"default"} or a list of numeric vectors of the same length as numbers of parameters. If \code{"default"}, the bounds are set to \code{x +- 0.1*SD(response)}.}

\item{rope_ci}{The Credible Interval (CI) probability, corresponding to the proportion of HDI, to use for the percentage in ROPE.}

\item{priors}{Add the prior used for each parameter.}

\item{es_type}{The effect size of interest. Note that possibly not all effect sizes are applicable to the model object. See 'Details'.
For Anova models, can also be a character vector with multiple effect size names.}

\item{include_proportions}{Logical that decides whether to include posterior cell proportions/counts for Bayesian contingency table analysis (from \code{BayesFactor::contingencyTableBF()}). Defaults to \code{FALSE}, as this information is often redundant.}

\item{verbose}{Toggle off warnings.}

\item{...}{Additional arguments to be passed to or from methods.}
}
\value{
A data frame of indices related to the model's parameters.
}
\description{
Parameters from \code{BFBayesFactor} objects from the \strong{BayesFactor} package.
}
\details{
The meaning of the extracted parameters:
\itemize{
\item For \code{\link[BayesFactor:ttestBF]{BayesFactor::ttestBF()}}: \code{Difference} is the raw difference between the means.
\item For \code{\link[BayesFactor:correlationBF]{BayesFactor::correlationBF()}}: \code{rho} is the linear correlation estimate (equivalent to Pearson's \emph{r}).
\item For \code{\link[BayesFactor:lmBF]{BayesFactor::lmBF()}} / \code{\link[BayesFactor:generalTestBF]{BayesFactor::generalTestBF()}} / \code{\link[BayesFactor:regressionBF]{BayesFactor::regressionBF()}} / \code{\link[BayesFactor:anovaBF]{BayesFactor::anovaBF()}}: in addition to parameters of the fixed and random effects, there are: \code{mu}, the (mean-centered) intercept; \code{sig2}, the model's sigma; and \code{g} / \verb{g_*}, the \emph{g} parameters; see the \emph{Bayes Factors for ANOVAs} paper (\doi{10.1016/j.jmp.2012.08.001}).
}
}
\examples{
\dontshow{if (require("BayesFactor")) withAutoprint(\{ # examplesIf}
\donttest{
# Bayesian t-test
model <- BayesFactor::ttestBF(x = rnorm(100, 1, 1))
model_parameters(model)
model_parameters(model, es_type = "cohens_d", ci = 0.9)

# Bayesian contingency table analysis
data(raceDolls)
bf <- BayesFactor::contingencyTableBF(
  raceDolls,
  sampleType = "indepMulti",
  fixedMargin = "cols"
)
model_parameters(bf,
  centrality = "mean",
  dispersion = TRUE,
  verbose = FALSE,
  es_type = "cramers_v"
)
}
\dontshow{\}) # examplesIf}
}
parameters/man/model_parameters.htest.Rd0000644000176200001440000002136615053035103020133 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_htest.R, R/methods_lmtest.R
\name{model_parameters.htest}
\alias{model_parameters.htest}
\alias{model_parameters.coeftest}
\title{Parameters from hypothesis tests}
\usage{
\method{model_parameters}{htest}(
  model,
  ci = 0.95,
  alternative = NULL,
  bootstrap = FALSE,
  es_type = NULL,
  verbose = TRUE,
  ...
)

\method{model_parameters}{coeftest}(
  model,
  ci = 0.95,
  ci_method = "wald",
  keep = NULL,
  drop = NULL,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{Object of class \code{htest} or \code{pairwise.htest}.}

\item{ci}{Level of confidence intervals for effect size statistic. Currently only applies to objects from \code{chisq.test()} or \code{oneway.test()}.}

\item{alternative}{A character string specifying the alternative hypothesis; controls the type of CI returned: \code{"two.sided"} (default, two-sided CI), \code{"greater"} or \code{"less"} (one-sided CI). Partial matching is allowed (e.g., \code{"g"}, \code{"l"}, \code{"two"}...). See section \emph{One-Sided CIs} in the \href{https://easystats.github.io/effectsize/}{effectsize_CIs vignette}.}

\item{bootstrap}{Should estimates be bootstrapped?}

\item{es_type}{The effect size of interest. Note that possibly not all effect sizes are applicable to the model object. See 'Details'.
For Anova models, can also be a character vector with multiple effect size names.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}. Further non-documented arguments are: \itemize{ \item \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{groups} can be used to group coefficients. These arguments will be passed to the print-method, or can directly be used in \code{print()}, see documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}. \item If \code{s_value = TRUE}, the p-value will be replaced by the S-value in the output (cf. \emph{Rafi and Greenland 2020}). \item \code{pd} adds an additional column with the \emph{probability of direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details). Furthermore, see 'Examples' for this function. \item For developers, whose interest mainly is to get a "tidy" data frame of model summaries, it is recommended to set \code{pretty_names = FALSE} to speed up computation of the summary table. }} \item{ci_method}{Method for computing degrees of freedom for confidence intervals (CI) and the related p-values. Allowed are following options (which vary depending on the model class): \code{"residual"}, \code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"}, \code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"}, \code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or \code{"bcai"}. See section \emph{Confidence intervals and approximation of degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}} for further details. When \code{ci_method=NULL}, in most cases \code{"wald"} is used then.} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{See \code{keep}.} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters of h-tests (correlations, t-tests, chi-squared, ...). 
}
\details{
\itemize{
\item For an object of class \code{htest}, data is extracted via \code{\link[insight:get_data]{insight::get_data()}}, and passed to the relevant function according to:
\itemize{
\item A \strong{t-test}, depending on \code{type}: \code{"cohens_d"} (default), \code{"hedges_g"}, or one of \code{"p_superiority"}, \code{"u1"}, \code{"u2"}, \code{"u3"}, \code{"overlap"}.
\itemize{
\item For a \strong{Paired t-test}, depending on \code{type}: \code{"rm_rm"}, \code{"rm_av"}, \code{"rm_b"}, \code{"rm_d"}, \code{"rm_z"}.
}
\item A \strong{Chi-squared test of independence} or \strong{Fisher's Exact Test}, depending on \code{type}: \code{"cramers_v"} (default), \code{"tschuprows_t"}, \code{"phi"}, \code{"cohens_w"}, \code{"pearsons_c"}, \code{"cohens_h"}, \code{"oddsratio"}, \code{"riskratio"}, \code{"arr"}, or \code{"nnt"}.
\item A \strong{Chi-squared test of goodness-of-fit}, depending on \code{type}: \code{"fei"} (default), \code{"cohens_w"}, or \code{"pearsons_c"}.
\item A \strong{One-way ANOVA test}, depending on \code{type}: \code{"eta"} (default), \code{"omega"} or \code{"epsilon"} -squared, \code{"f"}, or \code{"f2"}.
\item A \strong{McNemar test} returns \emph{Cohen's g}.
\item A \strong{Wilcoxon test}, depending on \code{type}: returns the "\code{rank_biserial}" correlation (default) or one of \code{"p_superiority"}, \code{"vda"}, \code{"u2"}, \code{"u3"}, \code{"overlap"}.
\item A \strong{Kruskal-Wallis test}, depending on \code{type}: \code{"epsilon"} (default) or \code{"eta"}.
\item A \strong{Friedman test} returns \emph{Kendall's W}. (Where applicable, \code{ci} and \code{alternative} are taken from the \code{htest} if not otherwise provided.)
}
\item For an object of class \code{BFBayesFactor}, using \code{\link[bayestestR:describe_posterior]{bayestestR::describe_posterior()}},
\itemize{
\item A \strong{t-test}, depending on \code{type}: \code{"cohens_d"} (default) or one of \code{"p_superiority"}, \code{"u1"}, \code{"u2"}, \code{"u3"}, \code{"overlap"}.
\item A \strong{correlation test} returns \emph{r}.
\item A \strong{contingency table test}, depending on \code{type}: \code{"cramers_v"} (default), \code{"phi"}, \code{"tschuprows_t"}, \code{"cohens_w"}, \code{"pearsons_c"}, \code{"cohens_h"}, \code{"oddsratio"}, \code{"riskratio"}, \code{"arr"}, or \code{"nnt"}.
\item A \strong{proportion test} returns \emph{p}.
}
\item Objects of class \code{anova}, \code{aov}, \code{aovlist} or \code{afex_aov}, depending on \code{type}: \code{"eta"} (default), \code{"omega"} or \code{"epsilon"} -squared, \code{"f"}, or \code{"f2"}.
\item Objects of class \code{datawizard_crosstab(s)} / \code{datawizard_table(s)} built with \code{\link[datawizard:data_tabulate]{datawizard::data_tabulate()}} - same as Chi-squared tests of independence / goodness-of-fit, respectively.
\item Other objects are passed to \code{\link[parameters:standardize_parameters]{parameters::standardize_parameters()}}.
} \strong{For statistical models it is recommended to directly use the listed functions, for the full range of options they provide.} } \examples{ model <- cor.test(mtcars$mpg, mtcars$cyl, method = "pearson") model_parameters(model) model <- t.test(iris$Sepal.Width, iris$Sepal.Length) model_parameters(model, es_type = "hedges_g") model <- t.test(mtcars$mpg ~ mtcars$vs) model_parameters(model, es_type = "hedges_g") model <- t.test(iris$Sepal.Width, mu = 1) model_parameters(model, es_type = "cohens_d") data(airquality) airquality$Month <- factor(airquality$Month, labels = month.abb[5:9]) model <- pairwise.t.test(airquality$Ozone, airquality$Month) model_parameters(model) smokers <- c(83, 90, 129, 70) patients <- c(86, 93, 136, 82) model <- suppressWarnings(pairwise.prop.test(smokers, patients)) model_parameters(model) model <- suppressWarnings(chisq.test(table(mtcars$am, mtcars$cyl))) model_parameters(model, es_type = "cramers_v") } parameters/man/cluster_analysis.Rd0000644000176200001440000001512115063017426017047 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/cluster_analysis.R \name{cluster_analysis} \alias{cluster_analysis} \title{Cluster Analysis} \usage{ cluster_analysis( x, n = NULL, method = "kmeans", include_factors = FALSE, standardize = TRUE, verbose = TRUE, distance_method = "euclidean", hclust_method = "complete", kmeans_method = "Hartigan-Wong", dbscan_eps = 15, iterations = 100, ... ) } \arguments{ \item{x}{A data frame (with at least two variables), or a matrix (with at least two columns).} \item{n}{Number of clusters used for supervised cluster methods. If \code{NULL}, the number of clusters to extract is determined by calling \code{\link[=n_clusters]{n_clusters()}}. Note that this argument does not apply for unsupervised clustering methods like \code{dbscan}, \code{hdbscan}, \code{mixture}, \code{pvclust}, or \code{pamk}.} \item{method}{Method for computing the cluster analysis. Can be \code{"kmeans"} (default; k-means using \code{kmeans()}), \code{"hkmeans"} (hierarchical k-means using \code{factoextra::hkmeans()}), \code{pam} (K-Medoids using \code{cluster::pam()}), \code{pamk} (K-Medoids that finds out the number of clusters), \code{"hclust"} (hierarchical clustering using \code{hclust()} or \code{pvclust::pvclust()}), \code{dbscan} (DBSCAN using \code{dbscan::dbscan()}), \code{hdbscan} (Hierarchical DBSCAN using \code{dbscan::hdbscan()}), or \code{mixture} (Mixture modeling using \code{mclust::Mclust()}, which requires the user to run \code{library(mclust)} before).} \item{include_factors}{Logical, if \code{TRUE}, factors are converted to numerical values in order to be included in the data for determining the number of clusters. By default, factors are removed, because most methods that determine the number of clusters need numeric input only.} \item{standardize}{Standardize the dataframe before clustering (default).} \item{verbose}{Toggle warnings and messages.} \item{distance_method}{Distance measure to be used for methods based on distances (e.g., when \code{method = "hclust"} for hierarchical clustering. For other methods, such as \code{"kmeans"}, this argument will be ignored). Must be one of \code{"euclidean"}, \code{"maximum"}, \code{"manhattan"}, \code{"canberra"}, \code{"binary"} or \code{"minkowski"}. 
See \code{\link[=dist]{dist()}} and \code{pvclust::pvclust()} for more information.}

\item{hclust_method}{Agglomeration method to be used when \code{method = "hclust"} or \code{method = "hkmeans"} (for hierarchical clustering). This should be one of \code{"ward"}, \code{"ward.D2"}, \code{"single"}, \code{"complete"}, \code{"average"}, \code{"mcquitty"}, \code{"median"} or \code{"centroid"}. Default is \code{"complete"} (see \code{\link[=hclust]{hclust()}}).}

\item{kmeans_method}{Algorithm used for calculating the k-means clusters. Only applies if \code{method = "kmeans"}. May be one of \code{"Hartigan-Wong"} (default), \code{"Lloyd"} (used by SPSS), or \code{"MacQueen"}. See \code{\link[=kmeans]{kmeans()}} for details on this argument.}

\item{dbscan_eps}{The \code{eps} argument for the DBSCAN method. See \code{\link[=n_clusters_dbscan]{n_clusters_dbscan()}}.}

\item{iterations}{The number of replications.}

\item{...}{Arguments passed to or from other methods.}
}
\value{
The group classification for each observation as a vector. The returned vector includes missing values, so it has the same length as \code{nrow(x)}.
}
\description{
Compute hierarchical or kmeans cluster analysis and return the group assignment for each observation as a vector.
}
\details{
The \code{print()} and \code{plot()} methods show the (standardized) mean value for each variable within each cluster. Thus, a higher absolute value indicates that a certain variable characteristic is more pronounced within that specific cluster (as compared to other cluster groups with lower absolute mean values).

Cluster classifications can be obtained via \code{print(x, newdata = NULL, ...)}.
}
\note{
There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\strong{see}-package}.
}
\examples{
set.seed(33)
# K-Means ====================================================
rez <- cluster_analysis(iris[1:4], n = 3, method = "kmeans")
rez # Show results
predict(rez) # Get clusters
summary(rez) # Extract the center values (can use 'plot()' on that)
if (requireNamespace("MASS", quietly = TRUE)) {
  cluster_discrimination(rez) # Perform LDA
}

# Hierarchical k-means (more robust k-means)
if (require("factoextra", quietly = TRUE)) {
  rez <- cluster_analysis(iris[1:4], n = 3, method = "hkmeans")
  rez # Show results
  predict(rez) # Get clusters
}

# Hierarchical Clustering (hclust) ===========================
rez <- cluster_analysis(iris[1:4], n = 3, method = "hclust")
rez # Show results
predict(rez) # Get clusters

# K-Medoids (pam) ============================================
if (require("cluster", quietly = TRUE)) {
  rez <- cluster_analysis(iris[1:4], n = 3, method = "pam")
  rez # Show results
  predict(rez) # Get clusters
}

# PAM with automated number of clusters
if (require("fpc", quietly = TRUE)) {
  rez <- cluster_analysis(iris[1:4], method = "pamk")
  rez # Show results
  predict(rez) # Get clusters
}

# DBSCAN ====================================================
if (require("dbscan", quietly = TRUE)) {
  # Note that you can assimilate more outliers (cluster 0) to neighbouring
  # clusters by setting borderPoints = TRUE.
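  # A hedged variant (assuming 'borderPoints' is passed through
  # to dbscan::dbscan() via '...'):
  # cluster_analysis(iris[1:4], method = "dbscan", dbscan_eps = 1.45, borderPoints = TRUE)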
rez <- cluster_analysis(iris[1:4], method = "dbscan", dbscan_eps = 1.45) rez # Show results predict(rez) # Get clusters } # Mixture ==================================================== if (require("mclust", quietly = TRUE)) { library(mclust) # Needs the package to be loaded rez <- cluster_analysis(iris[1:4], method = "mixture") rez # Show results predict(rez) # Get clusters } } \references{ \itemize{ \item Maechler M, Rousseeuw P, Struyf A, Hubert M, Hornik K (2014) cluster: Cluster Analysis Basics and Extensions. R package. } } \seealso{ \itemize{ \item \code{\link[=n_clusters]{n_clusters()}} to determine the number of clusters to extract. \item \code{\link[=cluster_discrimination]{cluster_discrimination()}} to determine the accuracy of cluster group classification via linear discriminant analysis (LDA). \item \code{\link[performance:check_clusterstructure]{performance::check_clusterstructure()}} to check suitability of data for clustering. \item https://www.datanovia.com/en/lessons/ } } parameters/man/model_parameters.glht.Rd0000644000176200001440000001157414716604200017747 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_multcomp.R \name{model_parameters.glht} \alias{model_parameters.glht} \title{Parameters from Hypothesis Testing} \usage{ \method{model_parameters}{glht}( model, ci = 0.95, exponentiate = FALSE, keep = NULL, drop = NULL, verbose = TRUE, ... ) } \arguments{ \item{model}{Object of class \code{\link[multcomp:glht]{multcomp::glht()}} (\strong{multcomp}) or of class \code{PMCMR}, \code{trendPMCMR} or \code{osrt} (\strong{PMCMRplus}).} \item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. It is also recommended to use \code{exponentiate = TRUE} for models with log-transformed response values. For models with a log-transformed response variable, when \code{exponentiate = TRUE}, a one-unit increase in the predictor is associated with multiplying the outcome by that predictor's coefficient. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. 
If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{See \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}. Further non-documented arguments are: \itemize{ \item \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{groups} can be used to group coefficients. These arguments will be passed to the print-method, or can directly be used in \code{print()}, see documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}. \item If \code{s_value = TRUE}, the p-value will be replaced by the S-value in the output (cf. \emph{Rafi and Greenland 2020}). \item \code{pd} adds an additional column with the \emph{probability of direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details). Furthermore, see 'Examples' for this function. \item For developers, whose interest mainly is to get a "tidy" data frame of model summaries, it is recommended to set \code{pretty_names = FALSE} to speed up computation of the summary table. }} } \value{ A data frame of indices related to the model's parameters. } \description{ Parameters from Hypothesis Testing. } \examples{ \donttest{ if (require("multcomp", quietly = TRUE)) { # multiple linear model, swiss data lmod <- lm(Fertility ~ ., data = swiss) mod <- glht( model = lmod, linfct = c( "Agriculture = 0", "Examination = 0", "Education = 0", "Catholic = 0", "Infant.Mortality = 0" ) ) model_parameters(mod) } if (require("PMCMRplus", quietly = TRUE)) { model <- suppressWarnings( kwAllPairsConoverTest(count ~ spray, data = InsectSprays) ) model_parameters(model) } } } parameters/man/predict.parameters_clusters.Rd0000644000176200001440000000114214205441531021174 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/methods_kmeans.R \name{predict.parameters_clusters} \alias{predict.parameters_clusters} \title{Predict method for parameters_clusters objects} \usage{ \method{predict}{parameters_clusters}(object, newdata = NULL, names = NULL, ...) 
}
\arguments{
\item{object}{A model object for which prediction is desired.}

\item{newdata}{A data frame.}

\item{names}{A character vector or list.}

\item{...}{Additional arguments affecting the predictions produced.}
}
\description{
Predict method for parameters_clusters objects
}
parameters/man/model_parameters.default.Rd0000644000176200001440000004363315066721002020435 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1_model_parameters.R
\name{model_parameters.default}
\alias{model_parameters.default}
\title{Parameters from (General) Linear Models}
\usage{
\method{model_parameters}{default}(
  model,
  ci = 0.95,
  ci_method = NULL,
  bootstrap = FALSE,
  iterations = 1000,
  standardize = NULL,
  exponentiate = FALSE,
  p_adjust = NULL,
  vcov = NULL,
  vcov_args = NULL,
  include_info = getOption("parameters_info", FALSE),
  keep = NULL,
  drop = NULL,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{Model object.}

\item{ci}{Confidence Interval (CI) level. Default to \code{0.95} (\verb{95\%}).}

\item{ci_method}{Method for computing degrees of freedom for confidence intervals (CI) and the related p-values. Allowed are the following options (which vary depending on the model class): \code{"residual"}, \code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"}, \code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"}, \code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or \code{"bcai"}. See section \emph{Confidence intervals and approximation of degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}} for further details. When \code{ci_method = NULL}, \code{"wald"} is used in most cases.}

\item{bootstrap}{Should estimates be based on bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.brmsfit]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).}

\item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.}

\item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[=standardize_parameters]{standardize_parameters()}}. \strong{Importantly}:
\itemize{
\item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \strong{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model.
\item By default, the response (dependent) variable is also standardized, \emph{if applicable}. Set \code{include_response = FALSE} to avoid standardization of the response variable. See details in \code{\link[datawizard:standardize.default]{datawizard::standardize.default()}}.
\item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be standardized.
\item Robust estimation (i.e., \code{vcov} set to a value other than \code{NULL}) of standardized parameters only works when \code{standardize="refit"}.
}}

\item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals).
This is typical for logistic regression, or more generally speaking, for models with log or logit links. It is also recommended to use \code{exponentiate = TRUE} for models with log-transformed response values. For models with a log-transformed response variable, when \code{exponentiate = TRUE}, a one-unit increase in the predictor is associated with multiplying the outcome by that predictor's coefficient. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{p_adjust}{String value, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"}, \code{"sup-t"}, and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \strong{emmeans}). \code{"sup-t"} computes simultaneous confidence bands, also called sup-t confidence band (Montiel Olea & Plagborg-Møller, 2019).} \item{vcov}{Variance-covariance matrix used to compute uncertainty estimates (e.g., for robust standard errors). This argument accepts a covariance matrix, a function which returns a covariance matrix, or a string which identifies the function to be used to compute the covariance matrix. \itemize{ \item A covariance matrix \item A function which returns a covariance matrix (e.g., \code{stats::vcov()}) \item A string which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Cluster-robust: \code{"CR"}, \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, \code{"CR3"}. See \code{?clubSandwich::vcovCR} \item Bootstrap: \code{"BS"}, \code{"xy"}, \code{"residual"}, \code{"wild"}, \code{"mammen"}, \code{"fractional"}, \code{"jackknife"}, \code{"norm"}, \code{"webb"}. See \code{?sandwich::vcovBS} \item Other \code{sandwich} package functions: \code{"HAC"}, \code{"PC"}, \code{"CL"}, \code{"OPG"}, \code{"PL"}. } }} \item{vcov_args}{List of arguments to be passed to the function identified by the \code{vcov} argument. This function is typically supplied by the \strong{sandwich} or \strong{clubSandwich} packages. Please refer to their documentation (e.g., \code{?sandwich::vcovHAC}) to see the list of available arguments. If no estimation type (argument \code{type}) is given, the default type for \code{"HC"} equals the default from the \strong{sandwich} package; for type \code{"CR"}, the default is set to \code{"CR3"}.} \item{include_info}{Logical, if \code{TRUE}, prints summary information about the model (model formula, number of observations, residual standard deviation and more).} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. 
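For example, a hypothetical call like \code{model_parameters(model, keep = "^wt")} would retain only those parameters whose names start with \code{"wt"}.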
If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{See \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}. Further non-documented arguments are: \itemize{ \item \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{groups} can be used to group coefficients. These arguments will be passed to the print-method, or can directly be used in \code{print()}, see documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}. \item If \code{s_value = TRUE}, the p-value will be replaced by the S-value in the output (cf. \emph{Rafi and Greenland 2020}). \item \code{pd} adds an additional column with the \emph{probability of direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details). Furthermore, see 'Examples' for this function. \item For developers, whose interest mainly is to get a "tidy" data frame of model summaries, it is recommended to set \code{pretty_names = FALSE} to speed up computation of the summary table. }} } \value{ A data frame of indices related to the model's parameters. } \description{ Extract and compute indices and measures to describe parameters of (generalized) linear models (GLMs). } \section{Confidence intervals and approximation of degrees of freedom}{ There are different ways of approximating the degrees of freedom depending on different assumptions about the nature of the model and its sampling distribution. The \code{ci_method} argument modulates the method for computing degrees of freedom (df) that are used to calculate confidence intervals (CI) and the related p-values. Following options are allowed, depending on the model class: \strong{Classical methods:} Classical inference is generally based on the \strong{Wald method}. The Wald approach to inference computes a test statistic by dividing the parameter estimate by its standard error (Coefficient / SE), then comparing this statistic against a t- or normal distribution. This approach can be used to compute CIs and p-values. \code{"wald"}: \itemize{ \item Applies to \emph{non-Bayesian models}. For \emph{linear models}, CIs computed using the Wald method (SE and a \emph{t-distribution with residual df}); p-values computed using the Wald method with a \emph{t-distribution with residual df}. 
For other models, CIs computed using the Wald method (SE and a \emph{normal distribution}); p-values computed using the Wald method with a \emph{normal distribution}.
}

\code{"normal"}
\itemize{
\item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a normal distribution.
}

\code{"residual"}
\itemize{
\item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a \emph{t-distribution with residual df} when possible. If the residual df for a model cannot be determined, a normal distribution is used instead.
}

\strong{Methods for mixed models:}

Compared to fixed effects (or single-level) models, determining appropriate df for Wald-based inference in mixed models is more difficult. See \href{https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable}{the R GLMM FAQ} for a discussion. Several approximate methods for computing df are available, but you should also consider using profile likelihood (\code{"profile"}) or bootstrap (\code{"boot"}) CIs and p-values instead.

\code{"satterthwaite"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with Satterthwaite df}); p-values computed using the Wald method with a \emph{t-distribution with Satterthwaite df}.
}

\code{"kenward"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (\emph{Kenward-Roger SE} and a \emph{t-distribution with Kenward-Roger df}); p-values computed using the Wald method with \emph{Kenward-Roger SE and t-distribution with Kenward-Roger df}.
}

\code{"ml1"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with m-l-1 approximated df}); p-values computed using the Wald method with a \emph{t-distribution with m-l-1 approximated df}. See \code{\link[=ci_ml1]{ci_ml1()}}.
}

\code{"betwithin"}
\itemize{
\item Applies to \emph{linear mixed models} and \emph{generalized linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with between-within df}); p-values computed using the Wald method with a \emph{t-distribution with between-within df}. See \code{\link[=ci_betwithin]{ci_betwithin()}}.
}

\strong{Likelihood-based methods:}

Likelihood-based inference is based on comparing the likelihood for the maximum-likelihood estimate to the likelihood for models with one or more parameter values changed (e.g., set to zero or a range of alternative values). Likelihood ratios for the maximum-likelihood and alternative models are compared to a \eqn{\chi}-squared distribution to compute CIs and p-values.

\code{"profile"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{glm}, \code{polr}, \code{merMod} or \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using linear interpolation to find where the likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!)
}

\code{"uniroot"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using root finding to find where the likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!)
}

\strong{Methods for bootstrapped or Bayesian models:}

Bootstrap-based inference is based on \strong{resampling} and refitting the model to the resampled datasets. The distribution of parameter estimates across resampled datasets is used to approximate the parameter's sampling distribution. Depending on the type of model, several different methods for bootstrapping and constructing CIs and p-values from the bootstrap distribution are available. For Bayesian models, inference is based on drawing samples from the model posterior distribution.

\code{"quantile"} (or \code{"eti"})
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{equal tailed intervals} using the quantiles of the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:eti]{bayestestR::eti()}}.
}

\code{"hdi"}
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{highest density intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:hdi]{bayestestR::hdi()}}.
}

\code{"bci"} (or \code{"bcai"})
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{bias corrected and accelerated intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:bci]{bayestestR::bci()}}.
}

\code{"si"}
\itemize{
\item Applies to \emph{Bayesian models} with proper priors. CIs computed as \emph{support intervals} comparing the posterior samples against the prior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:si]{bayestestR::si()}}.
}

\code{"boot"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{merMod}. CIs computed using \emph{parametric bootstrapping} (simulating data from the fitted model); p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!).
}

For all iteration-based methods other than \code{"boot"} (\code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, \code{"bcai"}), p-values are based on the probability of direction (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted into a p-value using \code{\link[bayestestR:pd_to_p]{bayestestR::pd_to_p()}}.
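
As a rough sketch of how these \code{ci_method} options are selected in practice (a hedged illustration using only arguments documented above, not additional API):

\if{html}{\out{<div class="sourceCode r">}}\preformatted{model <- lm(mpg ~ wt + cyl, data = mtcars)

# Wald CIs and p-values, t-distribution with residual df
model_parameters(model, ci_method = "residual")

# Wald CIs and p-values, but enforcing a normal distribution
model_parameters(model, ci_method = "normal")

# bootstrapped highest density intervals
model_parameters(model, bootstrap = TRUE, ci_method = "hdi")
}\if{html}{\out{</div>}}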
}
\examples{
\dontshow{if (require("boot", quietly = TRUE) && require("sandwich") && require("clubSandwich") && require("brglm2")) withAutoprint(\{ # examplesIf}
library(parameters)

model <- lm(mpg ~ wt + cyl, data = mtcars)
model_parameters(model)

# bootstrapped parameters
model_parameters(model, bootstrap = TRUE)

# standardized parameters
model_parameters(model, standardize = "refit")

# robust, heteroskedasticity-consistent standard errors
model_parameters(model, vcov = "HC3")

model_parameters(model,
  vcov = "vcovCL",
  vcov_args = list(cluster = mtcars$cyl)
)

# different p-value style in output
model_parameters(model, p_digits = 5)
model_parameters(model, digits = 3, ci_digits = 4, p_digits = "scientific")

# report S-value or probability of direction for parameters
model_parameters(model, s_value = TRUE)
model_parameters(model, pd = TRUE)

\donttest{
# logistic regression model
model <- glm(vs ~ wt + cyl, data = mtcars, family = "binomial")
model_parameters(model)

# show odds ratio / exponentiated coefficients
model_parameters(model, exponentiate = TRUE)

# bias-corrected logistic regression with penalized maximum likelihood
model <- glm(
  vs ~ wt + cyl,
  data = mtcars,
  family = "binomial",
  method = "brglmFit"
)
model_parameters(model)
}
\dontshow{\}) # examplesIf}
}
\seealso{
\code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme.
}
parameters/man/model_parameters.glmmTMB.Rd0000644000176200001440000006613515066721002020312 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_glmmTMB.R
\name{model_parameters.glmmTMB}
\alias{model_parameters.glmmTMB}
\title{Parameters from Mixed Models}
\usage{
\method{model_parameters}{glmmTMB}(
  model,
  ci = 0.95,
  ci_method = "wald",
  ci_random = NULL,
  bootstrap = FALSE,
  iterations = 1000,
  standardize = NULL,
  effects = "all",
  component = "all",
  group_level = FALSE,
  exponentiate = FALSE,
  p_adjust = NULL,
  vcov = NULL,
  vcov_args = NULL,
  wb_component = FALSE,
  include_info = getOption("parameters_mixed_info", FALSE),
  include_sigma = FALSE,
  keep = NULL,
  drop = NULL,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{A mixed model.}

\item{ci}{Confidence Interval (CI) level. Defaults to \code{0.95} (\verb{95\%}).}

\item{ci_method}{Method for computing degrees of freedom for confidence intervals (CI) and the related p-values. Allowed are the following options (which vary depending on the model class): \code{"residual"}, \code{"normal"}, \code{"likelihood"}, \code{"satterthwaite"}, \code{"kenward"}, \code{"wald"}, \code{"profile"}, \code{"boot"}, \code{"uniroot"}, \code{"ml1"}, \code{"betwithin"}, \code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, or \code{"bcai"}. See section \emph{Confidence intervals and approximation of degrees of freedom} in \code{\link[=model_parameters]{model_parameters()}} for further details. When \code{ci_method = NULL}, \code{"wald"} is used in most cases.}

\item{ci_random}{Logical, if \code{TRUE}, includes the confidence intervals for random effects parameters. Only applies if \code{effects} is not \code{"fixed"} and if \code{ci} is not \code{NULL}. Set \code{ci_random = FALSE} if computation of the model summary is too time-consuming. By default, \code{ci_random = NULL}, which uses a heuristic to guess if computation of confidence intervals for random effects is fast enough or not.
For models with larger sample size and/or more complex random effects structures, confidence intervals will not be computed by default; for simpler models or fewer observations, confidence intervals will be included. Set explicitly to \code{TRUE} or \code{FALSE} to enforce or omit calculation of confidence intervals.}

\item{bootstrap}{Should estimates be based on a bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.brmsfit]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).}

\item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.}

\item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[=standardize_parameters]{standardize_parameters()}}. \strong{Importantly}:
\itemize{
\item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \strong{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model.
\item By default, the response (dependent) variable is also standardized, \emph{if applicable}. Set \code{include_response = FALSE} to avoid standardization of the response variable. See details in \code{\link[datawizard:standardize.default]{datawizard::standardize.default()}}.
\item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be standardized.
\item Robust estimation (i.e., \code{vcov} set to a value other than \code{NULL}) of standardized parameters only works when \code{standardize="refit"}.
}}

\item{effects}{Should parameters for fixed effects (\code{"fixed"}), random effects (\code{"random"}), or both fixed and random effects (\code{"all"}) be returned? By default, the variance components for random effects are returned. If group-level effects are requested, \code{"grouplevel"} returns the group-level random effects (BLUPs), while \code{"random_total"} returns the overall (sum of fixed and random) effects (similar to what \code{coef()} returns). Using \code{"grouplevel"} is equivalent to setting \code{group_level = TRUE}. The \code{effects} argument only applies to mixed models. If the calculation of random effects parameters takes too long, you may use \code{effects = "fixed"}.}

\item{component}{Which type of parameters should be returned, such as parameters for the conditional model, the zero-inflation part of the model, the dispersion term, or other auxiliary parameters? Applies to models with zero-inflation and/or dispersion formula, or if parameters such as \code{sigma} should be included. May be abbreviated. Note that the \emph{conditional} component is also called \emph{count} or \emph{mean} component, depending on the model. There are three convenient shortcuts: \code{component = "all"} returns all possible parameters. If \code{component = "location"}, location parameters such as \code{conditional}, \code{zero_inflated}, or \code{smooth_terms}, are returned (everything that is a fixed or random effect - depending on the \code{effects} argument - but no auxiliary parameters).
For \code{component = "distributional"} (or \code{"auxiliary"}), components like \code{sigma}, \code{dispersion}, or \code{beta} (and other auxiliary parameters) are returned.} \item{group_level}{Logical, for multilevel models (i.e. models with random effects) and when \code{effects = "random"}, include the parameters for each group level from random effects. If \code{group_level = FALSE} (the default), only information on SD and COR are shown.} \item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. It is also recommended to use \code{exponentiate = TRUE} for models with log-transformed response values. For models with a log-transformed response variable, when \code{exponentiate = TRUE}, a one-unit increase in the predictor is associated with multiplying the outcome by that predictor's coefficient. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{p_adjust}{String value, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"}, \code{"sup-t"}, and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \strong{emmeans}). \code{"sup-t"} computes simultaneous confidence bands, also called sup-t confidence band (Montiel Olea & Plagborg-Møller, 2019).} \item{vcov}{Variance-covariance matrix used to compute uncertainty estimates (e.g., for robust standard errors). This argument accepts a covariance matrix, a function which returns a covariance matrix, or a string which identifies the function to be used to compute the covariance matrix. \itemize{ \item A covariance matrix \item A function which returns a covariance matrix (e.g., \code{stats::vcov()}) \item A string which indicates the kind of uncertainty estimates to return. \itemize{ \item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}. See \code{?sandwich::vcovHC} \item Cluster-robust: \code{"CR"}, \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, \code{"CR3"}. See \code{?clubSandwich::vcovCR} \item Bootstrap: \code{"BS"}, \code{"xy"}, \code{"residual"}, \code{"wild"}, \code{"mammen"}, \code{"fractional"}, \code{"jackknife"}, \code{"norm"}, \code{"webb"}. See \code{?sandwich::vcovBS} \item Other \code{sandwich} package functions: \code{"HAC"}, \code{"PC"}, \code{"CL"}, \code{"OPG"}, \code{"PL"}. } }} \item{vcov_args}{List of arguments to be passed to the function identified by the \code{vcov} argument. This function is typically supplied by the \strong{sandwich} or \strong{clubSandwich} packages. Please refer to their documentation (e.g., \code{?sandwich::vcovHAC}) to see the list of available arguments. 
If no estimation type (argument \code{type}) is given, the default type for \code{"HC"} equals the default from the \strong{sandwich} package; for type \code{"CR"}, the default is set to \code{"CR3"}.}

\item{wb_component}{Logical, if \code{TRUE} and the model contains within- and between-effects (see \code{datawizard::demean()}), the \code{Component} column will indicate which variables belong to the within-effects, between-effects, and cross-level interactions. By default, the \code{Component} column indicates which parameters belong to the conditional or zero-inflation component of the model.}

\item{include_info}{Logical, if \code{TRUE}, prints summary information about the model (model formula, number of observations, residual standard deviation and more).}

\item{include_sigma}{Logical, if \code{TRUE}, includes the residual standard deviation. For mixed models, this is defined as the sum of the distribution-specific variance and the variance for the additive overdispersion term (see \code{\link[insight:get_variance]{insight::get_variance()}} for details). Defaults to \code{FALSE} for mixed models due to the longer computation time.}

\item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.}

\item{drop}{See \code{keep}.}

\item{verbose}{Toggle warnings and messages.}

\item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}. Further non-documented arguments are:
\itemize{
\item \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{groups} can be used to group coefficients. These arguments will be passed to the print-method, or can directly be used in \code{print()}, see documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}.
\item If \code{s_value = TRUE}, the p-value will be replaced by the S-value in the output (cf. \emph{Rafi and Greenland 2020}).
\item \code{pd} adds an additional column with the \emph{probability of direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details). Furthermore, see 'Examples' for this function.
\item For developers, whose interest mainly is to get a "tidy" data frame of model summaries, it is recommended to set \code{pretty_names = FALSE} to speed up computation of the summary table.
}}
}
\value{
A data frame of indices related to the model's parameters.
}
\description{
Parameters from (linear) mixed models.
}
\note{
If the calculation of random effects parameters takes too long, you may use \code{effects = "fixed"}. There is also a \href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\strong{see}-package}.
}
\section{Confidence intervals for random effects variances}{
For models of class \code{merMod} and \code{glmmTMB}, confidence intervals for random effect variances can be calculated.
\itemize{
\item For models from package \strong{lme4}, when \code{ci_method} is either \code{"profile"} or \code{"boot"}, and \code{effects} is either \code{"random"} or \code{"all"}, profiled or bootstrapped confidence intervals, respectively, are computed for the random effects.
\item For all other options of \code{ci_method}, and only when the \strong{merDeriv} package is installed, confidence intervals for random effects are based on a normal-distribution approximation, using the delta-method to transform standard errors for constructing the intervals around the log-transformed SD parameters. These are then back-transformed, so that random effect variances, standard errors and confidence intervals are shown on the original scale. Due to the transformation, the intervals are asymmetrical; however, they are within the correct bounds (i.e. no negative interval for the SD, and the interval for the correlations is within the range from -1 to +1).
\item For models of class \code{glmmTMB}, confidence intervals for random effect variances always use a Wald t-distribution approximation.
}
}
\section{Singular fits (random effects variances near zero)}{
If a model is "singular", this means that some dimensions of the variance-covariance matrix have been estimated as exactly zero. This often occurs for mixed models with complex random effects structures. There is no gold standard for how to deal with singularity and which random-effects specification to choose. One way is to fully go Bayesian (with informative priors). Other proposals are listed in the documentation of \code{\link[performance:check_singularity]{performance::check_singularity()}}.

However, since version 1.1.9, the \strong{glmmTMB} package allows the use of priors in a frequentist framework, too. One recommendation is to use a Gamma prior (\emph{Chung et al. 2013}). The mean may vary from 1 to very large values (like \code{1e8}), and the shape parameter should be set to a value of 2.5. You can then \code{update()} your model with the specified prior. In \strong{glmmTMB}, the code would look like this:

\if{html}{\out{<div class="sourceCode r">}}\preformatted{# "model" is an object of class glmmTMB
prior <- data.frame(
  prior = "gamma(1, 2.5)", # mean can be 1, but even 1e8
  class = "ranef" # for random effects
)
model_with_priors <- update(model, priors = prior)
}\if{html}{\out{
</div>}}

Large values for the mean parameter of the Gamma prior have no large impact on the random effects variances in terms of a "bias". Thus, if \code{1} doesn't fix the singular fit, you can safely try larger values.
}
\section{Dispersion parameters in \emph{glmmTMB}}{
For some models from package \strong{glmmTMB}, both the dispersion parameter and the residual variance from the random effects parameters are shown. Usually, these are the same but presented on different scales, e.g.

\if{html}{\out{<div class="sourceCode r">}}\preformatted{model <- glmmTMB(Sepal.Width ~ Petal.Length + (1|Species), data = iris)
exp(fixef(model)$disp) # 0.09902987
sigma(model)^2 # 0.09902987
}\if{html}{\out{
</div>}}

For models where the dispersion parameter and the residual variance are the same, only the residual variance is shown in the output.
}
\section{Model components}{
Possible values for the \code{component} argument depend on the model class. The following are valid options:
\itemize{
\item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component.
\item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component.
\item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms).
\item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component.
\item \code{"dispersion"}: returns the dispersion model component. This is common for models with zero-inflation or that can model the dispersion parameter.
\item \code{"instruments"}: for instrumental-variable or some fixed effects regression, returns the instruments.
\item \code{"nonlinear"}: for non-linear models (like models of class \code{nlmerMod} or \code{nls}), returns starting estimates for the nonlinear parameters.
\item \code{"correlation"}: for models with a correlation component, like \code{gls}, the variables used to describe the correlation structure are returned.
}

\strong{Special models}

Some model classes also allow rather uncommon options. These are:
\itemize{
\item \strong{mhurdle}: \code{"infrequent_purchase"}, \code{"ip"}, and \code{"auxiliary"}
\item \strong{BGGM}: \code{"correlation"} and \code{"intercept"}
\item \strong{BFBayesFactor}, \strong{glmx}: \code{"extra"}
\item \strong{averaging}: \code{"conditional"} and \code{"full"}
\item \strong{mjoint}: \code{"survival"}
\item \strong{mfx}: \code{"precision"}, \code{"marginal"}
\item \strong{betareg}, \strong{DirichletRegModel}: \code{"precision"}
\item \strong{mvord}: \code{"thresholds"} and \code{"correlation"}
\item \strong{clm2}: \code{"scale"}
\item \strong{selection}: \code{"selection"}, \code{"outcome"}, and \code{"auxiliary"}
\item \strong{lavaan}: One or more of \code{"regression"}, \code{"correlation"}, \code{"loading"}, \code{"variance"}, \code{"defined"}, or \code{"mean"}. Can also be \code{"all"} to include all components.
}

For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here.
}
\section{Confidence intervals and approximation of degrees of freedom}{
There are different ways of approximating the degrees of freedom depending on different assumptions about the nature of the model and its sampling distribution. The \code{ci_method} argument modulates the method for computing degrees of freedom (df) that are used to calculate confidence intervals (CI) and the related p-values. The following options are allowed, depending on the model class:

\strong{Classical methods:}

Classical inference is generally based on the \strong{Wald method}. The Wald approach to inference computes a test statistic by dividing the parameter estimate by its standard error (Coefficient / SE), then comparing this statistic against a t- or normal distribution. This approach can be used to compute CIs and p-values.

\code{"wald"}:
\itemize{
\item Applies to \emph{non-Bayesian models}.
For \emph{linear models}, CIs computed using the Wald method (SE and a \emph{t-distribution with residual df}); p-values computed using the Wald method with a \emph{t-distribution with residual df}. For other models, CIs computed using the Wald method (SE and a \emph{normal distribution}); p-values computed using the Wald method with a \emph{normal distribution}.
}

\code{"normal"}
\itemize{
\item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a normal distribution.
}

\code{"residual"}
\itemize{
\item Applies to \emph{non-Bayesian models}. Compute Wald CIs and p-values, but always use a \emph{t-distribution with residual df} when possible. If the residual df for a model cannot be determined, a normal distribution is used instead.
}

\strong{Methods for mixed models:}

Compared to fixed effects (or single-level) models, determining appropriate df for Wald-based inference in mixed models is more difficult. See \href{https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#what-are-the-p-values-listed-by-summaryglmerfit-etc.-are-they-reliable}{the R GLMM FAQ} for a discussion. Several approximate methods for computing df are available, but you should also consider using profile likelihood (\code{"profile"}) or bootstrap (\code{"boot"}) CIs and p-values instead.

\code{"satterthwaite"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with Satterthwaite df}); p-values computed using the Wald method with a \emph{t-distribution with Satterthwaite df}.
}

\code{"kenward"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (\emph{Kenward-Roger SE} and a \emph{t-distribution with Kenward-Roger df}); p-values computed using the Wald method with \emph{Kenward-Roger SE and t-distribution with Kenward-Roger df}.
}

\code{"ml1"}
\itemize{
\item Applies to \emph{linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with m-l-1 approximated df}); p-values computed using the Wald method with a \emph{t-distribution with m-l-1 approximated df}. See \code{\link[=ci_ml1]{ci_ml1()}}.
}

\code{"betwithin"}
\itemize{
\item Applies to \emph{linear mixed models} and \emph{generalized linear mixed models}. CIs computed using the Wald method (SE and a \emph{t-distribution with between-within df}); p-values computed using the Wald method with a \emph{t-distribution with between-within df}. See \code{\link[=ci_betwithin]{ci_betwithin()}}.
}

\strong{Likelihood-based methods:}

Likelihood-based inference is based on comparing the likelihood for the maximum-likelihood estimate to the likelihood for models with one or more parameter values changed (e.g., set to zero or a range of alternative values). Likelihood ratios for the maximum-likelihood and alternative models are compared to a \eqn{\chi}-squared distribution to compute CIs and p-values.

\code{"profile"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{glm}, \code{polr}, \code{merMod} or \code{glmmTMB}. CIs computed by \emph{profiling the likelihood curve for a parameter}, using linear interpolation to find where the likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!)
}

\code{"uniroot"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{glmmTMB}.
CIs computed by \emph{profiling the likelihood curve for a parameter}, using root finding to find where the likelihood ratio equals a critical value; p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!)
}

\strong{Methods for bootstrapped or Bayesian models:}

Bootstrap-based inference is based on \strong{resampling} and refitting the model to the resampled datasets. The distribution of parameter estimates across resampled datasets is used to approximate the parameter's sampling distribution. Depending on the type of model, several different methods for bootstrapping and constructing CIs and p-values from the bootstrap distribution are available. For Bayesian models, inference is based on drawing samples from the model posterior distribution.

\code{"quantile"} (or \code{"eti"})
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{equal tailed intervals} using the quantiles of the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:eti]{bayestestR::eti()}}.
}

\code{"hdi"}
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{highest density intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:hdi]{bayestestR::hdi()}}.
}

\code{"bci"} (or \code{"bcai"})
\itemize{
\item Applies to \emph{all models (including Bayesian models)}. For non-Bayesian models, only applies if \code{bootstrap = TRUE}. CIs computed as \emph{bias corrected and accelerated intervals} for the bootstrap or posterior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:bci]{bayestestR::bci()}}.
}

\code{"si"}
\itemize{
\item Applies to \emph{Bayesian models} with proper priors. CIs computed as \emph{support intervals} comparing the posterior samples against the prior samples; p-values are based on the \emph{probability of direction}. See \code{\link[bayestestR:si]{bayestestR::si()}}.
}

\code{"boot"}
\itemize{
\item Applies to \emph{non-Bayesian models} of class \code{merMod}. CIs computed using \emph{parametric bootstrapping} (simulating data from the fitted model); p-values computed using the Wald method with a \emph{normal-distribution} (note: this might change in a future update!).
}

For all iteration-based methods other than \code{"boot"} (\code{"hdi"}, \code{"quantile"}, \code{"ci"}, \code{"eti"}, \code{"si"}, \code{"bci"}, \code{"bcai"}), p-values are based on the probability of direction (\code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}), which is converted into a p-value using \code{\link[bayestestR:pd_to_p]{bayestestR::pd_to_p()}}.
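
For mixed models, a minimal sketch of switching between these approximation methods (assuming the \strong{lme4}, \strong{lmerTest} and \strong{pbkrtest} packages are installed for the Satterthwaite and Kenward-Roger approximations):

\if{html}{\out{<div class="sourceCode r">}}\preformatted{model <- lme4::lmer(mpg ~ wt + (1 | gear), data = mtcars)

# Wald CIs and p-values with Satterthwaite-approximated df
model_parameters(model, ci_method = "satterthwaite")

# Kenward-Roger standard errors and df
model_parameters(model, ci_method = "kenward")

# profile-likelihood confidence intervals
model_parameters(model, ci_method = "profile")
}\if{html}{\out{</div>}}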
}
\examples{
\dontshow{if (require("lme4") && require("glmmTMB")) withAutoprint(\{ # examplesIf}
library(parameters)
data(mtcars)
model <- lme4::lmer(mpg ~ wt + (1 | gear), data = mtcars)
model_parameters(model)
\donttest{
data(Salamanders, package = "glmmTMB")
model <- glmmTMB::glmmTMB(
  count ~ spp + mined + (1 | site),
  ziformula = ~mined,
  family = poisson(),
  data = Salamanders
)
model_parameters(model, effects = "all")

model <- lme4::lmer(mpg ~ wt + (1 | gear), data = mtcars)
model_parameters(model, bootstrap = TRUE, iterations = 50, verbose = FALSE)
}
\dontshow{\}) # examplesIf}
}
\references{
Chung Y, Rabe-Hesketh S, Dorie V, Gelman A, and Liu J. 2013. "A Nondegenerate Penalized Likelihood Estimator for Variance Parameters in Multilevel Models." Psychometrika 78 (4): 685–709. \doi{10.1007/s11336-013-9328-2}
}
\seealso{
\code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme.
}
parameters/man/dot-n_factors_cng.Rd0000644000176200001440000000050513641634603017056 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/n_factors.R
\name{.n_factors_cng}
\alias{.n_factors_cng}
\title{Cattell-Nelson-Gorsuch CNG Indices}
\usage{
.n_factors_cng(eigen_values = NULL, model = "factors")
}
\description{
Cattell-Nelson-Gorsuch CNG Indices
}
\keyword{internal}
parameters/man/p_significance.lm.Rd0000644000176200001440000003142715066721002017035 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/p_significance.R
\name{p_significance.lm}
\alias{p_significance.lm}
\title{Practical Significance (ps)}
\usage{
\method{p_significance}{lm}(
  x,
  threshold = "default",
  ci = 0.95,
  vcov = NULL,
  vcov_args = NULL,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{x}{A statistical model.}

\item{threshold}{The threshold value that separates significant from negligible effect, which can have the following possible values:
\itemize{
\item \code{"default"}, in which case the range is set to \code{0.1} if input is a vector, and based on \code{\link[bayestestR:rope_range]{rope_range()}} if a (Bayesian) model is provided.
\item a single numeric value (e.g., 0.1), which is used as range around zero (i.e. the threshold range is set to -0.1 and 0.1, i.e. reflects a symmetric interval)
\item a numeric vector of length two (e.g., \code{c(-0.2, 0.1)}), useful for asymmetric intervals
\item a list of numeric vectors, where each vector corresponds to a parameter
\item a list of \emph{named} numeric vectors, where names correspond to parameter names. In this case, all parameters that have no matching name in \code{threshold} will be set to \code{"default"}.
}}

\item{ci}{Confidence Interval (CI) level. Defaults to \code{0.95} (\verb{95\%}).}

\item{vcov}{Variance-covariance matrix used to compute uncertainty estimates (e.g., for robust standard errors). This argument accepts a covariance matrix, a function which returns a covariance matrix, or a string which identifies the function to be used to compute the covariance matrix.
\itemize{
\item A covariance matrix
\item A function which returns a covariance matrix (e.g., \code{stats::vcov()})
\item A string which indicates the kind of uncertainty estimates to return.
\itemize{
\item Heteroskedasticity-consistent: \code{"HC"}, \code{"HC0"}, \code{"HC1"}, \code{"HC2"}, \code{"HC3"}, \code{"HC4"}, \code{"HC4m"}, \code{"HC5"}.
See \code{?sandwich::vcovHC}
\item Cluster-robust: \code{"CR"}, \code{"CR0"}, \code{"CR1"}, \code{"CR1p"}, \code{"CR1S"}, \code{"CR2"}, \code{"CR3"}. See \code{?clubSandwich::vcovCR}
\item Bootstrap: \code{"BS"}, \code{"xy"}, \code{"residual"}, \code{"wild"}, \code{"mammen"}, \code{"fractional"}, \code{"jackknife"}, \code{"norm"}, \code{"webb"}. See \code{?sandwich::vcovBS}
\item Other \code{sandwich} package functions: \code{"HAC"}, \code{"PC"}, \code{"CL"}, \code{"OPG"}, \code{"PL"}.
}
}}

\item{vcov_args}{List of arguments to be passed to the function identified by the \code{vcov} argument. This function is typically supplied by the \strong{sandwich} or \strong{clubSandwich} packages. Please refer to their documentation (e.g., \code{?sandwich::vcovHAC}) to see the list of available arguments. If no estimation type (argument \code{type}) is given, the default type for \code{"HC"} equals the default from the \strong{sandwich} package; for type \code{"CR"}, the default is set to \code{"CR3"}.}

\item{verbose}{Toggle warnings and messages.}

\item{...}{Arguments passed to other methods.}
}
\value{
A data frame with columns for the parameter names, the confidence intervals and the values for practical significance. Higher values indicate more practical significance (upper bound is one).
}
\description{
Compute the probability of \strong{Practical Significance} (\emph{ps}), which can be conceptualized as a unidirectional equivalence test. It returns the probability that an effect is above a given threshold corresponding to a negligible effect in the median's direction, considering a parameter's \emph{full} confidence interval. In other words, it returns the probability of a clear direction of an effect, which is larger than the smallest effect size of interest (e.g., a minimal important difference). Its theoretical range is from zero to one, but the \emph{ps} is typically larger than 0.5 (to indicate practical significance).

In comparison to the \code{\link[=equivalence_test]{equivalence_test()}} function, where the \emph{SGPV} (second generation p-value) describes the proportion of the \emph{full} confidence interval that is \emph{inside} the ROPE, the value returned by \code{p_significance()} describes the \emph{larger} proportion of the \emph{full} confidence interval that is \emph{outside} the ROPE. This makes \code{p_significance()} comparable to \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}}; however, while \code{p_direction()} compares to a point-null by default, \code{p_significance()} compares to a range-null.
}
\details{
\code{p_significance()} returns the proportion of the \emph{full} confidence interval range (assuming a normally or t-distributed, equal-tailed interval, based on the model) that is outside a certain range (the negligible effect, or ROPE, see argument \code{threshold}). If there are values of the distribution both below and above the ROPE, \code{p_significance()} returns the higher probability of a value being outside the ROPE. Typically, this value should be larger than 0.5 to indicate practical significance. However, if the range of the negligible effect is rather large compared to the range of the confidence interval, \code{p_significance()} will be less than 0.5, which indicates no clear practical significance.

Note that the assumed interval, which is used to calculate the practical significance, is an estimation of the \emph{full interval} based on the chosen confidence level.
For example, if the 95\% confidence interval of a coefficient ranges from -1 to 1, the underlying \emph{full (normally or t-distributed) interval} approximately ranges from -1.9 to 1.9, see also the following code:

\if{html}{\out{<div class="sourceCode r">}}\preformatted{# simulate full normal distribution
out <- bayestestR::distribution_normal(10000, 0, 0.5)

# range of "full" distribution
range(out)

# range of 95\% CI
round(quantile(out, probs = c(0.025, 0.975)), 2)
}\if{html}{\out{</div>}}

This ensures that the practical significance always refers to the general compatible parameter space of coefficients. Therefore, the \emph{full interval} is similar to a Bayesian posterior distribution of an equivalent Bayesian model, see the following code:

\if{html}{\out{<div class="sourceCode r">}}\preformatted{library(bayestestR)
library(brms)

m <- lm(mpg ~ gear + wt + cyl + hp, data = mtcars)
m2 <- brm(mpg ~ gear + wt + cyl + hp, data = mtcars)

# probability of significance (ps) for frequentist model
p_significance(m)

# similar to ps of Bayesian models
p_significance(m2)

# similar to ps of simulated draws / bootstrap samples
p_significance(simulate_model(m))
}\if{html}{\out{
</div>}}
}
\note{
There is also a \href{https://easystats.github.io/see/articles/bayestestR.html}{\code{plot()}-method} implemented in the \href{https://easystats.github.io/see/}{\strong{see}-package}.
}
\section{Statistical inference - how to quantify evidence}{
There is no standardized approach to drawing conclusions based on the available data and statistical models. A frequently chosen but also much criticized approach is to evaluate results based on their statistical significance (\emph{Amrhein et al. 2017}).

A more sophisticated way would be to test whether estimated effects exceed the "smallest effect size of interest", to avoid even the smallest effects being considered relevant simply because they are statistically significant, but clinically or practically irrelevant (\emph{Lakens et al. 2018, Lakens 2024}).

A rather unconventional approach, which is nevertheless advocated by various authors, is to interpret results from classical regression models either in terms of probabilities, similar to the usual approach in Bayesian statistics (\emph{Schweder 2018; Schweder and Hjort 2003; Vos 2022}) or in terms of relative measure of "evidence" or "compatibility" with the data (\emph{Greenland et al. 2022; Rafi and Greenland 2020}), which nevertheless comes close to a probabilistic interpretation.

A more detailed discussion of this topic is found in the documentation of \code{\link[=p_function]{p_function()}}.

The \strong{parameters} package provides several options or functions to aid statistical inference. These are, for example:
\itemize{
\item \code{\link[=equivalence_test.lm]{equivalence_test()}}, to compute the (conditional) equivalence test for frequentist models
\item \code{\link[=p_significance.lm]{p_significance()}}, to compute the probability of \emph{practical significance}, which can be conceptualized as a unidirectional equivalence test
\item \code{\link[=p_function]{p_function()}}, or \emph{consonance function}, to compute p-values and compatibility (confidence) intervals for statistical models
\item the \code{pd} argument (setting \code{pd = TRUE}) in \code{model_parameters()} includes a column with the \emph{probability of direction}, i.e. the probability that a parameter is strictly positive or negative. See \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details. If plotting is desired, the \code{\link[=p_direction.lm]{p_direction()}} function can be used, together with \code{plot()}.
\item the \code{s_value} argument (setting \code{s_value = TRUE}) in \code{model_parameters()} replaces the p-values with their related \emph{S}-values (\emph{Rafi and Greenland 2020})
\item finally, it is possible to generate distributions of model coefficients by generating bootstrap-samples (setting \code{bootstrap = TRUE}) or simulating draws from model coefficients using \code{\link[=simulate_model]{simulate_model()}}. These samples can then be treated as "posterior samples" and used in many functions from the \strong{bayestestR} package.
}

Most of the options and functions shown above derive from methods originally implemented for Bayesian models (\emph{Makowski et al. 2019}). However, assuming that model assumptions are met (that is, the model fits the data well and the chosen model adequately reflects the data-generating process (distributional model family), etc.), it seems appropriate to interpret results from classical frequentist models in a "Bayesian way" (more details: documentation in \code{\link[=p_function]{p_function()}}). A compact sketch of some of these options is shown below.
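
A compact, hedged sketch of these inference aids (all arguments shown are documented in this package; the model is purely illustrative):

\if{html}{\out{<div class="sourceCode r">}}\preformatted{model <- lm(mpg ~ wt + cyl, data = mtcars)

# add a column with the probability of direction
model_parameters(model, pd = TRUE)

# replace p-values by S-values
model_parameters(model, s_value = TRUE)

# probability of practical significance
p_significance(model)
}\if{html}{\out{</div>}}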
}
\examples{
\dontshow{if (requireNamespace("bayestestR") && packageVersion("bayestestR") > "0.14.0" && requireNamespace("sandwich")) withAutoprint(\{ # examplesIf}
data(qol_cancer)
model <- lm(QoL ~ time + age + education, data = qol_cancer)

p_significance(model)
p_significance(model, threshold = c(-0.5, 1.5))

# based on heteroscedasticity-robust standard errors
p_significance(model, vcov = "HC3")

if (require("see", quietly = TRUE)) {
  result <- p_significance(model)
  plot(result)
}
\dontshow{\}) # examplesIf}
}
\references{
\itemize{
\item Amrhein, V., Korner-Nievergelt, F., and Roth, T. (2017). The earth is flat (p > 0.05): Significance thresholds and the crisis of unreplicable research. PeerJ, 5, e3544. \doi{10.7717/peerj.3544}
\item Greenland S, Rafi Z, Matthews R, Higgs M. To Aid Scientific Inference, Emphasize Unconditional Compatibility Descriptions of Statistics. (2022) https://arxiv.org/abs/1909.08583v7 (Accessed November 10, 2022)
\item Lakens, D. (2024). Improving Your Statistical Inferences (Version v1.5.1). Retrieved from https://lakens.github.io/statistical_inferences/. \doi{10.5281/ZENODO.6409077}
\item Lakens, D., Scheel, A. M., and Isager, P. M. (2018). Equivalence Testing for Psychological Research: A Tutorial. Advances in Methods and Practices in Psychological Science, 1(2), 259–269.
\item Makowski, D., Ben-Shachar, M. S., Chen, S. H. A., and Lüdecke, D. (2019). Indices of Effect Existence and Significance in the Bayesian Framework. Frontiers in Psychology, 10, 2767. \doi{10.3389/fpsyg.2019.02767}
\item Rafi Z, Greenland S. Semantic and cognitive tools to aid statistical science: replace confidence and significance by compatibility and surprise. BMC Medical Research Methodology (2020) 20:244.
\item Schweder T. Confidence is epistemic probability for empirical science. Journal of Statistical Planning and Inference (2018) 195:116–125. \doi{10.1016/j.jspi.2017.09.016}
\item Schweder T, Hjort NL. Frequentist analogues of priors and posteriors. In Stigum, B. (ed.), Econometrics and the Philosophy of Economics: Theory Data Confrontation in Economics, pp. 285-217. Princeton University Press, Princeton, NJ, 2003
\item Vos P, Holbert D. Frequentist statistical inference without repeated sampling. Synthese 200, 89 (2022). \doi{10.1007/s11229-022-03560-x}
}
}
\seealso{
For more details, see \code{\link[bayestestR:p_significance]{bayestestR::p_significance()}}. See also \code{\link[=equivalence_test]{equivalence_test()}}, \code{\link[=p_function]{p_function()}} and \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for functions related to checking effect existence and significance.
}
parameters/man/cluster_performance.Rd0000644000176200001440000000173714717111737017532 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cluster_performance.R
\name{cluster_performance}
\alias{cluster_performance}
\alias{cluster_performance.hclust}
\title{Performance of clustering models}
\usage{
cluster_performance(model, ...)

\method{cluster_performance}{hclust}(model, data, clusters, ...)
}
\arguments{
\item{model}{Cluster model.}

\item{...}{Arguments passed to or from other methods.}

\item{data}{A data frame.}

\item{clusters}{A vector with cluster assignments (must be the same length as the number of rows in the data).}
}
\description{
Compute performance indices for clustering solutions.
}
\examples{
# kmeans
model <- kmeans(iris[1:4], 3)
cluster_performance(model)

# hclust
data <- iris[1:4]
model <- hclust(dist(data))
clusters <- cutree(model, 3)
cluster_performance(model, data, clusters)

# Retrieve performance from parameters
params <- model_parameters(kmeans(iris[1:4], 3))
cluster_performance(params)
}
parameters/man/model_parameters.zcpglm.Rd0000644000176200001440000002375515066721002020304 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods_cplm.R
\name{model_parameters.zcpglm}
\alias{model_parameters.zcpglm}
\title{Parameters from Zero-Inflated Models}
\usage{
\method{model_parameters}{zcpglm}(
  model,
  ci = 0.95,
  bootstrap = FALSE,
  iterations = 1000,
  component = "all",
  standardize = NULL,
  exponentiate = FALSE,
  p_adjust = NULL,
  include_info = getOption("parameters_info", FALSE),
  keep = NULL,
  drop = NULL,
  verbose = TRUE,
  ...
)
}
\arguments{
\item{model}{A model with zero-inflation component.}

\item{ci}{Confidence Interval (CI) level. Defaults to \code{0.95} (\verb{95\%}).}

\item{bootstrap}{Should estimates be based on a bootstrapped model? If \code{TRUE}, then arguments of \link[=model_parameters.brmsfit]{Bayesian regressions} apply (see also \code{\link[=bootstrap_parameters]{bootstrap_parameters()}}).}

\item{iterations}{The number of bootstrap replicates. This only applies in the case of bootstrapped frequentist models.}

\item{component}{Should all parameters, parameters for the conditional model, for the zero-inflation part of the model, or the dispersion model be returned? Applies to models with zero-inflation and/or dispersion component. \code{component} may be one of \code{"conditional"}, \code{"zi"}, \code{"zero-inflated"}, \code{"dispersion"} or \code{"all"} (default). May be abbreviated.}

\item{standardize}{The method used for standardizing the parameters. Can be \code{NULL} (default; no standardization), \code{"refit"} (for re-fitting the model on standardized data) or one of \code{"basic"}, \code{"posthoc"}, \code{"smart"}, \code{"pseudo"}. See 'Details' in \code{\link[=standardize_parameters]{standardize_parameters()}}. \strong{Importantly}:
\itemize{
\item The \code{"refit"} method does \emph{not} standardize categorical predictors (i.e. factors), which may be a different behaviour compared to other R packages (such as \strong{lm.beta}) or other software packages (like SPSS). To mimic such behaviours, either use \code{standardize="basic"} or standardize the data with \code{datawizard::standardize(force=TRUE)} \emph{before} fitting the model.
\item By default, the response (dependent) variable is also standardized, \emph{if applicable}. Set \code{include_response = FALSE} to avoid standardization of the response variable. See details in \code{\link[datawizard:standardize.default]{datawizard::standardize.default()}}.
\item For mixed models, when using methods other than \code{"refit"}, only the fixed effects will be standardized.
\item Robust estimation (i.e., \code{vcov} set to a value other than \code{NULL}) of standardized parameters only works when \code{standardize="refit"}.
}}

\item{exponentiate}{Logical, indicating whether or not to exponentiate the coefficients (and related confidence intervals). This is typical for logistic regression, or more generally speaking, for models with log or logit links. It is also recommended to use \code{exponentiate = TRUE} for models with log-transformed response values.
For models with a log-transformed response variable, when \code{exponentiate = TRUE}, a one-unit increase in the predictor is associated with multiplying the outcome by that predictor's coefficient. \strong{Note:} Delta-method standard errors are also computed (by multiplying the standard errors by the transformed coefficients). This is to mimic behaviour of other software packages, such as Stata, but these standard errors poorly estimate uncertainty for the transformed coefficient. The transformed confidence interval more clearly captures this uncertainty. For \code{compare_parameters()}, \code{exponentiate = "nongaussian"} will only exponentiate coefficients from non-Gaussian families.} \item{p_adjust}{String value, if not \code{NULL}, indicates the method to adjust p-values. See \code{\link[stats:p.adjust]{stats::p.adjust()}} for details. Further possible adjustment methods are \code{"tukey"}, \code{"scheffe"}, \code{"sidak"}, \code{"sup-t"}, and \code{"none"} to explicitly disable adjustment for \code{emmGrid} objects (from \strong{emmeans}). \code{"sup-t"} computes simultaneous confidence bands, also called sup-t confidence band (Montiel Olea & Plagborg-Møller, 2019).} \item{include_info}{Logical, if \code{TRUE}, prints summary information about the model (model formula, number of observations, residual standard deviation and more).} \item{keep}{Character containing a regular expression pattern that describes the parameters that should be included (for \code{keep}) or excluded (for \code{drop}) in the returned data frame. \code{keep} may also be a named list of regular expressions. All non-matching parameters will be removed from the output. If \code{keep} is a character vector, every parameter name in the \emph{"Parameter"} column that matches the regular expression in \code{keep} will be selected from the returned data frame (and vice versa, all parameter names matching \code{drop} will be excluded). Furthermore, if \code{keep} has more than one element, these will be merged with an \code{OR} operator into a regular expression pattern like this: \code{"(one|two|three)"}. If \code{keep} is a named list of regular expression patterns, the names of the list-element should equal the column name where selection should be applied. This is useful for model objects where \code{model_parameters()} returns multiple columns with parameter components, like in \code{\link[=model_parameters.lavaan]{model_parameters.lavaan()}}. Note that the regular expression pattern should match the parameter names as they are stored in the returned data frame, which can be different from how they are printed. Inspect the \verb{$Parameter} column of the parameters table to get the exact parameter names.} \item{drop}{See \code{keep}.} \item{verbose}{Toggle warnings and messages.} \item{...}{Arguments passed to or from other methods. For instance, when \code{bootstrap = TRUE}, arguments like \code{type} or \code{parallel} are passed down to \code{bootstrap_model()}. Further non-documented arguments are: \itemize{ \item \code{digits}, \code{p_digits}, \code{ci_digits} and \code{footer_digits} to set the number of digits for the output. \code{groups} can be used to group coefficients. These arguments will be passed to the print-method, or can directly be used in \code{print()}, see documentation in \code{\link[=print.parameters_model]{print.parameters_model()}}. \item If \code{s_value = TRUE}, the p-value will be replaced by the S-value in the output (cf. \emph{Rafi and Greenland 2020}). 
\item \code{pd} adds an additional column with the \emph{probability of direction} (see \code{\link[bayestestR:p_direction]{bayestestR::p_direction()}} for details). Furthermore, see 'Examples' for this function.
\item For developers, whose interest mainly is to get a "tidy" data frame of model summaries, it is recommended to set \code{pretty_names = FALSE} to speed up computation of the summary table.
}}
}
\value{
A data frame of indices related to the model's parameters.
}
\description{
Parameters from zero-inflated models (from packages like \strong{pscl}, \strong{cplm} or \strong{countreg}).
}
\section{Model components}{
Possible values for the \code{component} argument depend on the model class. The following are valid options:
\itemize{
\item \code{"all"}: returns all model components, applies to all models, but will only have an effect for models with more than just the conditional model component.
\item \code{"conditional"}: only returns the conditional component, i.e. "fixed effects" terms from the model. Will only have an effect for models with more than just the conditional model component.
\item \code{"smooth_terms"}: returns smooth terms, only applies to GAMs (or similar models that may contain smooth terms).
\item \code{"zero_inflated"} (or \code{"zi"}): returns the zero-inflation component.
\item \code{"dispersion"}: returns the dispersion model component. This is common for models with zero-inflation or that can model the dispersion parameter.
\item \code{"instruments"}: for instrumental-variable or some fixed effects regression, returns the instruments.
\item \code{"nonlinear"}: for non-linear models (like models of class \code{nlmerMod} or \code{nls}), returns starting estimates for the nonlinear parameters.
\item \code{"correlation"}: for models with a correlation component, like \code{gls}, the variables used to describe the correlation structure are returned.
}

\strong{Special models}

Some model classes also allow rather uncommon options. These are:
\itemize{
\item \strong{mhurdle}: \code{"infrequent_purchase"}, \code{"ip"}, and \code{"auxiliary"}
\item \strong{BGGM}: \code{"correlation"} and \code{"intercept"}
\item \strong{BFBayesFactor}, \strong{glmx}: \code{"extra"}
\item \strong{averaging}: \code{"conditional"} and \code{"full"}
\item \strong{mjoint}: \code{"survival"}
\item \strong{mfx}: \code{"precision"}, \code{"marginal"}
\item \strong{betareg}, \strong{DirichletRegModel}: \code{"precision"}
\item \strong{mvord}: \code{"thresholds"} and \code{"correlation"}
\item \strong{clm2}: \code{"scale"}
\item \strong{selection}: \code{"selection"}, \code{"outcome"}, and \code{"auxiliary"}
\item \strong{lavaan}: One or more of \code{"regression"}, \code{"correlation"}, \code{"loading"}, \code{"variance"}, \code{"defined"}, or \code{"mean"}. Can also be \code{"all"} to include all components.
}

For models of class \code{brmsfit} (package \strong{brms}), even more options are possible for the \code{component} argument, which are not all documented in detail here.
}
\examples{
\dontshow{if (require("pscl")) withAutoprint(\{ # examplesIf}
data("bioChemists", package = "pscl")
model <- pscl::zeroinfl(
  art ~ fem + mar + kid5 + ment | kid5 + phd,
  data = bioChemists
)
model_parameters(model)
\dontshow{\}) # examplesIf}
}
\seealso{
\code{\link[insight:standardize_names]{insight::standardize_names()}} to rename columns into a consistent, standardized naming scheme.
\dontshow{\}) # examplesIf}
}
\seealso{
\code{\link[insight:standardize_names]{insight::standardize_names()}} to
rename columns into a consistent, standardized naming scheme.
}
parameters/man/parameters-options.Rd0000644000176200001440000000613015057525051017320 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/options.R
\name{parameters-options}
\alias{parameters-options}
\title{Global options from the parameters package}
\description{
Global options from the parameters package
}
\section{Global options to set defaults for function arguments}{

The \code{verbose} argument can be used to display or silence messages and
warnings for the different functions in the \strong{parameters} package.
However, some messages providing additional information can be displayed or
suppressed using \code{options()}:
\itemize{
\item \code{options(parameters_info = TRUE)} will override the
\code{include_info} argument in \code{model_parameters()} and always show
the model summary for non-mixed models.
\item \code{options(parameters_mixed_info = TRUE)} will override the
\code{include_info} argument in \code{model_parameters()} for mixed models,
and will then always show the model summary.
\item \code{options(parameters_cimethod = TRUE)} will show the additional
information about the approximation method used to calculate confidence
intervals and p-values. Set to \code{FALSE} to hide this message when
printing \code{model_parameters()} objects.
\item \code{options(parameters_exponentiate = TRUE)} will show the
additional information on how to interpret coefficients of models with
log-transformed response variables or with log-/logit-links when the
\code{exponentiate} argument in \code{model_parameters()} is not
\code{TRUE}. Set this option to \code{FALSE} to hide this message when
printing \code{model_parameters()} objects.
}

There are further options that can be used to modify the default behaviour
for printed outputs:
\itemize{
\item \code{options(parameters_labels = TRUE)} will use variable and value
labels for pretty names, if data is labelled. If no labels are available,
default pretty names are used.
\item \verb{options(parameters_interaction = )} will replace the
interaction mark (by default, \code{*}) with the related character.
\item \verb{options(parameters_select = )} will set the default for the
\code{select} argument. See the argument's documentation for available
options.
\item \verb{options(easystats_table_width = )} will set the default width
for tables in text-format, i.e. for most of the outputs printed to console.
If not specified, tables will be adjusted to the current available width,
e.g. of the console (or any other source for textual output, like markdown
files). The argument \code{table_width} can also be used in most
\code{print()} methods to specify the table width as desired.
\item \code{options(insight_use_symbols = TRUE)} will try to print unicode
characters for symbols as column names, wherever possible (e.g.,
\ifelse{html}{\out{ω}}{\eqn{\omega}} instead of \code{Omega}).
\item \verb{options(easystats_display_format = )} will set the default
format for the \code{display()} methods. Can be one of \code{"markdown"},
\code{"html"}, or \code{"tt"}. See
\code{\link[=display.parameters_model]{display.parameters_model()}} for
details.
}
}
parameters/man/simulate_parameters.Rd0000644000176200001440000001022715004371714017531 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulate_parameters.R
\name{simulate_parameters}
\alias{simulate_parameters}
\alias{simulate_parameters.default}
\title{Simulate Model Parameters}
\usage{
simulate_parameters(model, ...)
\method{simulate_parameters}{default}(
  model,
  iterations = 1000,
  centrality = "median",
  ci = 0.95,
  ci_method = "quantile",
  test = "p-value",
  ...
)
}
\arguments{
\item{model}{Statistical model (no Bayesian models).}

\item{...}{Arguments passed to
\code{\link[insight:get_varcov]{insight::get_varcov()}}, e.g. to allow
simulated draws to be based on heteroscedasticity-consistent
variance-covariance matrices.}

\item{iterations}{The number of draws to simulate/bootstrap.}

\item{centrality}{The point-estimates (centrality indices) to compute.
Character (vector) or list with one or more of these options:
\code{"median"}, \code{"mean"}, \code{"MAP"} (see
\code{\link[bayestestR:map_estimate]{map_estimate()}}), \code{"trimmed"}
(which is just \code{mean(x, trim = threshold)}), \code{"mode"} or
\code{"all"}.}

\item{ci}{Value or vector of probability of the CI (between 0 and 1) to be
estimated. Defaults to \code{0.95} (\verb{95\%}).}

\item{ci_method}{The type of index used for Credible Interval. Can be
\code{"ETI"} (default, see \code{\link[bayestestR:eti]{eti()}}),
\code{"HDI"} (see \code{\link[bayestestR:hdi]{hdi()}}), \code{"BCI"} (see
\code{\link[bayestestR:bci]{bci()}}), \code{"SPI"} (see
\code{\link[bayestestR:spi]{spi()}}), or \code{"SI"} (see
\code{\link[bayestestR:si]{si()}}).}

\item{test}{The indices of effect existence to compute. Character (vector)
or list with one or more of these options: \code{"p_direction"} (or
\code{"pd"}), \code{"rope"}, \code{"p_map"}, \code{"p_significance"} (or
\code{"ps"}), \code{"p_rope"}, \code{"equivalence_test"} (or
\code{"equitest"}), \code{"bayesfactor"} (or \code{"bf"}) or \code{"all"}
to compute all tests. For each "test", the corresponding \strong{bayestestR}
function is called (e.g. \code{\link[bayestestR:rope]{rope()}} or
\code{\link[bayestestR:p_direction]{p_direction()}}) and its results
included in the summary output.}
}
\value{
A data frame with simulated parameters.
}
\description{
Compute simulated draws of parameters and their related indices such as
Confidence Intervals (CI) and p-values. Simulating parameter draws can be
seen as a (computationally faster) alternative to bootstrapping.
}
\details{
\subsection{Technical Details}{

\code{simulate_parameters()} is a computationally faster alternative to
\code{bootstrap_parameters()}. Simulated draws for coefficients are based
on a multivariate normal distribution (\code{MASS::mvrnorm()}) with mean
\code{mu = coef(model)} and variance \code{Sigma = vcov(model)}.
}

\subsection{Models with Zero-Inflation Component}{

For models from packages \strong{glmmTMB}, \strong{pscl},
\strong{GLMMadaptive} and \strong{countreg}, the \code{component} argument
can be used to specify which parameters should be simulated. For all other
models, parameters from the conditional component (fixed effects) are
simulated. This may include smooth terms, but not random effects.
}
}
\note{
There is also a
\href{https://easystats.github.io/see/articles/parameters.html}{\code{plot()}-method}
implemented in the
\href{https://easystats.github.io/see/}{\strong{see}-package}.
}
\examples{
model <- lm(Sepal.Length ~ Species * Petal.Width + Petal.Length, data = iris)
simulate_parameters(model)
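
# A minimal sketch (not part of the original example) of the simulation
# mechanism described under 'Details', assuming the MASS package is
# installed: draws are sampled from a multivariate normal distribution with
# mean coef(model) and variance vcov(model).
set.seed(123)
draws <- MASS::mvrnorm(1000, mu = coef(model), Sigma = vcov(model))
# column-wise medians approximate the default point estimates
apply(draws, 2, median)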
\donttest{
if (require("glmmTMB", quietly = TRUE)) {
  model <- glmmTMB(
    count ~ spp + mined + (1 | site),
    ziformula = ~mined,
    family = poisson(),
    data = Salamanders
  )
  simulate_parameters(model, centrality = "mean")
  simulate_parameters(model, ci = c(.8, .95), component = "zero_inflated")
}
}
}
\references{
Gelman A, Hill J. Data analysis using regression and multilevel/hierarchical
models. Cambridge; New York: Cambridge University Press, 2007: 140-143.
}
\seealso{
\code{\link[=bootstrap_model]{bootstrap_model()}},
\code{\link[=bootstrap_parameters]{bootstrap_parameters()}},
\code{\link[=simulate_model]{simulate_model()}}
}
parameters/DESCRIPTION0000644000176200001440000001356515111307212014126 0ustar liggesusersType: Package
Package: parameters
Title: Processing of Model Parameters
Version: 0.28.3
Authors@R: c(person(given = "Daniel", family = "Lüdecke", role = c("aut", "cre"),
           email = "officialeasystats@gmail.com",
           comment = c(ORCID = "0000-0002-8895-3206")),
    person(given = "Dominique", family = "Makowski", role = "aut",
           email = "dom.makowski@gmail.com",
           comment = c(ORCID = "0000-0001-5375-9967")),
    person(given = "Mattan S.", family = "Ben-Shachar", role = "aut",
           email = "matanshm@post.bgu.ac.il",
           comment = c(ORCID = "0000-0002-4287-4801")),
    person(given = "Indrajeet", family = "Patil", role = "aut",
           email = "patilindrajeet.science@gmail.com",
           comment = c(ORCID = "0000-0003-1995-6531")),
    person(given = "Søren", family = "Højsgaard", role = "aut",
           email = "sorenh@math.aau.dk"),
    person(given = "Brenton M.", family = "Wiernik", role = "aut",
           email = "brenton@wiernik.org",
           comment = c(ORCID = "0000-0001-9560-6336")),
    person(given = "Zen J.", family = "Lau", role = "ctb",
           email = "zenjuen.lau@ntu.edu.sg"),
    person(given = "Vincent", family = "Arel-Bundock", role = "ctb",
           email = "vincent.arel-bundock@umontreal.ca",
           comment = c(ORCID = "0000-0003-2042-7063")),
    person(given = "Jeffrey", family = "Girard", role = "ctb",
           email = "me@jmgirard.com",
           comment = c(ORCID = "0000-0002-7359-3746")),
    person(given = "Christina", family = "Maimone", role = "rev",
           email = "christina.maimone@northwestern.edu"),
    person(given = "Niels", family = "Ohlsen", role = "rev"),
    person(given = "Douglas Ezra", family = "Morrison", role = "ctb",
           email = "dmorrison01@ucla.edu",
           comment = c(ORCID = "0000-0002-7195-830X")),
    person(given = "Joseph", family = "Luchman", role = "ctb",
           email = "jluchman@gmail.com",
           comment = c(ORCID = "0000-0002-8886-9717")))
Maintainer: Daniel Lüdecke <officialeasystats@gmail.com>
Description: Utilities for processing the parameters of various statistical
    models. Beyond computing p values, CIs, and other indices for a wide
    variety of models (see list of supported models using the function
    'insight::supported_models()'), this package implements features like
    bootstrapping or simulating of parameters and models, feature reduction
    (feature extraction and variable selection) as well as functions to
    describe data and variable characteristics (e.g. skewness, kurtosis,
    smoothness or distribution).
License: GPL-3
URL: https://easystats.github.io/parameters/
BugReports: https://github.com/easystats/parameters/issues
Depends: R (>= 3.6)
Imports: bayestestR (>= 0.17.0), datawizard (>= 1.3.0), insight (>=
        1.4.2), graphics, methods, stats, utils
Suggests: AER, afex, aod, BayesFactor (>= 0.9.12-4.7), BayesFM, bbmle,
        betareg, BH, biglm, blme, boot, brglm2, brms, broom,
        broom.mixed, cAIC4, car, carData, cgam, ClassDiscovery,
        clubSandwich, cluster, cobalt, coda, correlation (>= 0.8.8),
        coxme, cplm, curl, dbscan, did, discovr, distributional, domir
        (>= 0.2.0), drc, DRR, effectsize (>= 1.0.1), EGAnet, emmeans
        (>= 1.7.0), epiR, estimatr, factoextra, FactoMineR, faraway,
        fastICA, fixest, fpc, gam, gamlss, gee, geepack, ggplot2,
        GLMMadaptive, glmmTMB (>= 1.1.12), glmtoolbox, GPArotation,
        gt, haven, httr2, Hmisc, ivreg, knitr, lavaan, lcmm, lfe,
        lm.beta, lme4, lmerTest, lmtest, logistf, logitr, logspline,
        lqmm, M3C, marginaleffects (>= 0.29.0), modelbased (>= 0.9.0),
        MASS, Matrix, mclogit, mclust, MCMCglmm, mediation, merDeriv,
        metaBMA, metafor, mfx, mgcv, mice (>= 3.17.0), mmrm, multcomp,
        MuMIn, mvtnorm, NbClust, nFactors, nestedLogit, nlme, nnet,
        openxlsx, ordinal, panelr, pbkrtest, PCDimension, performance
        (>= 0.14.0), plm, PMCMRplus, poorman, posterior, PROreg (>=
        1.3.0), pscl, psych, pvclust, quantreg, randomForest,
        RcppEigen, rmarkdown, rms, rstan, rstanarm, sampleSelection,
        sandwich, see (>= 0.8.1), serp, sparsepca, survey, survival,
        svylme, testthat (>= 3.2.1), tidyselect, tinytable (>=
        0.13.0), TMB, truncreg, vdiffr, VGAM, WeightIt (>= 1.2.0),
        withr, WRS2
VignetteBuilder: knitr
Encoding: UTF-8
Language: en-US
RoxygenNote: 7.3.3
Config/testthat/edition: 3
Config/testthat/parallel: true
Config/Needs/website: easystats/easystatstemplate
Config/Needs/check: stan-dev/cmdstanr
Config/rcmdcheck/ignore-inconsequential-notes: true
NeedsCompilation: no
Packaged: 2025-11-25 10:23:57 UTC; mail
Author: Daniel Lüdecke [aut, cre] (ORCID:
        <https://orcid.org/0000-0002-8895-3206>),
  Dominique Makowski [aut] (ORCID: <https://orcid.org/0000-0001-5375-9967>),
  Mattan S. Ben-Shachar [aut] (ORCID: <https://orcid.org/0000-0002-4287-4801>),
  Indrajeet Patil [aut] (ORCID: <https://orcid.org/0000-0003-1995-6531>),
  Søren Højsgaard [aut],
  Brenton M. Wiernik [aut] (ORCID: <https://orcid.org/0000-0001-9560-6336>),
  Zen J. Lau [ctb],
  Vincent Arel-Bundock [ctb] (ORCID: <https://orcid.org/0000-0003-2042-7063>),
  Jeffrey Girard [ctb] (ORCID: <https://orcid.org/0000-0002-7359-3746>),
  Christina Maimone [rev],
  Niels Ohlsen [rev],
  Douglas Ezra Morrison [ctb] (ORCID: <https://orcid.org/0000-0002-7195-830X>),
  Joseph Luchman [ctb] (ORCID: <https://orcid.org/0000-0002-8886-9717>)
Repository: CRAN
Date/Publication: 2025-11-25 11:10:02 UTC